2326 files changed, 65166 insertions, 48682 deletions
@@ -313,6 +313,7 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com> Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl> Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com> Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com> +Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@opinsys.com> Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com> Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com> Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com> diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block index 11545c9e2e93..4ba771b56b3b 100644 --- a/Documentation/ABI/stable/sysfs-block +++ b/Documentation/ABI/stable/sysfs-block @@ -547,6 +547,21 @@ Description: [RO] Maximum size in bytes of a single element in a DMA scatter/gather list. +What: /sys/block/<disk>/queue/max_write_streams +Date: November 2024 +Contact: linux-block@vger.kernel.org +Description: + [RO] Maximum number of write streams supported, 0 if not + supported. If supported, valid values are 1 through + max_write_streams, inclusive. + +What: /sys/block/<disk>/queue/write_stream_granularity +Date: November 2024 +Contact: linux-block@vger.kernel.org +Description: + [RO] Granularity of a write stream in bytes. The granularity + of a write stream is the size that should be discarded or + overwritten together to avoid write amplification in the device. What: /sys/block/<disk>/queue/max_segments Date: March 2010 diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 6a1acabb29d8..7772d4f034d2 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -517,6 +517,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/meltdown /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + /sys/devices/system/cpu/vulnerabilities/old_microcode /sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling /sys/devices/system/cpu/vulnerabilities/retbleed /sys/devices/system/cpu/vulnerabilities/spec_store_bypass diff --git a/Documentation/ABI/testing/sysfs-driver-qat_ras b/Documentation/ABI/testing/sysfs-driver-qat_ras index 176dea1e9c0a..82ceb04445ec 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat_ras +++ b/Documentation/ABI/testing/sysfs-driver-qat_ras @@ -4,7 +4,7 @@ KernelVersion: 6.7 Contact: qat-linux@intel.com Description: (RO) Reports the number of correctable errors detected by the device. - This attribute is only available for qat_4xxx devices. + This attribute is only available for qat_4xxx and qat_6xxx devices. What: /sys/bus/pci/devices/<BDF>/qat_ras/errors_nonfatal Date: January 2024 @@ -12,7 +12,7 @@ KernelVersion: 6.7 Contact: qat-linux@intel.com Description: (RO) Reports the number of non fatal errors detected by the device. - This attribute is only available for qat_4xxx devices. + This attribute is only available for qat_4xxx and qat_6xxx devices. What: /sys/bus/pci/devices/<BDF>/qat_ras/errors_fatal Date: January 2024 @@ -20,7 +20,7 @@ KernelVersion: 6.7 Contact: qat-linux@intel.com Description: (RO) Reports the number of fatal errors detected by the device. - This attribute is only available for qat_4xxx devices. + This attribute is only available for qat_4xxx and qat_6xxx devices. 
What: /sys/bus/pci/devices/<BDF>/qat_ras/reset_error_counters Date: January 2024 @@ -38,4 +38,4 @@ Description: (WO) Write to resets all error counters of a device. # cat /sys/bus/pci/devices/<BDF>/qat_ras/errors_fatal 0 - This attribute is only available for qat_4xxx devices. + This attribute is only available for qat_4xxx and qat_6xxx devices. diff --git a/Documentation/ABI/testing/sysfs-fs-erofs b/Documentation/ABI/testing/sysfs-fs-erofs index b134146d735b..bf3b6299c15e 100644 --- a/Documentation/ABI/testing/sysfs-fs-erofs +++ b/Documentation/ABI/testing/sysfs-fs-erofs @@ -27,3 +27,11 @@ Description: Writing to this will drop compression-related caches, - 1 : invalidate cached compressed folios - 2 : drop in-memory pclusters - 3 : drop in-memory pclusters and cached compressed folios + +What: /sys/fs/erofs/accel +Date: May 2025 +Contact: "Bo Liu" <liubo03@inspur.com> +Description: Used to set or show hardware accelerators in effect + and multiple accelerators are separated by '\n'. + Supported accelerator(s): qat_deflate. + Disable all accelerators with an empty string (echo > accel). diff --git a/Documentation/RCU/listRCU.rst b/Documentation/RCU/listRCU.rst index ed5c9d8c9afe..d8bb98623c12 100644 --- a/Documentation/RCU/listRCU.rst +++ b/Documentation/RCU/listRCU.rst @@ -334,7 +334,7 @@ If the system-call audit module were to ever need to reject stale data, one way to accomplish this would be to add a ``deleted`` flag and a ``lock`` spinlock to the ``audit_entry`` structure, and modify audit_filter_task() as follows:: - static enum audit_state audit_filter_task(struct task_struct *tsk) + static struct audit_entry *audit_filter_task(struct task_struct *tsk, char **key) { struct audit_entry *e; enum audit_state state; @@ -346,16 +346,18 @@ to accomplish this would be to add a ``deleted`` flag and a ``lock`` spinlock to if (e->deleted) { spin_unlock(&e->lock); rcu_read_unlock(); - return AUDIT_BUILD_CONTEXT; + return NULL; } rcu_read_unlock(); if (state == AUDIT_STATE_RECORD) *key = kstrdup(e->rule.filterkey, GFP_ATOMIC); - return state; + /* As long as e->lock is held, e is valid and + * its value is not stale */ + return e; } } rcu_read_unlock(); - return AUDIT_BUILD_CONTEXT; + return NULL; } The ``audit_del_rule()`` function would need to set the ``deleted`` flag under the diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst index 53faeed7c190..be2eb6be16ec 100644 --- a/Documentation/RCU/whatisRCU.rst +++ b/Documentation/RCU/whatisRCU.rst @@ -15,6 +15,9 @@ to start learning about RCU: | 2014 Big API Table https://lwn.net/Articles/609973/ | 6. The RCU API, 2019 Edition https://lwn.net/Articles/777036/ | 2019 Big API Table https://lwn.net/Articles/777165/ +| 7. The RCU API, 2024 Edition https://lwn.net/Articles/988638/ +| 2024 Background Information https://lwn.net/Articles/988641/ +| 2024 Big API Table https://lwn.net/Articles/988666/ For those preferring video: diff --git a/Documentation/admin-guide/blockdev/index.rst b/Documentation/admin-guide/blockdev/index.rst index 957ccf617797..3262397ebe8f 100644 --- a/Documentation/admin-guide/blockdev/index.rst +++ b/Documentation/admin-guide/blockdev/index.rst @@ -11,6 +11,7 @@ Block Devices nbd paride ramdisk + zoned_loop zram drbd/index diff --git a/Documentation/admin-guide/blockdev/zoned_loop.rst b/Documentation/admin-guide/blockdev/zoned_loop.rst new file mode 100644 index 000000000000..9c7aa3b482f3 --- /dev/null +++ b/Documentation/admin-guide/blockdev/zoned_loop.rst @@ -0,0 +1,169 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +======================= +Zoned Loop Block Device +======================= + +.. Contents: + + 1) Overview + 2) Creating a Zoned Device + 3) Deleting a Zoned Device + 4) Example + + +1) Overview +----------- + +The zoned loop block device driver (zloop) allows a user to create a zoned block +device using one regular file per zone as backing storage. This driver does not +directly control any hardware and uses read, write and truncate operations to +regular files of a file system to emulate a zoned block device. + +Using zloop, zoned block devices with a configurable capacity, zone size and +number of conventional zones can be created. The storage for each zone of the +device is implemented using a regular file with a maximum size equal to the zone +size. The size of a file backing a conventional zone is always equal to the zone +size. The size of a file backing a sequential zone indicates the amount of data +sequentially written to the file, that is, the size of the file directly +indicates the position of the write pointer of the zone. + +When resetting a sequential zone, its backing file size is truncated to zero. +Conversely, for a zone finish operation, the backing file is truncated to the +zone size. With this, the maximum capacity of a zloop zoned block device +can be configured to be larger than the storage space available on the +backing file system. Of course, for such a configuration, writing more data than +the storage space available on the backing file system will result in write +errors. + +The zoned loop block device driver implements a complete zone transition state +machine. That is, zones can be empty, implicitly opened, explicitly opened, +closed or full. The current implementation does not support any limits on the +maximum number of open and active zones. + +No user tools are necessary to create and delete zloop devices. + +2) Creating a Zoned Device +-------------------------- + +Once the zloop module is loaded (or if zloop is compiled in the kernel), the +character device file /dev/zloop-control can be used to add a zloop device. +This is done by writing an "add" command directly to the /dev/zloop-control +device:: + + $ modprobe zloop + $ ls -l /dev/zloop* + crw-------. 1 root root 10, 123 Jan 6 19:18 /dev/zloop-control + + $ mkdir -p <base directory>/<device ID> + $ echo "add [options]" > /dev/zloop-control + +The options available for the add command can be listed by reading the +/dev/zloop-control device:: + + $ cat /dev/zloop-control + add id=%d,capacity_mb=%u,zone_size_mb=%u,zone_capacity_mb=%u,conv_zones=%u,base_dir=%s,nr_queues=%u,queue_depth=%u,buffered_io + remove id=%d + +In more detail, the options that can be used with the "add" command are as +follows. + +================ =========================================================== +id Device number (the X in /dev/zloopX). + Default: automatically assigned. +capacity_mb Device total capacity in MiB. This is always rounded up to + the nearest higher multiple of the zone size. + Default: 16384 MiB (16 GiB). +zone_size_mb Device zone size in MiB. Default: 256 MiB. +zone_capacity_mb Device zone capacity (must always be equal to or lower than + the zone size). Default: zone size. +conv_zones Total number of conventional zones starting from sector 0. + Default: 8. +base_dir Path to the base directory in which to create the directory + containing the zone files of the device. + Default: /var/local/zloop.
+ The device directory containing the zone files is always + named with the device ID. E.g. the default zone file + directory for /dev/zloop0 is /var/local/zloop/0. +nr_queues Number of I/O queues of the zoned block device. This value is + always capped by the number of online CPUs. + Default: 1 +queue_depth Maximum I/O queue depth per I/O queue. + Default: 64 +buffered_io Do buffered IOs instead of direct IOs (default: false) +================ =========================================================== + +3) Deleting a Zoned Device +-------------------------- + +Deleting an unused zoned loop block device is done by issuing the "remove" +command to /dev/zloop-control, specifying the ID of the device to remove:: + + $ echo "remove id=X" > /dev/zloop-control + +The remove command does not have any option. + +A zoned device that was removed can be re-added without any change to the +state of the device zones: the device zones are restored to their last state +before the device was removed. Adding a zoned device again after it was removed +must always be done using the same configuration as when the device was first +added. If a zone configuration change is detected, an error will be returned and +the zoned device will not be created. + +To fully delete a zoned device, after executing the remove operation, the device +base directory containing the backing files of the device zones must be deleted. + +4) Example +---------- + +The following sequence of commands creates a 2 GB zoned device with zones of 64 +MB and a zone capacity of 63 MB:: + + $ modprobe zloop + $ mkdir -p /var/local/zloop/0 + $ echo "add capacity_mb=2048,zone_size_mb=64,zone_capacity_mb=63" > /dev/zloop-control + +For the device created (/dev/zloop0), the zone backing files are all created +under the default base directory (/var/local/zloop):: + + $ ls -l /var/local/zloop/0 + total 0 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000000 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000001 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000002 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000003 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000004 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000005 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000006 + -rw-------. 1 root root 67108864 Jan 6 22:23 cnv-000007 + -rw-------. 1 root root 0 Jan 6 22:23 seq-000008 + -rw-------. 1 root root 0 Jan 6 22:23 seq-000009 + ...
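As a quick illustration of the write pointer behavior described in the overview, a sequential zone can be finished and then reset while watching the size of its backing file change (a minimal sketch, assuming a util-linux ``blkzone`` that supports the ``finish`` and ``reset`` sub-commands; with the 8 conventional zones of 64 MB used here, the first sequential zone starts at sector 1048576)::

    $ blkzone finish --offset 1048576 --count 1 /dev/zloop0
    $ stat -c %s /var/local/zloop/0/seq-000008    # grows to 67108864, the zone size
    $ blkzone reset --offset 1048576 --count 1 /dev/zloop0
    $ stat -c %s /var/local/zloop/0/seq-000008    # truncated back to 0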
+ +The zoned device created (/dev/zloop0) can then be used normally:: + + $ lsblk -z + NAME ZONED ZONE-SZ ZONE-NR ZONE-AMAX ZONE-OMAX ZONE-APP ZONE-WGRAN + zloop0 host-managed 64M 32 0 0 1M 4K + $ blkzone report /dev/zloop0 + start: 0x000000000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x000020000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x000040000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x000060000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x000080000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x0000a0000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x0000c0000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x0000e0000, len 0x020000, cap 0x020000, wptr 0x000000 reset:0 non-seq:0, zcond: 0(nw) [type: 1(CONVENTIONAL)] + start: 0x000100000, len 0x020000, cap 0x01f800, wptr 0x000000 reset:0 non-seq:0, zcond: 1(em) [type: 2(SEQ_WRITE_REQUIRED)] + start: 0x000120000, len 0x020000, cap 0x01f800, wptr 0x000000 reset:0 non-seq:0, zcond: 1(em) [type: 2(SEQ_WRITE_REQUIRED)] + ... + +Deleting this device is done using the command:: + + $ echo "remove id=0" > /dev/zloop-control + +The removed device can be re-added again using the same "add" command as when +the device was first created. To fully delete a zoned device, its backing files +should also be deleted after executing the remove command:: + + $ rm -r /var/local/zloop/0 diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 1a16ce68a4d7..9e7de8e70048 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -3019,7 +3019,7 @@ Filesystem Support for Writeback -------------------------------- A filesystem can support cgroup writeback by updating -address_space_operations->writepage[s]() to annotate bio's using the +address_space_operations->writepages() to annotate bio's using the following two functions. wbc_init_bio(@wbc, @bio) diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index ce296b8430fc..09890a8f3ee9 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -23,4 +23,5 @@ are configurable at compile, boot or run time. gather_data_sampling reg-file-data-sampling rsb + old_microcode indirect-target-selection diff --git a/Documentation/admin-guide/hw-vuln/old_microcode.rst b/Documentation/admin-guide/hw-vuln/old_microcode.rst new file mode 100644 index 000000000000..6ded8f86b8d0 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/old_microcode.rst @@ -0,0 +1,21 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============= +Old Microcode +============= + +The kernel keeps a table of released microcode. Systems that had +microcode older than this at boot will say "Vulnerable". This means +that the system was vulnerable to some known CPU issue. It could be +security or functional, the kernel does not know or care. + +You should update the CPU microcode to mitigate any exposure. This is +usually accomplished by updating the files in +/lib/firmware/intel-ucode/ via normal distribution updates. 
Intel also +distributes these files in a github repo: + + https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files.git + +Just like all the other hardware vulnerabilities, exposure is +determined at boot. Runtime microcode updates do not change the status +of this vulnerability. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 8f75ec177399..1d6d39f4c68d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5672,6 +5672,31 @@ are zero, rcutorture acts as if is interpreted they are all non-zero. + rcutorture.gpwrap_lag= [KNL] + Enable grace-period wrap lag testing. Setting + to false prevents the gpwrap lag test from + running. Default is true. + + rcutorture.gpwrap_lag_gps= [KNL] + Set the value for grace-period wrap lag during + active lag testing periods. This controls how many + grace periods differences we tolerate between + rdp and rnp's gp_seq before setting overflow flag. + The default is always set to 8. + + rcutorture.gpwrap_lag_cycle_mins= [KNL] + Set the total cycle duration for gpwrap lag + testing in minutes. This is the total time for + one complete cycle of active and inactive + testing periods. Default is 30 minutes. + + rcutorture.gpwrap_lag_active_mins= [KNL] + Set the duration for which gpwrap lag is active + within each cycle, in minutes. During this time, + the grace-period wrap lag will be set to the + value specified by gpwrap_lag_gps. Default is + 5 minutes. + rcutorture.irqreader= [KNL] Run RCU readers from irq handlers, or, more accurately, from a timer handler. Not all RCU @@ -6268,7 +6293,7 @@ port and the regular usb controller gets disabled. root= [KNL] Root filesystem - Usually this a a block device specifier of some kind, + Usually this is a block device specifier of some kind, see the early_lookup_bdev comment in block/early-lookup.c for details. Alternatively this can be "ram" for the legacy initial @@ -6295,6 +6320,11 @@ Memory area to be used by remote processor image, managed by CMA. + rt_group_sched= [KNL] Enable or disable SCHED_RR/FIFO group scheduling + when CONFIG_RT_GROUP_SCHED=y. Defaults to + !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED. + Format: <bool> + rw [KNL] Mount root device read-write on boot S [KNL] Run init in single mode diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 8290177b4f75..d385985b305f 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -75,6 +75,7 @@ Currently, these files are in /proc/sys/vm: - unprivileged_userfaultfd - user_reserve_kbytes - vfs_cache_pressure +- vfs_cache_pressure_denom - watermark_boost_factor - watermark_scale_factor - zone_reclaim_mode @@ -1017,19 +1018,28 @@ vfs_cache_pressure This percentage value controls the tendency of the kernel to reclaim the memory which is used for caching of directory and inode objects. -At the default value of vfs_cache_pressure=100 the kernel will attempt to -reclaim dentries and inodes at a "fair" rate with respect to pagecache and -swapcache reclaim. Decreasing vfs_cache_pressure causes the kernel to prefer -to retain dentry and inode caches. When vfs_cache_pressure=0, the kernel will -never reclaim dentries and inodes due to memory pressure and this can easily -lead to out-of-memory conditions. Increasing vfs_cache_pressure beyond 100 -causes the kernel to prefer to reclaim dentries and inodes. 
+At the default value of vfs_cache_pressure=vfs_cache_pressure_denom the kernel +will attempt to reclaim dentries and inodes at a "fair" rate with respect to +pagecache and swapcache reclaim. Decreasing vfs_cache_pressure causes the +kernel to prefer to retain dentry and inode caches. When vfs_cache_pressure=0, +the kernel will never reclaim dentries and inodes due to memory pressure and +this can easily lead to out-of-memory conditions. Increasing vfs_cache_pressure +beyond vfs_cache_pressure_denom causes the kernel to prefer to reclaim dentries +and inodes. -Increasing vfs_cache_pressure significantly beyond 100 may have negative -performance impact. Reclaim code needs to take various locks to find freeable -directory and inode objects. With vfs_cache_pressure=1000, it will look for -ten times more freeable objects than there are. +Increasing vfs_cache_pressure significantly beyond vfs_cache_pressure_denom may +have negative performance impact. Reclaim code needs to take various locks to +find freeable directory and inode objects. When vfs_cache_pressure equals +(10 * vfs_cache_pressure_denom), it will look for ten times more freeable +objects than there are. +Note: This setting should always be used together with vfs_cache_pressure_denom. + +vfs_cache_pressure_denom +======================== + +Defaults to 100 (minimum allowed value). Requires corresponding +vfs_cache_pressure setting to take effect. watermark_boost_factor ====================== diff --git a/Documentation/admin-guide/xfs.rst b/Documentation/admin-guide/xfs.rst index 5becb441c3cb..a18328a5fb93 100644 --- a/Documentation/admin-guide/xfs.rst +++ b/Documentation/admin-guide/xfs.rst @@ -151,6 +151,17 @@ When mounting an XFS filesystem, the following options are accepted. optional, and the log section can be separate from the data section or contained within it. + max_atomic_write=value + Set the maximum size of an atomic write. The size may be + specified in bytes, in kilobytes with a "k" suffix, in megabytes + with a "m" suffix, or in gigabytes with a "g" suffix. The size + cannot be larger than the maximum write size, larger than the + size of any allocation group, or larger than the size of a + remapping operation that the log can complete atomically. + + The default value is to set the maximum I/O completion size + to allow each CPU to handle one at a time. + max_open_zones=value Specify the max number of zones to keep open for writing on a zoned rt device. Many open zones aids file data separation diff --git a/Documentation/arch/powerpc/htm.rst b/Documentation/arch/powerpc/htm.rst new file mode 100644 index 000000000000..fcb4eb6306b1 --- /dev/null +++ b/Documentation/arch/powerpc/htm.rst @@ -0,0 +1,104 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. _htm: + +=================================== +HTM (Hardware Trace Macro) +=================================== + +Athira Rajeev, 2 Mar 2025 + +.. contents:: + :depth: 3 + + +Basic overview +============== + +H_HTM is used as an interface for executing Hardware Trace Macro (HTM) +functions, including setup, configuration, control and dumping of the HTM data. +For using HTM, it is required to setup HTM buffers and HTM operations can +be controlled using the H_HTM hcall. The hcall can be invoked for any core/chip +of the system from within a partition itself. To use this feature, a debugfs +folder called "htmdump" is present under /sys/kernel/debug/powerpc. + + +HTM debugfs example usage +========================= + +.. 
code-block:: sh + + # ls /sys/kernel/debug/powerpc/htmdump/ + coreindexonchip htmcaps htmconfigure htmflags htminfo htmsetup + htmstart htmstatus htmtype nodalchipindex nodeindex trace + +Details on each file: + +* nodeindex, nodalchipindex, coreindexonchip specify which partition to configure the HTM for. +* htmtype: specifies the type of HTM. Supported target is hardwareTarget. +* trace: used to read the HTM data. +* htmconfigure: Configure/deconfigure the HTM. Writing 1 to the file will configure the trace, writing 0 to the file will deconfigure it. +* htmstart: Start/stop the HTM. Writing 1 to the file will start the tracing, writing 0 to the file will stop the tracing. +* htmstatus: get the status of the HTM. This is needed to understand the HTM state after each operation. +* htmsetup: set the HTM buffer size. The size of the HTM buffer is a power of 2. +* htminfo: provides the system processor configuration details. This is needed to understand the appropriate values for nodeindex, nodalchipindex, coreindexonchip. +* htmcaps: provides the HTM capabilities like minimum/maximum buffer size, what kind of tracing the HTM supports, etc. +* htmflags: allows passing flags to the hcall. Currently supports controlling the wrapping of the HTM buffer. + +To see the system processor configuration details: + +.. code-block:: sh + + # cat /sys/kernel/debug/powerpc/htmdump/htminfo > htminfo_file + +The result can be interpreted using hexdump. + +To collect HTM traces for a partition represented by nodeindex as +zero, nodalchipindex as 1 and coreindexonchip as 12: + +.. code-block:: sh + + # cd /sys/kernel/debug/powerpc/htmdump/ + # echo 2 > htmtype + # echo 33 > htmsetup ( sets 8 GB memory for the HTM buffer; the number is the size as a power of 2 ) + +This requires a CEC reboot to get the HTM buffers allocated. + +.. code-block:: sh + + # cd /sys/kernel/debug/powerpc/htmdump/ + # echo 2 > htmtype + # echo 0 > nodeindex + # echo 1 > nodalchipindex + # echo 12 > coreindexonchip + # echo 1 > htmflags # to set noWrap for HTM buffers + # echo 1 > htmconfigure # Configure the HTM + # echo 1 > htmstart # Start the HTM + # echo 0 > htmstart # Stop the HTM + # echo 0 > htmconfigure # Deconfigure the HTM + # cat htmstatus # Dump the status of HTM entries as data + +The above will set the htmtype and core details, followed by executing the respective HTM operations. + +Read the HTM trace data +======================== + +After starting the trace collection, run the workload +of interest. Stop the trace collection after the required period +of time, and read the trace file. + +.. code-block:: sh + + # cat /sys/kernel/debug/powerpc/htmdump/trace > trace_file + +This trace file will contain the relevant instruction traces +collected during the workload execution. It can be used as an +input file for trace decoders to interpret the data (a quick raw +sanity check of the dump is sketched below). + +Benefits of using HTM debugfs interface +======================================= + +It is now possible to collect traces for a particular core/chip +from within any partition of the system and decode it. Through +this enablement, a small partition can be dedicated to collect the +trace data and analyze it to provide important information for performance +analysis, software tuning, or hardware debugging.
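Before feeding the trace file to a decoder, it can be useful to confirm that the dump actually contains data and to peek at the first raw bytes (a minimal sketch using standard tools; this is only a sanity check, not a decoding step):

.. code-block:: sh

   # stat -c %s trace_file
   # hexdump -C trace_file | head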
diff --git a/Documentation/arch/powerpc/kvm-nested.rst b/Documentation/arch/powerpc/kvm-nested.rst index 5defd13cc6c1..574592505604 100644 --- a/Documentation/arch/powerpc/kvm-nested.rst +++ b/Documentation/arch/powerpc/kvm-nested.rst @@ -208,13 +208,9 @@ associated values for each ID in the GSB:: flags: Bit 0: getGuestWideState: Request state of the Guest instead of an individual VCPU. - Bit 1: takeOwnershipOfVcpuState Indicate the L1 is taking - over ownership of the VCPU state and that the L0 can free - the storage holding the state. The VCPU state will need to - be returned to the Hypervisor via H_GUEST_SET_STATE prior - to H_GUEST_RUN_VCPU being called for this VCPU. The data - returned in the dataBuffer is in a Hypervisor internal - format. + Bit 1: getHostWideState: Request stats of the Host. This causes + the guestId and vcpuId parameters to be ignored and attempting + to get the VCPU/Guest state will cause an error. Bits 2-63: Reserved guestId: ID obtained from H_GUEST_CREATE vcpuId: ID of the vCPU pass to H_GUEST_CREATE_VCPU @@ -406,9 +402,10 @@ the partition like the timebase offset and partition scoped page table information. +--------+-------+----+--------+----------------------------------+ -| ID | Size | RW | Thread | Details | -| | Bytes | | Guest | | -| | | | Scope | | +| ID | Size | RW |(H)ost | Details | +| | Bytes | |(G)uest | | +| | | |(T)hread| | +| | | |Scope | | +========+=======+====+========+==================================+ | 0x0000 | | RW | TG | NOP element | +--------+-------+----+--------+----------------------------------+ @@ -434,6 +431,29 @@ table information. | | | | |- 0x8 Table size. | +--------+-------+----+--------+----------------------------------+ | 0x0007-| | | | Reserved | +| 0x07FF | | | | | ++--------+-------+----+--------+----------------------------------+ +| 0x0800 | 0x08 | R | H | Current usage in bytes of the | +| | | | | L0's Guest Management Space | +| | | | | for an L1-Lpar. | ++--------+-------+----+--------+----------------------------------+ +| 0x0801 | 0x08 | R | H | Max bytes available in the | +| | | | | L0's Guest Management Space for | +| | | | | an L1-Lpar | ++--------+-------+----+--------+----------------------------------+ +| 0x0802 | 0x08 | R | H | Current usage in bytes of the | +| | | | | L0's Guest Page Table Management | +| | | | | Space for an L1-Lpar | ++--------+-------+----+--------+----------------------------------+ +| 0x0803 | 0x08 | R | H | Max bytes available in the L0's | +| | | | | Guest Page Table Management | +| | | | | Space for an L1-Lpar | ++--------+-------+----+--------+----------------------------------+ +| 0x0804 | 0x08 | R | H | Cumulative Reclaimed bytes from | +| | | | | L0 Guest's Page Table Management | +| | | | | Space due to overcommit | ++--------+-------+----+--------+----------------------------------+ +| 0x0805-| | | | Reserved | | 0x0BFF | | | | | +--------+-------+----+--------+----------------------------------+ | 0x0C00 | 0x10 | RW | T |Run vCPU Input Buffer: | diff --git a/Documentation/arch/x86/amd-debugging.rst b/Documentation/arch/x86/amd-debugging.rst new file mode 100644 index 000000000000..d92bf59d62c7 --- /dev/null +++ b/Documentation/arch/x86/amd-debugging.rst @@ -0,0 +1,368 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Debugging AMD Zen systems ++++++++++++++++++++++++++ + +Introduction +============ + +This document describes techniques that are useful for debugging issues with +AMD Zen systems. 
It is intended for use by developers and technical users +to help identify and resolve issues. + +S3 vs s2idle +============ + +On AMD systems, it's not possible to simultaneously support suspend-to-RAM (S3) +and suspend-to-idle (s2idle). To confirm which mode your system supports you +can look at ``cat /sys/power/mem_sleep``. If it shows ``s2idle [deep]`` then +*S3* is supported. If it shows ``[s2idle]`` then *s2idle* is +supported. + +On systems that support *S3*, the firmware will be utilized to put all hardware into +the appropriate low power state. + +On systems that support *s2idle*, the kernel will be responsible for transitioning devices +into the appropriate low power state. When all devices are in the appropriate low +power state, the hardware will transition into a hardware sleep state. + +After a suspend cycle you can tell how much time was spent in a hardware sleep +state by looking at ``cat /sys/power/suspend_stats/last_hw_sleep``. + +This flowchart explains how the AMD s2idle suspend flow works. + +.. kernel-figure:: suspend.svg + +This flowchart explains how the amd s2idle resume flow works. + +.. kernel-figure:: resume.svg + +s2idle debugging tool +===================== + +As there are a lot of places that problems can occur, a debugging tool has been +created at +`amd-debug-tools <https://git.kernel.org/pub/scm/linux/kernel/git/superm1/amd-debug-tools.git/about/>`_ +that can help test for common problems and offer suggestions. + +If you have an s2idle issue, it's best to start with this and follow instructions +from its findings. If you continue to have an issue, raise a bug with the +report generated from this script to +`drm/amd gitlab <https://gitlab.freedesktop.org/drm/amd/-/issues/new?issuable_template=s2idle_BUG_TEMPLATE>`_. + +Spurious s2idle wakeups from an IRQ +=================================== + +Spurious wakeups will generally have an IRQ set to ``/sys/power/pm_wakeup_irq``. +This can be matched to ``/proc/interrupts`` to determine what device woke the system. + +If this isn't enough to debug the problem, then the following sysfs files +can be set to add more verbosity to the wakeup process: :: + + # echo 1 | sudo tee /sys/power/pm_debug_messages + # echo 1 | sudo tee /sys/power/pm_print_times + +After making those changes, the kernel will display messages that can +be traced back to kernel s2idle loop code as well as display any active +GPIO sources while waking up. + +If the wakeup is caused by the ACPI SCI, additional ACPI debugging may be +needed. These commands can enable additional trace data: :: + + # echo enable | sudo tee /sys/module/acpi/parameters/trace_state + # echo 1 | sudo tee /sys/module/acpi/parameters/aml_debug_output + # echo 0x0800000f | sudo tee /sys/module/acpi/parameters/debug_level + # echo 0xffff0000 | sudo tee /sys/module/acpi/parameters/debug_layer + +Spurious s2idle wakeups from a GPIO +=================================== + +If a GPIO is active when waking up the system ideally you would look at the +schematic to determine what device it is associated with. If the schematic +is not available, another tactic is to look at the ACPI _EVT() entry +to determine what device is notified when that GPIO is active. + +For a hypothetical example, say that GPIO 59 woke up the system. You can +look at the SSDT to determine what device is notified when GPIO 59 is active. + +First convert the GPIO number into hex. :: + + $ python3 -c "print(hex(59))" + 0x3b + +Next determine which ACPI table has the ``_EVT`` entry. 
For example: :: + + $ sudo grep EVT /sys/firmware/acpi/tables/SSDT* + grep: /sys/firmware/acpi/tables/SSDT27: binary file matches + +Decode this table:: + + $ sudo cp /sys/firmware/acpi/tables/SSDT27 . + $ sudo iasl -d SSDT27 + +Then look at the table and find the matching entry for GPIO 0x3b. :: + + Case (0x3B) + { + M000 (0x393B) + M460 (" Notify (\\_SB.PCI0.GP17.XHC1, 0x02)\n", Zero, Zero, Zero, Zero, Zero, Zero) + Notify (\_SB.PCI0.GP17.XHC1, 0x02) // Device Wake + } + +You can see in this case that the device ``\_SB.PCI0.GP17.XHC1`` is notified +when GPIO 59 is active. It's obvious this is an XHCI controller, but to go a +step further you can figure out which XHCI controller it is by matching it to +ACPI.:: + + $ grep "PCI0.GP17.XHC1" /sys/bus/acpi/devices/*/path + /sys/bus/acpi/devices/device:2d/path:\_SB_.PCI0.GP17.XHC1 + /sys/bus/acpi/devices/device:2e/path:\_SB_.PCI0.GP17.XHC1.RHUB + /sys/bus/acpi/devices/device:2f/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT1 + /sys/bus/acpi/devices/device:30/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT1.CAM0 + /sys/bus/acpi/devices/device:31/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT1.CAM1 + /sys/bus/acpi/devices/device:32/path:\_SB_.PCI0.GP17.XHC1.RHUB.PRT2 + /sys/bus/acpi/devices/LNXPOWER:0d/path:\_SB_.PCI0.GP17.XHC1.PWRS + +Here you can see it matches to ``device:2d``. Look at the ``physical_node`` +to determine what PCI device that actually is. :: + + $ ls -l /sys/bus/acpi/devices/device:2d/physical_node + lrwxrwxrwx 1 root root 0 Feb 12 13:22 /sys/bus/acpi/devices/device:2d/physical_node -> ../../../../../pci0000:00/0000:00:08.1/0000:c2:00.4 + +So there you have it: the PCI device associated with this GPIO wakeup was ``0000:c2:00.4``. + +The ``amd_s2idle.py`` script will capture most of these artifacts for you. + +s2idle PM debug messages +======================== + +During the s2idle flow on AMD systems, the ACPI LPS0 driver is responsible +for checking all uPEP constraints. Failing uPEP constraints does not prevent +s0i3 entry. This means that if some constraints are not met, it is possible +the kernel may attempt to enter s2idle even if there are some known issues. + +To activate PM debugging, either specify the ``pm_debug_messages`` kernel +command-line option at boot or write to ``/sys/power/pm_debug_messages``. +Unmet constraints will be displayed in the kernel log and can be +viewed by logging tools that process the kernel ring buffer, like ``dmesg`` or +``journalctl``. + +If the system freezes on entry/exit before these messages are flushed, a +useful debugging tactic is to unbind the ``amd_pmc`` driver to prevent +notification to the platform to start s0i3 entry. This will stop the +system from freezing on entry or exit and let you view all the failed +constraints. :: + + cd /sys/bus/platform/drivers/amd_pmc + ls | grep AMD | sudo tee unbind + +After doing this, run the suspend cycle and look specifically for errors around: :: + + ACPI: LPI: Constraint not met; min power state:%s current power state:%s + +Historical examples of s2idle issues +==================================== + +To help understand the types of issues that can occur and how to debug them, +here are some historical examples of s2idle issues that have been resolved. + +Core offlining +-------------- +An end user reported that taking a core offline would prevent the system +from properly entering s0i3. This was debugged using internal AMD tools +to capture and display a stream of metrics from the hardware showing what changed +when a core was offlined.
It was determined that the hardware didn't get +notification that the offline cores were in the deepest state, and so it prevented +the CPU from going into the deepest state. The issue was traced to a missing +command to put cores into C3 upon offline. + +`commit d6b88ce2eb9d2 ("ACPI: processor idle: Allow playing dead in C3 state") <https://git.kernel.org/torvalds/c/d6b88ce2eb9d2>`_ + +Corruption after resume +----------------------- +A big problem that occurred with Rembrandt was that there was graphical +corruption after resume. This happened because of a misalignment of PSP +and driver responsibility. The PSP will save and restore DMCUB, but the +driver assumed it needed to reset DMCUB on resume. +This actually was a misalignment for earlier silicon as well, but was not +observed. + +`commit 79d6b9351f086 ("drm/amd/display: Don't reinitialize DMCUB on s0ix resume") <https://git.kernel.org/torvalds/c/79d6b9351f086>`_ + +Back-to-back suspends fail +-------------------------- +When using a wakeup source that triggers the IRQ to wake up, a bug in the +pinctrl-amd driver may capture the wrong state of the IRQ and prevent the +system from going back to sleep properly. + +`commit b8c824a869f22 ("pinctrl: amd: Don't save/restore interrupt status and wake status bits") <https://git.kernel.org/torvalds/c/b8c824a869f22>`_ + +Spurious timer-based wakeup after 5 minutes +------------------------------------------- +The HPET was being used to program the wakeup source for the system; however, +this was causing a spurious wakeup after 5 minutes. The correct alarm to use +was the ACPI alarm. + +`commit 3d762e21d5637 ("rtc: cmos: Use ACPI alarm for non-Intel x86 systems too") <https://git.kernel.org/torvalds/c/3d762e21d5637>`_ + +Disk disappears after resume +---------------------------- +After resuming from s2idle, the NVME disk would disappear. This was due to the +BIOS not specifying the _DSD StorageD3Enable property. This caused the NVME +driver not to put the disk into the expected state at suspend and to fail +on resume. + +`commit e79a10652bbd3 ("ACPI: x86: Force StorageD3Enable on more products") <https://git.kernel.org/torvalds/c/e79a10652bbd3>`_ + +Spurious IRQ1 +------------- +A number of Renoir, Lucienne, Cezanne, & Barcelo platforms have a +platform firmware bug where IRQ1 is triggered during s0i3 resume. + +This was fixed in the platform firmware, but a number of systems didn't +receive any more platform firmware updates. + +`commit 8e60615e89321 ("platform/x86/amd: pmc: Disable IRQ1 wakeup for RN/CZN") <https://git.kernel.org/torvalds/c/8e60615e89321>`_ + +Hardware timeout +---------------- +The hardware performs many actions besides accepting the values from the +amd-pmc driver. As the communication path with the hardware is a mailbox, +it's possible that it might not respond quickly enough. +This issue manifested as a failure to suspend: :: + + PM: dpm_run_callback(): acpi_subsys_suspend_noirq+0x0/0x50 returns -110 + amd_pmc AMDI0005:00: PM: failed to suspend noirq: error -110 + +The timing problem was identified by comparing the values of the idle mask. + +`commit 3c3c8e88c8712 ("platform/x86: amd-pmc: Increase the response register timeout") <https://git.kernel.org/torvalds/c/3c3c8e88c8712>`_ + +Failed to reach hardware sleep state with panel on +-------------------------------------------------- +On some Strix systems, certain panels were observed to block the system from +entering a hardware sleep state if the internal panel was on during the sequence.
+ +Even though the panel got turned off during suspend, it exposed a timing problem +where an interrupt caused the display hardware to wake up and block low power +state entry. + +`commit 40b8c14936bd2 ("drm/amd/display: Disable unneeded hpd interrupts during dm_init") <https://git.kernel.org/torvalds/c/40b8c14936bd2>`_ + +Runtime power consumption issues +================================ + +Runtime power consumption is influenced by many factors, including but not +limited to the configuration of the PCIe Active State Power Management (ASPM), +the display brightness, the EPP policy of the CPU, and the power management +of the devices. + +ASPM +---- +For the best runtime power consumption, ASPM should be programmed as intended +by the BIOS from the hardware vendor. To accomplish this, the Linux kernel +should be compiled with ``CONFIG_PCIEASPM_DEFAULT`` set to ``y`` and the +sysfs file ``/sys/module/pcie_aspm/parameters/policy`` should not be modified. + +Most notably, if L1.2 is not configured properly for any devices, the SoC +will not be able to enter the deepest idle state. + +EPP Policy +---------- +The ``energy_performance_preference`` sysfs file can be used to set a bias +of efficiency or performance for a CPU. This has a direct impact on +battery life when it is more heavily biased towards performance. + + +BIOS debug messages +=================== + +Most OEM machines don't have a serial UART for outputting kernel or BIOS +debug messages. However, BIOS debug messages are useful for understanding +both BIOS bugs and bugs with the Linux kernel drivers that call BIOS AML. + +As the BIOS on most OEM AMD systems is based on an AMD reference BIOS, +the infrastructure used for exporting debugging messages is often the same +as in the AMD reference BIOS. + +Manual Parsing +---------------- +There is generally an ACPI method ``\M460`` that different paths of the AML +will call to emit a message to the BIOS serial log. This method takes +7 arguments, with the first being a string and the rest being optional +integers:: + + Method (M460, 7, Serialized) + +Here is an example of a string that BIOS AML may call out using ``\M460``:: + + M460 (" OEM-ASL-PCIe Address (0x%X)._REG (%d %d) PCSA = %d\n", DADR, Arg0, Arg1, PCSA, Zero, Zero) + +Normally when executed, the ``\M460`` method would populate the additional +arguments into the string. In order to get these messages from the Linux +kernel, a hook has been added into ACPICA that can capture the *arguments* +sent to ``\M460`` and print them to the kernel ring buffer. +For example, the following message could be emitted into the kernel ring buffer:: + + extrace-0174 ex_trace_args : " OEM-ASL-PCIe Address (0x%X)._REG (%d %d) PCSA = %d\n", ec106000, 2, 1, 1, 0, 0 + +In order to get these messages, you need to compile with ``CONFIG_ACPI_DEBUG`` +and then turn on the following ACPICA tracing parameters. +This can be done either on the kernel command line or at runtime: + +* ``acpi.trace_method_name=\M460`` +* ``acpi.trace_state=method`` + +NOTE: These can be very noisy at bootup. If you turn these parameters on +the kernel command line, please also consider turning up ``CONFIG_LOG_BUF_SHIFT`` +to a larger size such as 17 to avoid losing early boot messages. + +Tool-assisted Parsing +--------------------- +As mentioned above, parsing by hand can be tedious, especially with a lot of +messages.
To help with this, a tool has been created at +`amd-debug-tools <https://git.kernel.org/pub/scm/linux/kernel/git/superm1/amd-debug-tools.git/about/>`_ +to help parse the messages. + +Random reboot issues +==================== + +When a random reboot occurs, the high-level reason for the reboot is stored +in a register that will persist onto the next boot. + +There are 6 classes of reasons for the reboot: + * Software induced + * Power state transition + * Pin induced + * Hardware induced + * Remote reset + * Internal CPU event + +.. csv-table:: + :header: "Bit", "Type", "Reason" + :align: left + + "0", "Pin", "thermal pin BP_THERMTRIP_L was tripped" + "1", "Pin", "power button was pressed for 4 seconds" + "2", "Pin", "shutdown pin was tripped" + "4", "Remote", "remote ASF power off command was received" + "9", "Internal", "internal CPU thermal limit was tripped" + "16", "Pin", "system reset pin BP_SYS_RST_L was tripped" + "17", "Software", "software issued PCI reset" + "18", "Software", "software wrote 0x4 to reset control register 0xCF9" + "19", "Software", "software wrote 0x6 to reset control register 0xCF9" + "20", "Software", "software wrote 0xE to reset control register 0xCF9" + "21", "ACPI-state", "ACPI power state transition occurred" + "22", "Pin", "keyboard reset pin KB_RST_L was tripped" + "23", "Internal", "internal CPU shutdown event occurred" + "24", "Hardware", "system failed to boot before failed boot timer expired" + "25", "Hardware", "hardware watchdog timer expired" + "26", "Remote", "remote ASF reset command was received" + "27", "Internal", "an uncorrected error caused a data fabric sync flood event" + "29", "Internal", "FCH and MP1 failed warm reset handshake" + "30", "Internal", "a parity error occurred" + "31", "Internal", "a software sync flood event occurred" + +This information is read by the kernel at bootup and printed into +the syslog. When a random reboot occurs this message can be helpful +to determine the next component to debug. diff --git a/Documentation/arch/x86/cpuinfo.rst b/Documentation/arch/x86/cpuinfo.rst index f80e2a558d2a..dd8b7806944e 100644 --- a/Documentation/arch/x86/cpuinfo.rst +++ b/Documentation/arch/x86/cpuinfo.rst @@ -173,10 +173,10 @@ For example, when an old kernel is running on new hardware. The kernel disabled support for it at compile-time -------------------------------------------------- -For example, if 5-level-paging is not enabled when building (i.e., -CONFIG_X86_5LEVEL is not selected) the flag "la57" will not show up [#f1]_. +For example, if Linear Address Masking (LAM) is not enabled when building (i.e., +CONFIG_ADDRESS_MASKING is not selected) the flag "lam" will not show up. Even though the feature will still be detected via CPUID, the kernel disables -it by clearing via setup_clear_cpu_cap(X86_FEATURE_LA57). +it by clearing via setup_clear_cpu_cap(X86_FEATURE_LAM). The feature is disabled at boot-time ------------------------------------ @@ -200,5 +200,3 @@ missing at runtime. For example, AVX flags will not show up if XSAVE feature is disabled since they depend on XSAVE feature. Another example would be broken CPUs and them missing microcode patches. Due to that, the kernel decides not to enable a feature. - -.. [#f1] 5-level paging uses linear address of 57 bits. 
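Whether a given flag actually shows up in the kernel's advertised feature list can be checked directly from userspace (a minimal sketch using the "lam" flag from the example above; the flag is simply absent both when the CPU lacks the feature and when the kernel has cleared it)::

    $ grep -q -w lam /proc/cpuinfo && echo "lam advertised" || echo "lam not advertised"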
diff --git a/Documentation/arch/x86/index.rst b/Documentation/arch/x86/index.rst index 8ac64d7de4dc..58a006525ae8 100644 --- a/Documentation/arch/x86/index.rst +++ b/Documentation/arch/x86/index.rst @@ -25,6 +25,7 @@ x86-specific Documentation shstk iommu intel_txt + amd-debugging amd-memory-encryption amd_hsmp tdx diff --git a/Documentation/arch/x86/resume.svg b/Documentation/arch/x86/resume.svg new file mode 100644 index 000000000000..ad8839f762bf --- /dev/null +++ b/Documentation/arch/x86/resume.svg @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- Do not edit this file with editors other than draw.io --> [The remainder of this hunk is the draw.io-generated SVG markup for the s2idle resume flowchart referenced by amd-debugging.rst (nodes such as "Wakeup event occurs", "MP0/MP1 boot process", "MP1 hands off control to OS", "OS checks all wake sources", "Any PM wakeup event pending", "uPEP driver removes OS_HINT" and "Kernel resumes system"); the XML source, truncated in this excerpt, is not reproduced.]
style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=1;entryDx=0;entryDy=0;labelBackgroundColor=none;strokeColor=#E07A5F;fontColor=default;" edge="1" parent="1" source="jeVlbFHk8Qahp5zcIn_D-39" target="jeVlbFHk8Qahp5zcIn_D-25"> <mxGeometry relative="1" as="geometry" /> </mxCell> <mxCell id="jeVlbFHk8Qahp5zcIn_D-51" value="yes" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];fontStyle=1;labelBackgroundColor=none;fontColor=#393C56;" vertex="1" connectable="0" parent="jeVlbFHk8Qahp5zcIn_D-50"> <mxGeometry x="-0.4506" y="-2" relative="1" as="geometry"> <mxPoint x="215" y="12" as="offset" /> </mxGeometry> </mxCell> <mxCell id="jeVlbFHk8Qahp5zcIn_D-39" value="Any GPE &lt;br&gt;pending" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.decision;whiteSpace=wrap;labelBackgroundColor=none;fillColor=#F2CC8F;strokeColor=#E07A5F;fontColor=#393C56;" vertex="1" parent="1"> <mxGeometry x="80" y="710" width="100" height="100" as="geometry" /> </mxCell> <mxCell id="jeVlbFHk8Qahp5zcIn_D-63" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;labelBackgroundColor=none;strokeColor=#E07A5F;fontColor=default;" edge="1" parent="1" source="jeVlbFHk8Qahp5zcIn_D-54" target="jeVlbFHk8Qahp5zcIn_D-55"> <mxGeometry relative="1" as="geometry" /> </mxCell> <mxCell id="jeVlbFHk8Qahp5zcIn_D-54" value="OS moves active &lt;br&gt;core back to&lt;br&gt;ACPI C3" style="rounded=1;whiteSpace=wrap;html=1;absoluteArcSize=1;arcSize=14;strokeWidth=2;labelBackgroundColor=none;fillColor=#F2CC8F;strokeColor=#E07A5F;fontColor=#393C56;" vertex="1" parent="1"> <mxGeometry x="240" y="110" width="100" height="100" as="geometry" /> </mxCell> <mxCell id="jeVlbFHk8Qahp5zcIn_D-55" value="MP1 puts system back to sleep" style="strokeWidth=2;html=1;shape=mxgraph.flowchart.terminator;whiteSpace=wrap;labelBackgroundColor=none;fillColor=#F27979;strokeColor=#E07A5F;fontColor=#393C56;" vertex="1" parent="1"> <mxGeometry x="240" y="-20" width="100" height="60" as="geometry" /> </mxCell> </root> </mxGraphModel> </diagram> </mxfile> "><defs/><g><g data-cell-id="0"><g data-cell-id="1"><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-10"><g><path d="M 101 51 L 154.63 51" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 159.88 51 L 152.88 54.5 L 154.63 51 L 152.88 47.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-1"><g><ellipse cx="51" cy="51" rx="50" ry="50" fill="#0cf232" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 51px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Wakeup event occurs</div></div></div></foreignObject><image x="2" y="37" width="98" height="32" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfQt0HMWV9nd7RpKBAGYhvBYSIPlhwyZZEi+WNBoJ8QwQjMFmRjYGAssuj+ADgSUPHnltCPsn7J8EAjiQEF4BWxowz8UJEBCS5iETE5YlwGbJ8gxgJ6wNDsiSZvr+1Ghm3Oqpnu6ehzQa3T7H5/hoqm5Xfbe6vqpbde8lyCMICAKCgCAgCGgQIEFFEBAEBAFBQBDQISAEIeNCEBAEBAFBQIuAEIQMDEFAEBAEBAEhCBkDgoAgIAgIAt4RkB2Ed6ykpCAgCAgCswoBIYhZpW7prCAgCAgC3hEQgvCOlZQUBAQBQWBWISAEMavULZ0VBAQBQcA7AjOKILq7T58zmn4vBtBxli4+YfCWBfH4/Zu9d3uiZFs4ejUB59vqvckGHZ4a6H3et7yOnmVE/AtLvbJl+X13vnxn56I90hwcBPCx3N82muAjh4di68qVKfUEAUFgdiIwowgiO6l3Rr9CjO9VOgmHwyfvlMH4GoBa7apnplNS8d47fA4JagtHbyLgjGqQl893F4oLQZSLnNQTBKqDQHt7ZBsYOBOEseRQ7MbqSJ0eKTOPIMLRIwj4dwDNOcgyzLQgFe9d4wfC1nBkngF6BMBORQQB3Jwa6jsTAHuVqSMcBq5JDfVd4FVGNcoJQVQDRZEhCPhHIBKJBP74lnEcs/l/AfobJnw1Ndj3ff+S6qfGjCOI9vbIXyNI/WB8PA8jMa5MxPsu8wNre0fP2SD+ib4OPzMWDB6+rn/ln73KDIUiB8GgRxjYZStx8aJUPHa/VxnVKCcEUQ0URYYg4B+B9s7oCjDOydcUgvCPYcU1uru7g6PpXe8EENkqjB9sCW4X6e+/ZYuXF2hkbARoI8D75eq/ZzIfPRyPDXmRp8q02c4fGHgtiEz30NDd/+NVRjXKCUFUA0WRIQj4R6C9M3oLGF8QgvCPXVVrtHVGLyTGDwpCCS8izd3JZOyPXl5UtAthPMZESQIXdiF+2b/4wNsfaXlpt5cyQhBeUJIygkD1ERCCqD6mZUls7YiEDaJfAtiunBV/m+0cQ50VAHhg8tmG9wm+o+P47U2a8wCAQ6Z79SAEUdaQkkqCQMUICEFUDGF1BMzrXrpLczr9a4A+XZDIdE4y3nuDlzeEOqLfZcKlubIZZl5EJtZZzzb8mIjauno+QSb/GsAe5RCWlzZ7LSME4RUpKScIVBcBIYjq4lmJNGrriN5GhFMKK3bGL1LxvtPcbh51d0c+NDpO94FwmKqbJ4Jg8O1XbWcbWeLwcsjc1hE5nohWAwhk2+PT5FUJEPa6QhDVRFNkCQLeERCC8I5VzUsW30Li4QCajhkaunNjqZcXr/a3mpLsMr1eU7XtSNTrYy3BDSf39/enndoyb95ZTXPmvDOPCREGHwLCPmDsbCk/AvDrxEYcjJWjozs+vm7djeNuwJZLEDmzm/L92NXyjg0wjBOTA6sSbu+dP//EnQNNwRNA+AcQHWDpywiAlwF62CTzxuHBmHJA9HR92P7BgXBrcrDvdLe25H/3Wt++aADwhyClOwcHV7+pZHV0RA4wDfoSTBwNwt65hcAImF9gwi/MsfSta9fe87bXdlWrXLbdaeMYwDwDoM8C2C0nW427V4hp0DT4Z3vvzqlYLJYp9d5QZ88S5qyTZ3aR42cHbZWrMf96chatxfhpC0cXEFC4RWg9V1RXUl97i9qIcR6ALgC7F/QKvMyg1RmDbnhyYNVrTrhpxk1p1focv9UaJ5XImXHXXPOdtfsxEPBnmHxkIhF7uiRB2G4bWa/IFvtGuJPOp486dbsPvT96DwNHFt5bwty11YmGvgHgwz6U9ycAF+y1B/eV+tjLIYj2zsh8MKkzFN/k0Nq6bIdAcPwKJpwLIOjWHwZSTHzm8GDsObeyXid4Jzle6zsRRDod2ATD+DaIL3TpW5qB6+cE+bL+/thf3PpV6e/ZMRTEcjB9HcD27vL4BTJwXmIg9rgTOc/viuwbMEmZSffNyyvHYbR4sUR3tgTXf8FpsVTL8eNEEPPDPfsb4FsJaHPBLkNEd3Ha/GIyGftfe1khCPeRN20ltI5pTItT8V5l6nF67N7OYwx8PjXU96iqoJHpGqYiHF68XxqBfkJ2Zakexzqt3Yv2MtKBu3Te2x6BzBDoxtGRHS9w2k34JQgncmBgWR4Xp7aFQj0Hs4FVluvBHrsBtav4cnKo7/pSuwmvE3wtCCKAzFEmBy5hwj967RTAw2Ywc9Jw/+rXvdfxV7Kte8k+lM6sKmMMZUD44dj7cy91GDvU3tlzDZiXW1rkuhO2tl5zNqgm2FMSg72rdL2s9fjREYQB/h0z3a5zkHXSBAP/YZjphYnE6lesZYQg/I3dqS5dFNrCzSRUNIA1ZwVF11VdDr/tN6LUJKEzdc07IrJjyxaKTdppTCCmtv5vAVgLkNolqDlT7SxCFpOBFdsxZlrqRIR+CEKZ22DyvQTsb3nBBi/k4EAsSkwa4BdBNAym0WxfCF0289lEv4kvSQ7G/s2JJKaRIF5iYA0BX7Tgovq1DjD+A8QtMDFhFrQ9BDwyOocj6x6NvVPtDyIUWvRRNoIPATjQJjsDxmtMSBHoXQbvQEAYwF7FbaAbx0Z2XK4jifZw5GiA7rNEKfBkHsq/o+hbKHEWNxXjx04QRLifObtrsO6UX2dgSOFWYqyCGD9rbtpwrnUnlI0NN/7+chD9nwkMzEOB/P+zf1Df9FaLBptrk/HYTdUeF7WUN2NNTAoUu3Oa+jj/sm3Lic88fPt7OtA09tGiFVJbR88iIr7bUr/kKqqtM3opMb6bL+9EUpoYUqrKvRmDL1o7EHtJ115l+84QrSDg0Mm/8xpksDiZjKmV+KTHK0E4TDYbifjUxGBMhTJxfBzqpomxIj0+/m27PV7Ze199A4cZZPzEttsYYeYlThcBppEgrH3PMOPeTMC40GaPpvauJQfBNG8D8ElrBQKtaA6uP7/UGZTfj9phgZFtmxngL2vGELV2Lv2UwembAPp7y/syDP5Kaii21Y8o92OltwOLFldE1yYHe1UwzElnTlM1fuwEYcV8wtQZOHt4cOV/Wtunxurrb5JydvuRzXy3gUw6PJHofdZJd3JI7XdU17i8/cDZ7WCtaJLW7A6KDrFLrIJ00WVZY+ZqbV26m9GUUWaswkRCRL2j7+94qtvBc9beHKBbrZ7jpfrphSAqIQflhT42vusKm+lFTfRnpOKxvlImo4O7I7sH01gJULeFUFPm2PhxukPeOiCIDAFXjI7M/a6TntrbI3+FAPUCOMIy3N8lk49OJGLJan0CmgVGBsyX77Unrip1JpU7yL4e4FMtbXkVGT48mYy9aG9fKBz9FgPf3Pp3b/5AmjE+BvDC5FBM+SsVnqkcP04EoXYDbPL5ugVWvqGhzuhiZihT1DaFseoSW0kIolqjvUpyNGcGk84UJg/MolDh2rMCjV3RUaYmLpR2S15shoLrasTad
s3OZxOZdFQi0fukHUo3gtBN0urcxMvOQb0rFOr5JBtZn4/8Nt3VVGRtY3t75OMIZA9DP5L7u6OderoJgoGbx0fmnu1G4upcAGnzV5NMdQ6r53KGvm6BATibiuzvUDeEjOamB62HsgR8OzHU960igghF2tnIOqHukPvNk5mp2DylN7VO5fjREcTEecL4UYnEPRtK6WJiYYa7ATqmQBAuV+mFIMoZ3TWuY9/WOoXIKD5Mdr6hZL+J4RQMsGjiZjzW0sQL7TdZVCA/06AzCPz3AH2UiIaaA+tP8WqC0Ez6as9+fGqoT908mvSUIggHM8VmZjrd5XC/8A77ClNt1Z12AE6q97pKnWaC8EXiRSv8KvrC2K+gAnDcAThibrvGCugDUmoPXt2dUIsOuB0JyLZDqeX40RKEjwirRTeyHL7vPOZCEDWe7MsRX+Sk5uCD0NbRcwwRqwk1f8/bMRR30YrfYWDYY0KVE1XWS5+rQRDpOXhRc0juixx0k4ffmFWqv3ZidTKZTS9BlL6eadebxpvecefpReeWMrrLGGWEoy+6becYkLI9HFW+AddubYPzmZcqo9lJa01sUz1+NAThKwin/YwTQhA+h24dFNc4vmlDdevCazgejhaHFC/aZmsiwlZrQihCtVKCAPGJYONMmx16nIhOc7qCqFOt5kpvebk4std9gwOWO/da7KaTIPz6ADjE47ooNdj3w0o+E+11boJvubr2wWFnoCG7krupokWaw0Q61eNHQxAvmcF0l9dryEX1hSAqGcrTU1ezKimyzzuF13AKxa0LKW6fMLxcma0EkQlP67f3NxE4HsTLVAKSQiiPiZNgryYmdaNLhRz/lL09Xg/K8/U0ZyHqIPJBwPCcN0PJYuZtiXC8xc4N3YQ8jQTheMZT0m5tC/fs1/PbIymrYr8CaNKdfLexxuAmAj4HYM98Wacdrz6kPpYnh/qu07xHl0lRW3aqx0+RHwTwex4PdA0Pr1zvhpf6XQhCRQ1qgMduK7RPNqXCazh1XxN2Y9K2fsLJhx/+4Drc3JwMX05FuTo0r3vpzs2Z9H5sYl+DsuESPsOACkKYD5ugbaIPgiil4TFmPi0Vj6lbOK5PqWuDrpVdCuhMVdNIEL5WmvmulRNyxQ23UpkP3eq6/l4i9EPxuYfezKTxwHY81J7q8eN3grfj5be+nEG4jrjpKWA/X1DX2BLxvn/Kt6YomQ/hstRg35WlWltMAJMPtYsIxOPhl9oZNG2z6WhiXAzCwdZrdH7QK5MgVIwe9W9O/l0M/B5B43Op/lUvu71/qj/waSSISbGY3HApjDN7vnQXk4QXudNFEJpzBa2ZSXOA7rhQmurx43eCF4IoHpENsYPQ2DafMHjLgnj8/s0qtmpbOHoTAWfkuu/prMBuQrLFerLL9HL4RaGuyKFs4rqcucjL/KDKZL1kQdkdxdY72d5NTPn3ZMNbZCPNMu6xyvLq2DXVH7gQBDBdBKG+G03ojUmmowlT1G63AnxybpCVDK0x1eNHCMLrFONcriEIQuOwVtjmVnBWUHx7hOmUVLz3jmKPU9cc1tQejqqwDVe57BjU6v4NAOtArJysnmgJbPdMJvPuTmkODgL4mGXl7/UMQlUpxD7q7u4O6BzdTOaFw/HYI6WGlOYDL2ul7XXYzjiCCEevJkB5Dk88VYjeqSEI1/hgXvF1K1fk22DbEWkWZi9lDD7cKTLAVI8fIQg3Dbv/3hAEobppC3lRuF3jJbyGE0yaPNPZq7H2j1Y5VKWG+s508iJuCy9pJZhrbAHCMh94mv6WYNwMEwPNzebLTpFAK7zFNEbAlxNDfT/Ot0/jrKYOvV0diIpMeR4j6LoPQ32J6SIIr5GBba0uzlECOF6l9oqJ8qGBQY8wsEt+lc5MC1LxXjWeavq4Ba8sugbq4hw41eNHCKLy4dE4BGFPI5o7ZyjKG+Hu9FNA1X64nY/1tO17YycQZWPnZ59SVyI123BVZTNAZyaHeu/ykhuhQoLQrjjbOqLnEGXvuk8kOZpoyBWpoT4Vhlybr0E/WXlLqlTOUC2O7eM9H4Q2DLvDit6P97xTP3TXUZ2ukfrBQnlRU1NmwOqlXY7viZ93WssWOUbmzto0u3ZtaI1JsrRkV9PxMykfhJsfgx0jvwQjh9TljrIpqKc5VIuNjcxd1jRn088tmed8bc8112Oz1+SoKXOpxZRQMhSBZhvuOhHb4dLcmPJzzdU5pEiaFEGpq4/5ZyPDOCY1tGpYpzJtMLcyQkqocAumwQ/RxIH50wT8NzFfF4/HXrW+1+6dzB6zBioZuonVyeSjc+By8gR2Gsq1MgXp4n0BpR3XdG3M4aHCgexEwH+ZwPOGyTe75U8J2UNv5MxMW0xj78lpdl3NrJjy8WNLGCQE4X8ibpgdRPHdbX6GDWMJmaaKnZ/LXe2eAKhocp6cv3qjwVhsUjaY2SG5stYD8SIN6O5+W3NQeFFZsWerLz8IR1LUm764f2wOTnAIV607uPQb9kGd7fzLB+RwuaXv+hhW9ltBQEmsJ5GLbUeZ/c37DkJtohJjc3Cs17DdmvAhvuqXGgca/b9rsHFcPL5KnUt5ekKd0VOYcYtlx+jpsoYmJlFWV8jgs9ZdtMcoAlM7foQgPI2NUoUahiBUJ60rzqwdmXApM76Xt/275YvQAWUPu8GEi8C4MJ8gyE2mJlCfL+9j3XmBaqePa66ldk26ydoxHHR2Zd4ROdKgbM6Awo0qP4Hj2ruWfAamqYLBWWLy8+qW4HbL+vtv2TJpkrdl/wO8BTlUV4lbtn3ndmbumaRTXwQBFUp7eSre9xO3r6yjY/HfmhRQPjEFJzTA0bHMTVzR7zr5fvJOTCSqCqozC2tY8qcyY+NHeUmVaicoJj6DmI61RBj2HL12SsePEITvsWav0FAEYVutZ2PlE2FxrtMZZv/2ziLTFWMAlE060qyuoLrJ1Njt1eTuKUqoIgcO0F0E/J1dcbqw4qqMWzTXoh1S6MRdTaPpYds7VC7qo5MDq35rL59LmboSRAstv7mGxVZlHUJjjzjdoNJf8SwdxTQbz/8NdZ2XrrCer/jfQWRruCZPcsgS+HzaMD5XKp+xzy+X2jujPwDjS5Pr0e0tQfOLpdKcKn2RQdfYwrN7Jr/swqur5xM2c9LgB4sCRYYTt+p8+HtM5fjxe4Zg14nf+vYzCI+7Kp9DYWqLNxRBaG2cOTzdckU4wa4/jMuSg6fE7iqCavMWPASQyhCXfzIM3MDjTZcMD9/xrv3d2QTuLcHzwHSxU85hp4NKvwSRnQA6Ij1EpBLfZPs18dHzfTCxVBczX78LyFZ60GBcHI/H/svWpxI+IM4TvkNKR0fslFevkaGriHBCETmoBvnbQeS7sBnE30Ea19qwyPXJ+KktCVKGCKcnBvsKlxiq8Uk77AIU5r9hBJanhlattV8uaO2MHEhMygdoUu5lP7sP1XaHixbWbjmF4dB2farGj98JvlKCKLpUATyHDC/U5d2oxpiYChkNRRAOMWTyM96DLcHtInYzhheQHbLBZSdELzI19t/8a0fA/ALIeCq7G1GpIlVKRMrm
ty7cLsreeiKsB+PjhfnbwRu8HIJwMMmUnOh0CVUsWBbSOObSXx5mS/M4UZSQDCK9eHBw9ZtOeshl+Fs5ibwmCivfjjhA/5NNAQp0gLGvBbcRMF0LYpVTeqfc+25NDvadbn+XAxGN2d65mVQWMtBLJfvkI0+Dl7FnLVMiTafC8m2o3a1KW1siJWoWL8M8ITkQU5nUPD8aj+l8XV9h0fOVpmL8TDVBFN2YnOhszreJVMbFgeambc4tZw7yrKgqF2woglDYOCgJ7CG8hhO2moPmbFGv1w2zE/A271zN4LO0K9uSSuUXyDROMwMcJkYhTaTTjZ5yCEK93sGOXvIAOtQZ+bzfBPBbu8rDZjBzkltkTYVd87abrlTnPj6wG2HiLwbMwLMmmepswDdBEHCBCfyDzrznoK4MCD8ce3/upW4Jhir5hidW33yXbdfiUaQiBzpJZzp0E6C5JZir4m2RpJNf6/Ez1QTh8A1Zuu7/koybXmr9e8MRhENoAk83NpzAdjBdeQmvURCZnejmbLoABOVnsL0Hxf4JzP8CEzcp04bXhPDlEoRqT1s4chGBvm/zjSh5XnJw15K9m5ivYmZ11hP00C8nk41j1Vye4OgHE/3VAD5c+h1qR4ZzkoOxJ4rGgg8Tk7oEQBmOU9C4nplPKk1O/AIZOC8xEHvci1+LB4xKFmltXbYDNY1/hwAVb8xyWcCxWjZfeCbddLnOpOmxPbobSCVDa3iRW8vxM9UEkQ3r0xn5AjFdr9NLuWZuLzjWqkzDEYT+rjVeRJq7k8nYH8sEsjikcZkZw9T5QrC5aRmDe0B0ABg759qktqKvENMgGCtHR3d83LoS1aSd1JqAKiGI3HnJvdac0cqUw8xLnHJn5PFU7Qs0ZXqy/QKpw8t8NNoMgLdUBjPAuLklaK4pdahaSj/KFDSWoROY+VwbdusBfopBK8ZH5v4yj1ulBJHL2Edt4SXzCaYi96Ms+lIhox9j4Od778GPl8oLXeaYc62WzRA4SgvAOI2BAwHsbiEyhckfwPh5Zjx9r5fbSm4v1NzIKxlaw02e9fdajJ9pIIhsl9TZj8GkrnEr06o1KrOvRaUf/GpVtuEIolZAidzGRUCb6cwhGGLjoiA9EwSKERCCkFEx6xEQgpj1Q0AAcEBACEKGxqxHQAhi1g8BAUAIQsaAIKBHQAhCRoYgoEdAdhAyMmY9AkIQs34ICACyg5AxIAjIDkLGgCDgBwHZQfhBS8o2JAKyg2hItUqnqoCAEEQVQBQRMxsBIYiZrT9pfe0QEIKoHbYieYYgIAQxQxQlzZxyBIQgphxyeWG9ISAEUW8akfbUCwJCEPWiCWmHICAICAJ1hoAQRJ0pRJojCAgCgkC9ICAEUS+akHYIAoKAIFBnCAhB1JlCpDmCgCAgCNQLAkIQ9aIJaYcgIAgIAnWGgBBEnSlEmiMICAKCQL0gIARRL5qQdggCgoAgUGcICEHUmUKkOYKAICAI1AsCQhD1oglphyAgCAgCdYaAEESdKUSaIwgIAoJAvSAgBFEvmpB2CAKCgCBQZwgIQdSZQqQ5goAgIAjUCwJCEPWiCWmHICAICAJ1hoAQRJ0pRJojCAgCgkC9ICAEUS+akHYIAoKAIFBnCAhB1JlCpDmCgCAgCNQLAkIQ9aIJaYcgIAgIAnWGgBBEnSlEmiMICAKCQL0gIARRL5qQdggCgoAgUGcICEHUmUKkOYKAICAI1AsCQhD1ook6ase8eWc1tbS8cygbfDqALgC7AwjkmriegGdAuG20hR9Y92jsnUqaPn/+iTsHmoInEFEPA58GsFtO3giAlwF62CTzxuHB2PMA2O+7qiW/s3PRHmkODgL4WL4NDByfGup7wEub/NQvKst4rKWJF/b3x94LdUU72cSVAFoBBAGs/0BHjzHhur1351QsFsvY29PaunS3QFOmh8E9IDoAjJ1zZdIA3gAwDFCsJWiu6e+P/cVLf6TM7EBACGJ26NlTL9vbI9vAwJkg+gaAD3uoNMLAT3m86evDw3e866F8oUhr67IdqGn8OwT8E4BtXOpmAB7kYOCMVP+ql728p9ry/Uzwuvb5qa8jiMz4eDTQ0vQ1MC60kLX1VWMMfD411Pdo/o8Hd0d2b8oYP2LmxTkycYNuM4i/gzSuTSZjiqDlmeUICEHM8gEwaTJJ020MHFkGJM+yQdHUQK9a5bs+oVCknQ26C8CeroUnF9jAwDLrJKirXwv5fib4qhMEuJ9hxAn8NQdyUJurxNgcHJvf0bW3Rz6OAN0H4ECfGCtZ/ekglj7ZH3vLf12p0UgICEE0kjbL7EsotOijbAQf0k4mhLfBGADoTwzegYAwgL00r3rDYGNhPL7qN6Wa0d4ZmQ8mZZbZ1VYuA8JLAOJgGiXwvgy0AdjeVm4DDOPo5MCq3+reUyv500sQ2AygBUBzts+Ml2Hgiez/TRwCwj4AlieH+q5Tf5p3RGTHli0U05D9ZoD/EzCenRDjrE8Gbh4fmXv2unU3jpc5rKRaAyAgBNEASqykC93dkQ+NprOr+c/Z5DzLhHNSg30Ju+2/tTNyIDHdRBMTuPV51gymjxnuX/26rk2t3Yv2MtLBNQA+afk9w4x7zQB/ee1ATBFE4cm2LUPfBONLk0wkzPfBxFK7GaSW8qeZIPKYjBDwtb/eg6+znDVQOLx430zG2JRMxv5XFWzviCgz4Q2W3cYIES4YfX/uLZoJn9q7lhwE07zNppd3yeSjE4lYspLxJXVnNgJCEDNbfxW3vq0jeg4RrrWZLvrG5vBZpQ6g1UF287abriyyiRN+lBzsu0h3oBwKR7/FwDet5ADmy/faE1fpDldz5ai9M3IxmP7V0sYxgBcmh2K/tAJQS/l1QRDMlyTjse+VOqzv7j59zmj6vRhAx1mwKewunAZMW/eSfZA2f0XA/vkyDFyTGuq7oOJBJgJmLAJCEDNWdZU3XJkimrfgoQ9uCoUsk0LKHBs/bu3ae952e0P2ttO279zOzD2Wsm8YnDkqHr/7d9b66iaN0ZRRB6iF3YNXM0Z2JzFO94Fw2FaZdGdLcP0X+vv71U0c1Fp+HRDESxmDD7fvsuw6UjhQU2bAMtFvNMFHDg/F1rnpsz0cPQ/ILhZyDw8H0HTM0NCdG93qyu+NiYAQRGPq1VOv2sLRIwj494JtG9CuzEsJC4V6PskG/9p6psCEr6YG+75vrdfWETmeiFZbdgGeJy4lJ9TZs4SZf5Gvz8DveTzQNTy8Ul3zRK3l1wFBxFqCG07OE6KTTjTtfM9kPno4HhtyGxShUM/BbPA9DLwH4CkDeJozfI3caHJDrnF/F4JoXN269izUEf0uEy6tZMXY3d0dHE3veieAiEXOgy3B7SL9/bdsyf+tLRy9moDzLWXWIIPFXiefcHjxfhkYiswyIPoNAXFO8915u3ut5U83QTDhstRgn/J/KPl0dBy/vUlz1CWAQyxYy60kN+Dkdy0CQhCzdGB8+qhTt/vQ+6NqtVi41lquzbm9o+dsEP/EAuUfgpTuHBxc/ab6m/ZdHic8L+qptXzVhmknCB9OeZqzGNWFTSCsMky6dcu
WHdfJ7SQvI0vKCEHM0jEwr3vpLs3p9K8BUt7LEw/TOcl4r7r94utp7YiEDSJ1YLxdruIk85F2cmVanIr3KpNTxU+t5dcBQXg2E6m25nwglNnvIw7gjoDwNEy6lZvoV6n+Va+U46VeseJEQN0jIARR9yqqTQMrXRFbW9UajswzQI8A2ElHELnrpwMA9s39nmGmBal4r7ryWvFTa/l1QBC+zmtUe9s6ehYR8S0aPxId3htAWM1s3LL3HpnflLhRVrGuRMDMQkAIYmbpq2qtrTFBqKBJhThFbgRSaadqLX8mEoRqcwl/lVKQbyLg/2XGm67xGz6lUj1K/fpDQAii/nQyJS2qMUFMMonUegKvtfyZShA0YUlhAAAG00lEQVS5gUStnZFPENPFBJzkcUehqvoKnzIlg1ZeMuUICEFMOeT18cIaE8Qkk4i6gZRGoJ+AvWthYqq1/BlOEIUBF4lEAq+tN/anDJ/AhB4C/tYliF9Jz/j6GMnSiloiIARRS3TrWHY1D6nt/hQE/BkmH5lIxJ52nFzlkHqSGc46VDTk7fsMwsvQy3pdm1s+CzZPBWORJj6WEuPqhe3lXVJmZiIgBDEz9VZxq3UhGYjxs0S8T4Xf9vW0dUYvJMYP8pXsTmy6u/le7/VbG9IWjv6cgKMAfoVBTwPGbamhVcO1lq/aoCNUP/kgQqHIQTDoEQZ2seCkzScxVQRhxVbtLl5/A6eD6MfW8OsEPPKXbVtOfObh25XznDyzDAEhiFmmcGt3p9JRLtQR/SkT/nHr+7nIma6UKsLhk3fKYHwNQCpRTvZhplNS8d471P9rLV8X7oN97ILaOnqOIWLlwJZPvFSTHURbeEkrceZYImpn4EAVziQ11Pd1j8Oc2sLRfyXgq5byk3xaPMqRYg2CgBBEgyiynG7oQm0w01I//gmaA2LoQm3YQ2UA2EAmHZ5I9GZDT7s9uRwPytdih1zZN9mgw/M5KGotX0sQPpz9ij29s+nxqr6DaOuMfoUYKqBfjkUL2eg8ZYprC0cXEHC/EITbiJwdvwtBzA49a3tZo2B92ol/fldk34BJynkr7wuhJkhPOQdUOI+x8V1X2HYgk0J11Fq+Q0gRT+FCnBzXakEQGqfFdw02jovHV6l0qa5PEcEATxi8ZUE8fr/KSSHPLENACGKWKdzeXV24byY8NN7CJ5cT7ptAK5qD68/XBZXThuMmviQ5GPu3Ep681NYRiRLRzRbb+Jhup1Nr+ZrJU+VZODUx2He30zBqb4/8FQLUC+AIe5laEISO9AE8R2b62ERitfKYdnx0Ib+JcWUi3nfZLP9MZm33hSBmreonOu6cMIhfIAPnJQZij9sn746OyAEZohUEHGqDr+REFAqduKtpND1MwN9Z6mUYuMEcG/+GPcS4atuWNP0zAZd7SRhUa/k6cxqAjWBevtee6LV6IKtD31ffwGEG4RqA/kY3zGpBEOo9oc7oKcxQXtSF8w6AXzAZ539kTzym8ZSmUFfkUDaNnwK8n6Wt2tDts/yTmVXdF4KYVerWd7ZKKUc3gHhBcjC2thSkJVKCphn4HYFUytIMiD8FxkHWGzU5uSVJqJby9aauQm9VvuzHCPQuwB8GoQuMnS1YPAfQEMBn5f9WK4Job49sAwMrQbSwSBeEt4nxFIOy2fuyqV0Jn7W1Vf2kdOC2u5Ovp8EREIJocAV77d5EPKPAnQB1eq2ztRy/ACNwslOeaLu89q4ln4HJd9lWq66vVddnYdAJ+YNppwq1lJ/D6S7rbSq3hufbDeYF1gPkWhGEas/B3ZHdg2msBKjbrX2a39MEfHt0ZO73JOprGeg1UBUhiAZSZqVdmVh50nkgKJvzXA/yNhPw/eYg/6i/P+bplkxeZmvrsh0CwfErmHCuizevqpImxopMuulyr/GBainfh+xsvu1MEy9/sj/2lv0Mo5YEoUBT+uQAfTOXh2MbD/pURZ4lgy/QmRY91pdiDYSAEEQDKbNaXZk4lzCOAVglAVLpSHfP2bMzAN4CkAAo1hI01/glBnsb588/cedgc9MyBveA6ACLqWMEzC984KP2QDoQ+NmTA6teK6d/tZR/cNeSvQMmn01g5YW8T84cplKgvgLCIyb4x8ODsefzZzhTTRB5vGz6VH4ke1pIeTPArwLGozDMm/baDc9JNNdyRlpj1hGCaEy9Sq8EAUFAEKgYASGIiiEUAYKAICAINCYCQhCNqVfplSAgCAgCFSMgBFExhCJAEBAEBIHGREAIojH1Kr0SBAQBQaBiBIQgKoZQBAgCgoAg0JgICEE0pl6lV4KAICAIVIyAEETFEIoAQUAQEAQaEwEhiMbUq/RKEBAEBIGKERCCqBhCESAICAKCQGMiIATRmHqVXgkCgoAgUDECQhAVQygCBAFBQBBoTASEIBpTr9IrQUAQEAQqRkAIomIIRYAgIAgIAo2JgBBEY+pVeiUICAKCQMUICEFUDKEIEAQEAUGgMREQgmhMvUqvBAFBQBCoGAEhiIohFAGCgCAgCDQmAkIQjalX6ZUgIAgIAhUjIARRMYQiQBAQBASBxkRACKIx9Sq9EgQEAUGgYgSEICqGUAQIAoKAINCYCAhBNKZepVeCgCAgCFSMgBBExRCKAEFAEBAEGhMBIYjG1Kv0ShAQBASBihEQgqgYQhEgCAgCgkBjIiAE0Zh6lV4JAoKAIFAxAkIQFUMoAgQBQUAQaEwE/j/HPA4l32L2BwAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-8"><g><path d="M 421 51 L 474.63 51" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 479.88 51 L 472.88 54.5 L 474.63 51 L 472.88 47.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-56"><g><path d="M 371 101 L 371 144.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 371 149.88 L 367.5 142.88 L 371 144.63 L 374.5 142.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-3"><g><rect x="321" y="1" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g 
transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 51px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">MP1 hands off control to OS</div></div></div></foreignObject><image x="322" y="37" width="98" height="32" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXt8XFW1/3edmTStKK2iQHmo+LgqPi7XSptkkhIpTwUqrZO0QKHIVUBquSCICgreK+Bb4YIoyqO8mmRsecgFKVhjk0kmhf5EfhXRq4IChVb4tVBoHjNz1q97HunJmX1mzpnnmWSdv/LJ7Mfa37XP/p699lprE+QRBAQBQUAQEAQ0CJCgIggIAoKAICAI6BAQgpB5IQgIAoKAIKBFQAhCJoYgIAgIAoKAEITMAUFAEBAEBAH3CMgOwj1WUlIQEAQEgSmFgBDElFK3DFYQEAQEAfcICEG4x0pKCgKCgCAwpRAQgphS6pbBCgKCgCDgHoG6JIjmto5bwThDN0xmOi0W7b7TPQTOJZvmd36ATP41gNm2UttN8NFD/ZFN9trt7eE3jsbpXhCOLEGGrQy8AsZGw8BDidH4gxs33v1yCe05Vg2FlrSZZN4PYG8QVg329SyvRD/ZNtvaFs1OcLAPwLsz/3PEspJyTNW257WG5xighwG8OYPBX4OUaOvrW/vCVMXE7bhDofD7TIP+AyaOA+FgAIFM3a0AngPoisH+bvUupZ7m5vAMGDgZhPMA+jCAN2V+GgbwIkDrG4OjF/X23rPDrQzVLjfpCAJApDG47ZTe3t5EqWA2hzrPBvFPNO1UmiDsXSaIaE2c6OJHN3Q9W+q4svXnHBWeOW0E9wDUnvqfEES5oPVtO0IQ3lUzZ87nGhpn7LiEgcsBBB1aSDLTibFo94Pq98zHZQ+ADzn1SMDDr72h8eQn1t3+unepqlNj0hEEA88GkWzv71/zt1IgbG9fPn008XoEoBN8QBBZEbYT8bKBvsj/lDI2VVdN+mkzXrkO4M+NtyUEUSqsvq8vBOFdRU2hjnOIcJ1lx5DTCAEvweSjBwYij7e0nLyvaTSsI+Bf8/VGjJ8PRHs+612i6tWYdAShoCuHmSmPeUl1Ue0dhHVGbAPxiYN9kY0lTBNqbgtfBKarJ0x6IYgSIK2PqkIQ3vQ0p33pW6clEr8G6CO2mspCsQWgBMBqV/GaGQ8cOTS0equj5YHwMpheSe/WeSYDV8b6en7oTaLqlp6UBAHQXY3BrWeUYmbKY17yThCM9Y0NvLC3N/KaG/W2t39q1thY40GmwSsJONO+rWUgZo7FTyjyXIKaWzs+D+C7AGZMkEcIwo166rqMEIQ39WnwAjPu4ETDeUNDd76qa01zRroNhNMH+3rWqe9XbxLUtvRkIYidABoBTMvA+XTS4AUbN0SeLgbe1OFSAGsAOj5TPw7AsHxte9tBeCQIq8zN85e0wDTvBrCv5f9JZqyIRXt05yOOQ87YUi9l4DLtdlkIopjpUld1hCC8qaupteNEAu7L1ipkwtY5qRDjqoFoz6XeevZH6UlBEOqLmhhjIMzPwJokotMG+rq7ioG5paXzQ2ykvJeyi7JaoNVBbtbzo2oEoeRvaetYzIzbJ3zxeySd5ubwWxCgnwL4tCMmQhDFTJe6qiME4U1ddoIA8FuDR06MRu9TH6U5j44gmGlxLNq91lvP/ig9KQgCjPUAHgSlzCaZp3gzU3Nrx3lA6lBKPduZ8VUiXFUrgtDsaJRcL7BBC2Ibuv9YYCpRc2vnJwG+GcDbbGXHLLsu8WLyxztZUSmEILzBm0MQBT7MtAQBnBTr7/mlt579UXryEITBXwHTPZaYhaLMTLmLMT9oMq4yiNQ2syY7CDVVdGciXGDizWsLH2ow3ai87mwmpSQIP2PGi7snwBV7OFXiIPzxWlZOCiEIb9gKQXjDyxelcw6BGOvHGgKd0xKJWyxuqUWZmTTmpRUmOGYLLqqqiUmBrtnq5vXW0gSkZXW3E6AVg/3dtze1dVxMjG/7iSBSciNwGkwsBdH7LWY1FYy0noGbD57Nv4lEIskiJiM1tS95BxLJRQboOAYOBbC/hTxVm4o0n2Tm7mQ8cY8bRwDNV+OE4LO5c0/ex5gWPINApwD4F0vA1FaA/w+DbogPz/rVpk03qrMuz0+q/4RxPGCeCaK5YOyTaeQ5EO43TP5RNBr5k/pfsQQRDocDz23FoTCNswA+BsA7LbrZCfA/AOMRBq0+eHbysSL143ns2QqHz19ycJCTS8DUacNYBaU9A9A6GOZNB+2HJ/PJlue9ySebCpid40H4ugkOnTQ7COUlNBo3TrUGtjFwS6y/5ywvngM289I2MmlB0jAbfUkQhEtifT3f0U1MzURXB9v3JAPGBdlgu6a2ji/5hSBmBPGnkQRdSYDysHIKRsoMlZ8i0zh9YKD7UZcvJbXM72hjk78P0Mdc1lHFEsS4IZlouMzJY0UVciKIXbve+tK06TvOB+HrFlJw6v6fAH1msL9bxbi48nRJLdovUMfune01GvOhtZ8EmH4I07zcDOBQr5HUzW3hI8CkTK6OQV8TB8VPkYHzBjZEfuN2LB50MqFoZpf8AwBH5YtTsFTaTAaf7ySbEMRETUwqghgxjYMnpMYg/AUJbh8cjDzvZgLqzEtIYrHmpfLHDiKPiWniRNe/sH4hCBA+A+YvAzTPjZ4yZXYy0/JCh3+pgMA37LgKjAtcLiA5IjDwe8NMLBwYWPt3nXw6gkAyeQICgW8AUAu42yfB4Eti/RHlG5+XJFSfIwm6loDT3Y5LBWYlwats5tJ8qTaoKRTuIKJbclyi
C48oScA3R4dnXVnszqhAF+psbRnAiriyKSwKS5UukSDgG6PDs75tl00IYhITBLBXwhb9nGTmRbFoZNxNLd8M0pmXBvt7rtdsy6tPEPavfeB1k/m4oWikP88O4hZm3HzwAbxGt632CUEo77OdFrOIGs4wCI+D6f+CWLkvh8A4xL4QMvBnBI1jY71dzzjoVR8QqAqngpb4T4CxOVVX9cM8D6D36HYwanGd1rDtXF1sjYYgngf4CYub9MQxAQEGf4yAD2r6Kmh+0EbB7wFgGwPrCfQqgQ/h9PnT+AJKhPuYU95+szJVHAmiuS08F0zqcNXqYq2qKXPfAED/TEOZ20+m7TFmPj0WjXS7XbldlnPWK5Bg4A8EegxAEsQfBuMwDcElCXTj6PDM860koWKQRhONlwB4S1oWfgeAYy1ybWHgIQJZzIHmMGCkYooY3EDp8gdY6jwEkPXjYpiD9KM889YlDJUvNql2ECoQzX6Y68XMpDMvDQx0b641QWRyJj2w247aMj4lPO6OdFPJJwRhFW0niP8LCVw3OBhRtuPxJ21KwCq7iUh9CQ709+w5aLfWyU1Mp5azv5lsnvP2A7BeR5rz5p26t9GQ+ArAF9kW75S5Uc0HO5YFEjTuJODKaUG+3h4oqezmDaZ5EwNHT2iT6LrBvu6VTruIlraO05hxq40wtwB81kGz8bB1XKnYlzfs6GTGtRYnC2t3WoLIpJq5E6BF2cKKkMkwlgxu6HrcLluKtKZvPx1Eavdj+aLngbHp+MSmRyLpCOIyPE2h8ElEpFzYrYGeCYC+Z8aDV9vNgSnLQBArwPQ1224jyeAvxfojykSlfeSQugwKq3YTukPqbKRyTooMlwupxrx0f2Nwr3Bv760jtSYIfS6Y4t14s/ryGUEUTCHS0rLoHWwEH0D6cDn9OLsdUnNb57VgXmGZn/9AkhcMDkb+UmDOUlNb+Axi+rl1EXZK4ZKHILbBME4e3NA14NSfIv/GEYpMIIk8c9Yhz8+TSPLCfOPKsxvQEoQm1YwjQVrH1hQKdxLRbRb36TEGPhnr73mkHOuEw/hd5Shrnr/k32DyLwB+l0WWLQYnj4lG1/xBJ58QRDm0VuU28hGEJsmeKzNTjnmJ6ZzBaLcKLNN5flTFxJQ6hNyC5SD6b9vX0rDJvHAoGlFpm4t+fEQQrk0Rtl2eGvvTZjAxf6h37XNWIBxy6KxQJkM3gLW2nvLmJOIPWs9FmHBprK9HxcNMeBwIwnW0e3Nr+DiA7rUsqjvIpGN0h/AtbZ1LmPkOC3G9arBxQjTapVKo5300i7cqryeIUOfxRKzMS+mU1i4DM3UxO+WMJNaMP0mE5QN9PQqTgs+E9PbZ0oQfDfb1XKjbsQlBFITUfwXyEYSSNidmoMCWXdWxLZYTgtCquYNQBLcrMfz2IHAUw/wCkHL1nPAos1l8eNbZpR7++Ycg3JshWlo6D2eDVU6brA1dS9YqcpwC9HkGPgrgMBCSAU4e6yXLb848c4g01xKEy52rUmxr6+J3JRDoJaTuGMisx7nBVe3t7cHRxL53AQjvmRDud5IOAZd6grClmAB4KICG4/v779peaEVoauv4KjGUiUzdkfA4g+8tR6CYPsMyP6gcSewmyTwy5u4s8+hKCKKQtn34eyGC0GyPN5vxwFEq06JuOLkvOI+bl1T5kgmivBg+SWbiE04eNV668gtBePnC1HiZOH5te8FCV7YkgvBwL4l2t6LxUGtuDh+IIPWCoQ7S00Ti8YKsplDnqUSpHUj20RKEhogr7ZVUUF1aIvU4/tT7HAq3GkS/2p2wcq9Mp45mMCGIgmrxX4FCBKH50hgDeOFgf0RNipwnhwAs5iU/EYRyt6Qkf9qFDd2V0vxCEF5y1ejcEAtFlLsCQ1OoFIJwMke5+0BJ+bjmpGfQLGxu062Md9vSEj4MBj3MwFsz/9QShNYxIk1JT4FwMwcCkVhvl/LMcRWzUawOrPWa7GYv9+lmJnSvMz+yQ0yREEQ5NFflNgoRhBLHi5nJulDqsjX6YAexA4wrYfL1HrbSBbXiG4LwkKumwgRBodCS2WyYRzLjVABHTDj78WBiYsKFbnP9u83f0xTqXETEa7KKVV5FHA/Md9oZaz+G5i3djxqSGygd0a0eRzdXB2+pPc2mXIWxjoHV8em8oZyeSjrZc9PN8BNjweCCTb2rXyo42S0FtKY6B90KQXhB1idl3RCEWzOT5uXMubK0ygSRuYiEnwPjEQOBX46M7P37Us8bdKrzCUEU9Pu3yl4OglA635UwDggQHwrwe5npowQcDkD5vDtHcXshCA+k55og7LEwLg+OrfgVSgtiLZtOD//KNZy+dTB7/7LTKpDM7C5WJSjQVc6rcbMd5szXIsafbcvt7lAIwieLvhcx3BCEzsykc7ezL/46m27JBFHCRPaCi9eyU4ggaF5b+APEdBERTrIF5bmHbYoRhALGQzoPO46bieiyA/c37y9XXqaKEoTDOyoE4f718E1JNwShMzMxcG2sv+d860AKmZdUWSGI8qleswOo6A5CBaMFkuYPifApF1/B1oFuSwUJW1OkT0GCyAKidhMNM3YcR4CKMlYpUQrky0rVTDJw2/Qgr3R7m2K+mVZRgnBwKhCCKN+7X7WW3BJErplpos3SjXlJCKK8aq0mQTgERukGpLKq/pVA/SZTb7zBeFTZtd2aIdyaiJyQdFu/HAukFxOTs7zLp48kdrUSYTEYJwA4KN8sIdAN04JbV5ZyBbBqvxzjFxOTt/d50qXasA6/kJnJtjNwTA8uOwhvkypf6WoRRMYL5x6A1E2A1ud5Bu4AjF/Fg7R5U+/ql508cXxHEDmxCc4HzE460LjU5kvW50rxaa8g8+O7U35/JudgP92CqyjsQp2V75B6+XRbzjaoXFsD0Z7P2mWQHUQhrfjwd7c7CCW6JjfTuJnJ9kXieMGQEET5JkG1CEIXcasuSUKCL3TrCeY3gnAbJJhPW5pYgpIJwtqfymVFwfj1RDjN+n8vXl1O8je1dhy125lApUNP3T1PwEsw+eiBgYjKDeX6mZfryQUn+YQgXMPqn4JeCEJjZkpFhAaD8fhonO4F4cj0yJwjUoUgyqf7ahCEzo1R3VtujsVPcHMBkBptKHTSm0yarlJNKFfXzBTR37jn1kTkhKLb+rqFDbaYnUKayiS6U/cjZ72ScggiE3Gt0n8sUJYdBvZl5lOcMgfb+0ztKJLJB8Apz7C82BWS1/q7BMp5Qas8ZSe1iUlBpDEzpdJkgzBsuTgl7+1zQhDlmWyqlWoQhENkco6DQr5RaVK/O97Z7XaBL5Ug1FdMU2vHTQScOd6WBw+5NHHutwpgdbNd9skhCO148lxOpRuX292Xl5mlTbXhYfzp9aA9OBbf9wYm/LuFvBzvjZEdhBcN+aSslx2EEtluZlKpHUwDr1huU8t7f7UQRPkUXw2C0EXKOtmYdSNLp8h+5XZmVtdX7nlq7MWUmsu5if1cJ6vTJqpzCJRrCXVcyYSvZgfvZQfmQDDaRIdeZ1ZlkvU5p1cXgvCqIR+U90oQmqA5dYesejL3yOZPeCY
EUT6lV4MgPnLMsr3euGv0bmsKbReXC6UGmb47gH4AhjqwnBAcxow7YtEedYPbhPQSVdxBpOUzsBpECy1aUZcEnZovpXZzc/g9CKQyxu5JlZ5uwCEXU7iZjVS+or0tDHnj2PDMFYWCNjVZY/NebuVldjmk+3Z1u6CDV5tKyX7c4Iau3+nkEILwoh2flPVKEPoskOODKZgOXAiifIqvBkEoaVtaO65g4PKJkvNjJuGMob7Ik/YRZa4m/SSYr9Zl0E2VdzBnVJMgUiSmv+lNe9lSOtBNXUhEN9luOctCoM/FpN9Fpe41NwN88cYNkaftGCryogCtzOC+5zIf5nthYqlb54BCs83pwiCn+8PTchlnMvhb9guDQPyVwb7I95w82YQgCmnDh797JYjUSxXqPBvEP8kZjou0zEIQ5ZsE1SKIzBfzrwG83SZ9EoxnmRBT13KmdglsfhSUSqtuvaEsCXWlKfBuyz0NvzV45MRo9L6d1jarTRApAmzrWMyM2zVXaaob7GIMehrgt4Ewv0DkuKMXUyi0+IMmBVRqdev1map7txiqsgUvgipidpXjylF1pFNwRyQEUYR2al2lGILQmJkyH4W4Jdbfc1a+rJRCEOXTeLUIQkmcSW6nrub0eqn9Tia6FEDUYH7IkvlUezlRLQgidWCdvvXuxxqScFKYOq/oYuZDLNfX5nVzzbiW3qm5l9rNpNhCBi8b2BBZ76awxzLU0tp5rmZX4KYZdW/19+PDs75W0Fxmjz0pcChe6lxwI3w1y0x6L6YsmA5mpoLmJVVfCKJ8U7KaBJExxxwBxk8czUYTh6YWjtuThnG5SjanOezWzpdSF4VS6qu7uolJeTY1FdBSivTijeZt04Zp7R737sLBdk3tS95JieQtALW5TFeSIKI1caKLK5G0zzrO9F3ldKP6HnAjmzpsD7DxhWi06zE3s1p2EG5Q8lmZYnYQWjOTC/OSEER5lV9tglDSW/IILVPWGQD7WxaTrQQ8wczdyXjiHlucRM7tY0TUPbpr5jLrl2cpC7ySr9T66pzh2RepiRjnAZhvGd8wmJ9iwh3mWGKVGlspqTZSea3MZJiAxQAp09t+ltmRTVfSnYwHur2kIC/DDKO5rZ3vDTKfwYRjkU5lnt01KnPgn4nxUIJo1cb+7v/1coeFEEQZtCNNCAKCgCAgCEw+BOrSxDT51CAjEgQEAUHAfwgIQfhPJyKRICAICAK+QEAIwhdqECEEAUFAEPAfAkIQ/tOJSCQICAKCgC8QEILwhRpECEFAEBAE/IeAEIT/dCISCQKCgCDgCwSEIHyhBhFCEBAEBAH/ISAE4T+diESCgCAgCPgCASEIX6hBhBAEBAFBwH8ICEH4TycikSAgCAgCvkBACMIXahAhBAFBQBDwHwJCEP7TiUgkCAgCgoAvEBCC8IUaRAhBQBAQBPyHgBCE/3QiEgkCgoAg4AsEhCB8oQYRQhAQBAQB/yEgBOE/nYhEgoAgIAj4AgEhCF+oQYQQBAQBQcB/CAhB+E8nIpEgIAgIAr5AQAjCF2oQIQQBQUAQ8B8CQhD+04lIJAgIAoKALxAQgvCFGkSIWiHQ3NZxKxhnjPdPWDXY17O8VvJMpn7nzPlcw/Tpr8xhQpjBR4DwTjD2sWD9MphfBIxHGHz/9OAb+nt7bx0pNwbhcDjwjxdwWIBpEROOzZED2MnAC2BsNAw8lBwLPDw0tHprueWox/aEIOpRazWSubk5PAMGzgJhbLA/cmONxChrt9UkiMmIn04ZbW2LZsc5+EUCzgIwy4PChgG6K2mYV27cEHnaQz1t0T1409cBvM1De0kAMYbxxVh/10YA7KHupCoqBDGp1FmZwagvsOdfNE5gNr8F0PuZcEmsr+c7lemtuq1WgyAmM35WbaUW5CBWgOlrAN5UgiYTDPx4epAv7e2NvFZMOy0tnYezYd6m5msx9TN1kiDcYY41rBwauvPVEtqp26pCEHWruuoJ3tzWcQMY52R7FILwhv1kxi+LxOHt4f0bEnQbA0fnQScBYAtACYDV2rM/gBl5ym9mgzpiG7r/6AXxptaOowi4E8C+XurlKfsIktw5OBj5f2Vqr26aEYKoG1XVTlD7V7YQhDddTGb8FBItLYveYRrBewn4Vw0yz4P4GsMM3BmNdr1gN9fMaV/61oZk4gRiXOLwtb/FYGNhNNr1mBvUlSxsBB8AcKit/I7dZw+3Arx6LBD826be1S9nZWlvXz59V2L47UHgKIb5BZ0cDNwSH5519qZNN8bdyDFZyghBTBZNVnAck3mBq4aJaTLjN+eo8MzGEYpodg7bibBydNesbjeLqjLDPfeC2n3QTQAOsE3nJ8lMfGJgYO3fC0xzam7r+AEY/2Etx4w7ONFwnkszETWFwicS0a0A3mxpZ4yZlsai3Wsr+Kr5rmkhCN+pxH8CTeYFTgii+PmmvJSmzXjlOoA/N7EVHuJgYEmst+sZr60rU1UwgdUAtVvrEvDw6HQOb3ok8opTm3Pnhw8JmPRrAIdkyxBR9+iumcvckJS13Zb54SPZpLsB7L3n//wgklg8OBgZ9jquei0vBFGvmqui3EIQpYE9WfFrCoVPIqKuiecI3JsIYumjvZEXi0XNYVeSZPCXYv2RHzi12xTqPJ6IfwkgkCmzjUxaMDDQvbkIWXS7ke0m+Oih/simItqryypCEHWptuoKPVkXOIWi7CCKm0tqEZ82ggcAarG0sA2Gcdzghq7fFdfqnlqh0OIPmhRYZzM3PZ00eIGTC2xTW8eXiPFtyxf/UAANx/f337W9GHlaWsLNbNCvrLsIZlo8lcxMU54g5s49eZ9AQ/BTRNTJwEcA7JeZTGob+czuF2CdSeaNQ30R5Unh2R862z4InwHR+yyBQsqj4+/E1JeEeVNi5M1DbrfBys88wcE+AO9OycpY39jAC7MugYfPX3JwwOSzCbwIwDszX3jKt1t91W1gwvUH78+xSCSi/pfztLeH3zgap3tBONLVi2ULLssj3+st8zva2MRVAOYBCAJQAUnrC8mk5FDjCnJyCZg6AfyLxZVyXFcwzJsO2g9POo3NPp5KEESp+DlhXonxu9KvplBza/g4gO4FMC37c7mdF3IX/FRPKwb7e67Xya0p/1uDR06MRu/bWcw41QH6tETi1yA6EEwp0xbBvGKgP3J7Me3VY50pSxDz5p26NzXE/4uAzxZwtVN6TQLcx8HAmW7tqulF2vwGAcsyC2Gh+fE8gIsPms09hRY3pwVYdTCSoCsJ+HzhPvkpMo3TBwa6H7ULVuoCp5MvGY93BBobvgzGBRYTgLXrMQY+GevvecQuz7y28KEGkzItHOVQ115lMxl8/sCGyG8KkXo9EEQlx19oUuo/INqDo4n9VgF8iuX3vF/3xfSTIkTTfAjAB/bU54Gx6fiE7iyiqa3jAmJYTVClmJiKEXnS1ZmSBJHZOv5C4y1RSMHbGDhVt4hZK2YOuNRXht0bo1D7SQZumx7klfkChHQLMMzkeQgYtwKkvszdPjuZabl9y1x2ggD3Mowogb/svMBrX3xqbu1cBvB1RQReJQj4xujwrG
/n25n5nCAqPn63E8VaTncYrCKgG4Nbz+jt7VU747I9Ta0d1xCw0tLgq2TycQMDkcGcD4lQuNWglElorz2/0Y1jwzNXuN2dl03wSdLQlCOI5rbwXDCpgyx7EI2KmlTh/VEwjRL4EAaaNAtTXjtrS1vHYmYocrAHALltX4UQPRBv5FOcPDbsBMHA7wkYBTDXMi93UipdAD0N4kYAIXDKuyN7gJct+seEYRz76IauZ7P/UH7ho/FdK0D03vT/zI8D2b9T/9gI0OPjfbG5cTAaUe6JqSeHwAC1xVcypM0RjGdg4LfpppHO0ZNrOqDmtvBFYLpaI7OKtP0DgZRvfBLEHwbjMB3mBLpxdHjm+U4LRGUIojT8MjBWZfzFrGOZQLT/sZqXwHTOYLT7p8W0l6+Ol74czkWSzLjHDPDF5UjfUe7x+b29KUUQ89oXHWQkgg8C+JBFMY4TKPUlnaTLM37Vyl6efpjvhYmldne35vlL/g2mqb5grOSTJEJXAImL+/rWqkCh8Sft+00du/2tr7HniiHQDdOCW1fqvsg0C7C12X8y0wXxkZk99kVxXtvSjxicVBGm1vGruo52XfWj10PqPPINE/DlA2fz9RYzGrW2Lj4kmTR2WCNV9R4ySAD0PTMevNru054nzUNe75dKEIT9pfeKn6pfrfEXs0BpTDmvm8zHDUUj/cW0l69Oa+vidyUQ6CXg4PFyeRIqNoXCnUR02wTySldUZuLfgfETMxG8X5LxudPUlCKIltaOKxi43EoOYL7soAPw3Tx2f92X3NjuybZwsD+iyCD1ZBKDrQbRQkv76vD04sH+nh/ns4WniSvwC5t5aNhkXjgUjTxsV2WeBfhJJHnh4GDkL07q10aa2g65S13gHOVj/spgNKK8TPIe9re0nLyvaTSss0XmbifiZQN9EfXl6vikSZp/AfC7LIW2GJw8Jhpd84dCY0MFsrl6JYhqjt/dMjGxVEuo42dM+Pfsfxl4Nohke3//mr8V016+OqHQSW8yabra8R8xXi7PfFWxGY0zXrmG07EZ9t3ynq4ILyvnDiLcR6axXhflXe6x1GN7U4Yg5s1bup/RkFQHoONfz27D5/U2+Yk2V61LHPDNWH+PyiRZ0PupuTn8HgRSQT5v3/Mi6HcqDguwI6HkLIqtHecBUHb97PO0GUzMH+pgUR4PAAAL/UlEQVRd+5xuEntd4Bzkc32I2dLWuYSZ77C84GoXtnygr0f9r+ATCi1pM8m8f0KQE+FHg309F9p14ccdRDXHXxBMW4H2dnVAve9dAMKWn/4apESbfYfstW1d+ZS5M/F6BKAT9vzOT4wFgws29a5+SVcnTRI7Lsl8DO7Z+ecXKG2SZe6WHYaVR8uhxTpoI7NlV2Hy2a8KT0Ev9peWgT9zPDA/u1XVHKZtNuOBo7xsZZtzF26tjNoFuMAuwKqiebmHeXmxKBNBRBqD204pdIjpsCB4jWCl5rbOa8G8YnzchL8gwe2DgxHlLTb++I0gqj1+r6+u9mPJw9zz2p8qn6MjwBUhZby/VFp6dZbovJvQCsVPAcbVSJqRqRQ5bYdiyuwgchdwb2HzyhaahKHMG0kQPUZAlBO8RtnNW1tPeXMS8QetJiJiXDUQ7bnUywuhtbdqDv90BOGlP00/FScIJlwa6+tR8Q95Hx0GzHRaLNqtzk5cPxoS1LrR+o0gqj1+14BmCtYTQWREprmtne8NEJ8LhnLL9Zrh9Z/M/IVYNNLjxhLgFU+/l58SBPGRY5bt9cZdo3dbE4q5XbDcKLClJXwYDHqYgbdmyjv69OdrT/v1qLGJ6wjCS4Snpn7lCQI4Kdbfo2zJeR9NuoQX2KAFXlM+jwc5gVTwY+rRBXL5jSCqPf5C+rD/XocEYR0ChUJLZptIHp8JjNV5KeogUR6IP0OCL5xqu4kpQRClLqiFXiKNK15Ri5rqpyXUcSUTvjrep2b7rhlPkplOjEW7lYdWwacGBOHay6U51Hk2iH+yZxD57c1Og9XayjVk6zeCqPb4C04WWwHdxxaAkiKW83805Z552M27XsdgKZ8iDDbMI5lV0B+15o+3mXoxFVOCIDLurRssWR49LaiFJmBTa8eJBNxnKefKRqprNyddgDuC8HSeUgOCcC2fm/EX0kf2dzeLv5sybvtzKuflDKfa4y9mbMWeCRTTl1cvpmL6yNZJuZ1vxaFg+iIYyv3cHss0xsynx6KR7lL6qae6U4MgWsNzDJByF83md3e9YLlRZkUJQnMg53WBt4/Ba30vC5zqy2v7VvkqukBqyHZKEUSZDpPtOiLgJZh89MBAZE/wpJsXx0UZzccd1P0OsWjP6ZU8E1BpPhpM86bcey6cU324GE7dFRGCKIPKKksQnJORspQFuJgFfNIQBJDjSTWlCEIz/mKmvy66uRhHAjd9a/siXBjr6/mhm/qllHFIO+7aXFpK336pOyUIQuMZIiYmazZYoNKH1K53bBXdQdTBGUS1x1/MQtTcHD4QQeoF4z176lcmF1POmRyQs0Ar546RxOtfJGApYMwA+G1M+JYbr7lC49fFN1UqrUghWWrx+5QgiHo6pLa746qbtF57Q+PJT6y7/fXsBJnMO4jyHdLmBlgR4+cD0R6VvXf88dsOotrjL2bRSTsA5GRz/QeSvCBfFL/XvnTBrUCuiUcfO5K7W/Tavyrv1huumLbroc6UIAjdQVcxbq5NrR03E3AMwH/nVLI647ZYf9dQ0/zOD5DJKgp6dkbpRe1QdB4iukVtMhOE3aRQrH1bLS7UkNxA6XsjUg9rTBN+I4hqj7/YRUp7H4SHzAFu+m0KdZxDlIr4Hw9yc7pzIifOySEw0k2/1jIOa8clsb6e73htqx7LTwmCUIqx548B+P7G4F7h3t5bR9woThcMl7W7+iBQzrUJR43VK8FU8wyi2oFifiOIao/fzdzXlUnlHgtgDUDHW37fzjCOVx9NxbabradNPQM4pmvRpSdhxopYtMfiMu1dKgd9TJlb5aYOQeTm9/F0mYjGFjkh1kGTaiMnjXah6am5Ecttqo1JQxBac4FH7xtlAhmL73uDNaEc6jnVRgXHX2hO5vt9Xih8tEGpW+Ws7qBPkpn4xMDA2r8X27bD4bA20DHbh+6OChUvgaBxrNtLvnTyanYxRcc4FYtHLetNGYJwmEC3xIdnnV3oMhHtgoOJqToqkqzP1kd2onjdAdgnmNf61dxBpHZ7FUnWR9cN9nWri2cmJE702w6i2uMvZfFJvReJ/a5l8LnWdlILs0Gf8hr9rto4vD28fzCB1QC1W9tUZ3Gj0znsdEeKug20qbXjPwm4bOKYeIiDgSXFkIT+7pjKHMaXoodK1p0yBJF68XTpvom/MtgX+V4en2pqCoU7iOgWy5fSGDMttd7E5pDuO8HgS2L9EeWS55jR1SndNzMviUUj1gC81FzwusCXmyAK5X0qVT6HdNfa2+/sY3NI9+14yVMtCKIQftUcf6mLi9PXPoDtKlniQQegu9AVuhkZqGV++ONsGj+zpWpXP28D8YmDfZGNeXc0+vteVJV/7r6N98tI8mo3qTJSAXNb0Akid
f6RjZ1S7bxqsHFCNNql7oOfEs/UIgj9PQPqms+fmmPxr2/cePfLVq2rvDMjCVLuc+qrpOgLg9SNVsmAcYH11jbVj5qIz24xFhLxf+deT+oc1l/qAuy1vsZ8lvfeCa/t67f24ZOIqMtmvkgQ44ZkouEy3YVBFDDOZPC3bOkS1I1zjh8B1SAIr/gpPJwuDCr3+Muxyjl84GSb/iuYvmuAfqm7cyHtJaRuLDS/BNDHNPIocj95cEPXgBtZM4f8KmvzmzTld4DQxYw18WDg8U29q9X7nvpwa2//1KyxscaDTINPI+A0AAfa6uedR25kq8cyU4oglILyXDnq9hrLvDbWclw5Wmg7XeoC7LV+rutlaqqru4e3AKQW7Q3TGmacmz3w99q+w4tTjis3leUh753E1SAIr/hlv6jLcOVqwfGXa9Fqbg6/BQFSV45+Ok+b43MmXYbfVuCu8c1sUIdXU1VTqHMREd9axD3mTqInC11dWy4c/dbOlCOIFEnobx0rqBu3ttVMRs5V9mtEC3aQuhYRPWPT+dw8ttaqm5hCocUfNCmwLneXkx3RxGjvMhGEapxaWjvP1ewKXEAJRfjfjw/P+lq+M6ZqEIRX/CyDq/j43QDptky+K3TdtpEp57hTdNtOnut13TaRLbcTjP8cG5l1TaGzSq8N10P5KUkQSjHz5p26dyAY/yYT1AFboVunPE/YVC4X5u8y82IX7auvqaeIcNFAX+SBQjlmSl2Ai6hPTW3hM4hJXZ1qT2Cm9ugTrpwsov2874rXi18YiAXY+EI02vVYoZewGgSROkD1gJ9d5kqOvxA+xfyeerca4isZ+CKAWR7aUFf03hykxJXluJ1O3SzXMH37IiJcAdD7Pcihig4DdFfCoG/YTcMe26nr4lOWILJamzv35H2C0xpOZbA6lHofGPtkfhsGq1ul8MtEIPDzYieJIoqAmQwTsDinfeCZ3R/J6xi0+uDZycdcHuZVfQeRxSqzUKnzmCMB7GeZ+RPSH5SbIDL9pC5+CTKfwYRjkQ6Ay9qZdwL4MzEeShCt2tjf/b+FSDYre5UIItWdW/wcVpSKjL+Sq5faUfzjBRwWYFqU0dlBE+ZN+l7oZwj0W2JERkZmbqrQVzo1tS95B8X5WCZW3lEfpXRQq/WcYhjg54iNqEm8dnqQf9PbG3mtkvjUQ9tTniDqQUkioyAgCAgCtUBACKIWqEufgoAgIAjUAQJCEHWgJBFREBAEBIFaICAEUQvUpU9BQBAQBOoAASGIOlCSiCgICAKCQC0QEIKoBerSpyAgCAgCdYCAEEQdKElEFAQEAUGgFggIQdQCdelTEBAEBIE6QEAIog6UJCIKAoKAIFALBIQgaoG69CkICAKCQB0gIARRB0oSEQUBQUAQqAUCQhC1QF36FAQEAUGgDhAQgqgDJYmIgoAgIAjUAgEhiFqgLn0KAoKAIFAHCAhB1IGSRERBQBAQBGqBgBBELVCXPgUBQUAQqAMEhCDqQEkioiAgCAgCtUBACKIWqEufgoAgIAjUAQJCEHWgJBFREBAEBIFaICAEUQvUpU9BQBAQBOoAASGIOlCSiCgICAKCQC0QEIKoBerSpyAgCAgCdYCAEEQdKElEFAQEAUGgFggIQdQCdelTEBAEBIE6QEAIog6UJCIKAoKAIFALBIQgaoG69CkICAKCQB0gIARRB0oSEQUBQUAQqAUCQhC1QF36FAQEAUGgDhD4/9vTzY6FRhb6AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-4"><g><rect x="481" y="21" width="100" height="60" rx="3.6" ry="3.6" fill="#f2cc8f" stroke="#e07a5f" pointer-events="all"/><path d="M 495 21 L 495 81 M 567 21 L 567 81" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 70px; height: 1px; padding-top: 51px; margin-left: 496px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><span style="background-color: rgb(232, 205, 151);"><span style="color: rgb(0, 0, 0); font-family: Helvetica; font-size: 12px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: center; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; white-space: normal; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial; float: none; display: inline !important;">OS Moves one core out of ACPI C3</span><br /></span></div></div></div></foreignObject><image x="496" y="30" width="70" height="46" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARgAAAC4CAYAAAA45fPWAAAAAXNSR0IArs4c6QAAIABJREFUeF7tXQd4FUXXPioqvYkioCBNekep0qW3CNKlhYRACBAgdD8R6QmEFgiELkUQpApIEUFAkCpFqgiIFOnVDv//ru5ld+7s7uy9d3MNd+Z5vseP3Nkp78y8c86ZM2eeokROl/dNfZTIVcrqJAISgX8ReLlUl6cSE4xErQwdkwSTmMMr65II6BGQBCNnhERAIuAYApJgHINWFiwRkAhIgpFzQCIgEXAMAUkwjkErC5YISAQkwcg5IBGQCDiGgCQYx6CVBUsEJAKSYOQckAhIBBxDQBIMA+3fDx/SmXO/0Iov9tKWb76nM+ev0K07D1y5nk32DGXNnIHKlMhD9auXoLIl81La1CkcGaCLV27Spu1HaPWm/fT9qQt0/eY9XT2ZM6Wj13NloSplC1DNSkUpV46X6Jmnn3akLWyh3T+YS0vW7OLWFTesAzWp86ZP2nHqx8vUIDiGbt2+rysvRfLnaOWM3lS0QHaf1CMLcQYBSTD/4nr3/m80b+k2mvLxRreFbAV9tfKFaHD3ICqQJys99ZR3voSPHj2i3QdO0+CYJXTkxAWrqnW/v5AhNQ2OCKKg2m9Q8ueftfWt3cxmBAPijR/ZiZI94z3ZzVv2NfUdsdCteZJg7I6Yf/IHPMFAYlm1YZ8yiUEy3qS6VYvTqAEt6aUX0npUzK079ylq+EJFYvEm5XntZZo+qhMVzJvNm2JMvzUjGPR/9ewoypEtk1f1//7Hn9Qxajpt3n5EEoxXSPrv44AmmAe//k4DRi+mxau/MR0BSAbPP/ePRHD1+h3686+/DfNnTJ9aWdwV38hna1R/uX6HQvsl0K4Dp219Z5Q5XZqUNH9COL1RLJdPymMLMSMY5PWFmmSkHqF8KcE4Mqw+LzRgCebOvV8pbMBM+nLnUTdQoV60f7cytX+3Er2a9QU3u8b9B7/T13uO0/iZ6+jg0XNu38NOEz8ymOpVKyE0YCCsAaM+ofnLt7vlh/qFdhTJn50yZUxDKBsJkte1G3fpu+/P0ZxPt3H7AQliaXxPpQ++TlYE07hWaZr8UQev1CQj9UgSjK9H07nyApJgjBb0008/Rf27NqROLapSyhTPW6IOe8nRkz9T2MCZdPrsZV1+SBCfxEVQiUKvWZazdfcxatltEj18+PjiN8hh9tgwYTXnp4vXqVO/BIVwtKlV4wo0ZmArrxY6rwMswYD4/vr7IQETpCwvpVeMsNk9VJN++/1P6tA7XjG0I8G2pZYtCcZySv1nMgQcwWCSTpm3kT6auFw3CDgZmhXdmYoXymF7cKBqwYazdO23um9zZX+JlkzpQa9kyWhYJhZlt/dnK6dWasLp0IoZvSnnqy/aagtOmVp1n6wjGRDdypm9KX/urLbKssrMEgxwey5ZMvr2ux9cn8aPCCZIMp6k4z9cpEbBY+n23X9O8GpXKUZbdx2jX3/7Q/m3VJE8QTXxvwk4gjl84idq2nm8a+ICckgLsFfkzfmyxyNgJBW1a1qJhvdtbihBXL1xh+q3j6ZzP19z1T2ib3Pq2LyKR23hSUM4WerWvqZH5Rl9xBJMuVJ56e2KRWjohM9cn3ijJs1a/BUNHLPYRSZDIpvQkNhlkmB8OorOFxZQBMOTFqAWzY4Jo1qVi3qNNmwiTcLG04kfLrrKeu7ZZPTZ9EgqXZRvbD107Dw16jTWtXBwtLtseiSVKZ7Ho/bAttQifBLtP/Kj6/t3ar+hGF29PULXNohHMP/r8Q61jJjs8lnxVE1i1aOq5QpSz051lH5JCcajaeG3jwKKYFixG6j72kbx+ZcHqFPfBJ29oEOzygSphLfAWYKBgXlFQm+PVDV1Fg2KXkKzl3xFULVQJ060oge1cp2E+WK28Qhm5phQivhgru5Y2RM1iR0nYAeC1hKxXRUJRnHYpz5Z9Q1t3nGUfr58wwUD7G1QIYNql1ZUuhczmrsZ8Ei8R8faNCC8kS1oseGFDZhBazYfcH0X3KIqDY9qZlqOWV/Sp02pHAg0b1CO6lYtJmRL5FWGPm7Ydlg5YT18/LzO2RSnqgXzvkJBtUpTnarFKUO6VIbtDSiCiZm2hmKmfy4sXdiaLf9mxglTm55x9M2+U67PzXbyoycvKAvnnsYHxxsVyZM2e/INj2Dmjw+nZeu+1TnGYaKP/+A9W9KTVj1SbUh//PGXRwSDxfj55gP04fjPdKRi1ucGNUrS0F5NKUvmDIbZ2LlUrGAOWhLXndKlTSkMJ9TiBh2iCS4KSFbSLuyHO/aepD7DFtDZC1ct64HhHYcWIC1Rx0vYE0fGraKZi7foDh3MKoM6D3JNkyq5W7aAIRjerlOycE7lpMfXrv6841Wjnfz2nQfULHyizjArYhy2nF0OZzAiGFxv0Lr2w8C9elaUcqokknjqEU7TTp65ZJtgRP2ceO2y8mdipSxPVFt2npjNR+AyMm4lTVuwWQRGXR6Q35yYzqaEiQ/g6BkclUA79p6wXYeRHTNgCIbntCUijtpGmojs1sXuhqjztVdepNEDWyrqTWLdL7LTVyOCSZbsaTfv27njugjbuHjqEXZIVpW0UpGwoQRHTaevvz3u1i3s7LhDhkXxx59/0c59J+nCpccqk/qBmT8TS4T4xs584nkpGxnjzfykoN6VKpJT6Quktf1HzupsgGpfrDYtM9eNfLmyKio75qEZXmVL5KF547vqNuyAIZgvdxxVjnC1yRfeprxFyZNKcMoCFSJVSnf/GviwNA0brztJUsuFvtuiYXlq0aBcol5mtCIbI4JB/9id2Y6axFOPYB+xQzCwbQwas5jmLt2m6wakEtiicOTNkvalKzcJtqu1Ww7qvoGKtnRaTyqS71U3SNh+4uLpioRehHqsErsJGbkTGLlVmG1AN2/fp9FTVtEcpv+VyxZQXDF4cxAkizmo9cWK7FSHItrX4tpxzv98jXoPW+BG4KykHjAEEz9/k3LMqSZPRFqrSaP+jkkRPng2fbZ+j+uTbC9npLVz+yqGV16CcRiexWbXEHDiVaxADuUWd42KRRTCUT17Rdvmq3xmBMMuHlE1iZUKqlcsTLOiQxXjtB2C4R3Vi6gJGLfZS7YqF021C+3tt4pQwugQNzsGFhnsZ5d+uaXACoP64rjuVKlMfkuYtUSKzDgpgyrI2krYfiNvhdL5aGZ0CKVPa2xcRV9w2z1y6Me6vsC7umld95vuINeZn2xxtdvsYELNBCmxbc8puust2jFDvoAhGOivE2atdwFoJWJbzhCLDOyApU+XilbP7GPqa7N9zwkK7T+DbtzSh2Uwqypf7qxUu3JRalq3TKJKOGYEwxP/RdQkVj2CB3LbJm8p3RclGG8dF7Ewx8SvptgZ61ywg9gXTY6gymUK6IaCV5eImsRTr3jSNNoCXyCQnpqsVB1tA3l94dl5YKtq1yteJ43Mi+2ihACxSlt2QjOIc52ashddA4Zg2AVhJVFYAWv1++Q5G2jYpMfewqKEhhvdo
6asUo6ZtbuoVX34Xb1D1eW9GoaSkkg5InnMCAbfs+qDyI6oxYwlZFGC4dm/7J7KwfkxKCRWd/3DSM37YushatdrqgsyETWJJVKjU0ZWQkIlRhKI0ZiJSFm8k09R8wFUy6DQWMKmAikR6mxIy2oE1T6gJJikQjDqRIEejbAN0xdsptPnroiseV0e7KSDujXy2A/CqkIrgmEXOkJILE+INPQxYSc5K2qLEgyOyaGeqsnT0BGsxGtEHCwZQU1aODGcqpYvZAghu/kYeTyz5CWqamor5vnasD47PJXeF97tkmBMbCJWC8zqd08lGF65WHzfHjxNy7/Yq9yahsewSIL6NGdsmO07TSJlWxEMqyZZLTyWQLTqEdojSjC9PppPC1fscHWBJSqRviHPtt3HqXn4RJfobyaB2rFf8KQFI/WRJbm33sxPc8eF2d40YBpAWWriYYK7cLi0q01QDetXL0ktG5aj8qVf98hRM2BUJHbiQSxdO6evpW+A6IRk87GTI3Wq5Mrt4kKvv+Jpka7vMEn3Hf6R1mzer5x6mBGOHZ3dTsOsCAZl2VGTtITMkzpECIZn+xGxifD6zTrBKf0xsEvsPniamoTGKrfJkcykNbYfRnl5kgfCbrB2IJExO3T8vELQauI5BUJibhkxiRt+RP0OGxaunSCwmmh41oAhGF9KFFaD6skpklWZZr9DTF+yepdhuE/YD2IGt/bpiZMIwYiqSeyuzgu5KUIwPOnA04ueV67dprrtxui8f40IhnXiNJPWWJ8nIwLk9cWbOaL91sj+iKsUuEsmcsiA08u61UpQy0blqULp1w3nVsAQzIZth6ht5GNjHABfOLEbVatgrCt7OqD3HvxGrSPiCDubmnB5ccGkcEqd0t2d2tN62O9wOhE3bwNFx6/R/WTlgu5J/SIEw1OTeMe4LHnwDIz/BYIxIyt2A+MZtVkiMnOV8AfBYB4Y+QOZzRGoUu2aVFIupLJuGAFDMLzTBU8uqIksRl5duFQ57v02Ip97lcfIMcvXfRUhGJ6axNuxrdQjlPNfIBizkxX2ZIin+uw9dIbeCY1VvGGRzK4G+Itg1MmHKx/TF35Ji1bu1IU2MZucvOsVAUMwvAFz6i4Se5KBQeFNTngX4/Ll5au3CL4IcMn2hZTDu3flqbHTaEKJEgxLtuxpjIh69F8hGDPfEJ5vC2u8Ze1yfULrUZ/O9bkQ+1Ld82bHwoaFaxSILAiy+e7YOVP3CdbzOWAIBiDzblN/OrWH8saRrxJvohn5ObBGUE+PVHltZ082zK4qeNJ3UYKxUpNYycToUqiIBONLIy9PCrVSqdmNRes7w14fsYo0yHN+Sywp2Gw+wNMc74Yt+XwXLVu7my5fve2WXaseBhTB8OLB4Gr+lOEdfWYA5bmpG8WcYY9CMVIiHq8ihMAeTfqLYKzUJK16ZBbWQoRgUBdLrJ4e7bJ310ROAVmnNq3fCjvWRlcDtGPLnnz6egxF5pFZHkg3X+06plxxUUObIj98aNbMiVJ8ngKKYHiu17D4x4/oSI1qehY7VjsAvKM+MwMrz1PT6N6LncnAO+L0dVQ7UQkG7WalAfWYNFmyZ3Sxc8xCbIoSTGI72mnHhXd1QN0wWOJj/Xx448tKuFZSD68Mdc4vX7+HcHqElzDxEmi96iVdYVwxPjgE2XPoDB04claR6LHpij6ch6BUPYbMc1Wv9RkKKIIBAid+uERBoeN0R3F2XgAwWuhG193NIubxJqQvCI/XR5EJbYfE7BAMq7qopycpnn9OF+PFLPqdKMHwVBu7R9UwcDbpHEs//vQ4qJPojXD2bg6M2n0719fF/BFVhXkSN2449w1rIBzAC31o3Gks4dhdTayh3VuPYbOxCTiCAchT52+iDzU3q/E3kAxuzIrchGUXojevCuCZ2He7THCdLKBs7ABYbDUrFRGeSGqbeHFQPI2Na0Y4dgiGpybhVCtNqhSu+1pWbRQlGCcuO1p5IWtxYq8OQFrr1akuBUdNczniiQZDhz0vpF8Cbfz6sKsKO5shL2wFjpTxVlb5Uq+7yuQ5Fdq588QSlNaJNSAJxiy4DkL/4bKWaIhBRFrrOniW2zvSGMiEUSFUr7r542tGsUsw+niRoFdIXeGLi0ZtsbvriUgydgmGlSxUj2aEDEWyWnSiBIOyeHYweKEumBBu+oSMUbgGszgqPKy06hDmEYKGbdI8f2vHzmb0ZpbVKxjoy6qN+6jLoFmWoSd4pCxKZLyQDVpHyYAkGEwKAAMj2hrOO9CILdqtfS1qVLOU4cuOuBsUv2Cz8lYPm0AuU4d3pIZvlxKSQODqj4fX8KQKL0Gqala/HJUrmYdeypTOZZCG6nH1+l3atvsYzVrylRvJoSwYOWdGh/o8LKhdgjF7ZxrttFp0dgjGiLQxrlAVG9Ys5RZwCioE4gXBVqFNogtN+w3r76L9zerSJzv+Rn2BlIvg4O/UedNtM8SNfByJI+aMaF94kjTwwlvrmMe8uEO8DY0NbRGwBAPgReOcat+mvnXngeKzYpSsYrkafQdfg9Y94rjhDkUkCl4ekV3b07LtEgxPTVLrFrklbIdg1A3E25CZolIoiyHPD0nNIxK2gleeUV/UIGSqRIhNin3dE+Uh38h+LRSpmJcg8fxv7FJKWPSl288gF7xUUDBvNuW3azfv0q79p3QvDagftQmqSCP7t3ARUkATDECxG6ndbEHiEhh2FbNo9GbfY+dBqEdEIvM2oS3YfWBQdCJ5QjBGj9mLGFDtEoxKMlHDF9LKDY9fzRTFArv3tJGdPL5Kwl4dQL12It7xSMZI4rbqE8hlWJ9mBHIzexvLyJZoVb76O8ZxZL/mutveAU8wKjiQZiAeI0gUnmC1k7CYB0U0ptw5Mtv5zHAnOXb6Ig2buJz7oL1VBQjOPKRnE+Wo0ZcPrbH1ekIwRmqSlXqEuj0hGHyHQNirNuxTwmCKjit2+X5dGgjF1jUaD94JkLee4570BfNh4pB2BA9qkYQ65n66TXmhE2tCJEHCB4HxVE9JMAyCkGjOnP+F1n/1HX3+5UE69/NV3cSEuAhHIjg9gVg8jZMhMnAQtSGKbtn5vfLmMx4Lg4qmTeojWHAYrFGxMOGN7cRInhAM2sX6doioR94QjIoFDPt4U+izdd8q/9U+vKY+VibykJgotjwyNbsaIFou8qEvB46epfmfbadv9p9S+qJGP1TnZ60qxahVo/KEcB2ebDRavNg6IBHBp6ZcybzU5p2KVKLQa/I2tZ0BlHklAhIB3yAgJRjf4ChLkQhIBDgISIKR00IiIBFwDAFJMI5BKwuWCEgEJMHIOSARkAg4hoAkGMeglQVLBCQCkmDkHJAISAQcQ0ASjGPQyoIlAhIBSTByDkgEJAKOISAJxjFoZcESAYmAJBg5ByQCEgHHEJAE4xi0smCJgETgP0cwl/dNfSSHRSIgEZAI8BCwIqynrGCTBGOFkPxdIhC4CEiCCdyxlz2XCDiOgCQYxyGWFUgEAhcBSTCBO/ay5xIBxxGQBOM4xLICiUDgIiAJJnDHXvZcIuA4ApJgHIdYViARCFwEJMEE7tjL
nksEHEdAEozjEMsKJAKBi4AkmMAde9lziYDPELh05SZN+XgTtWhYjtRXK1G4JBifQSwLkggEHgJ43XT6ws00fsY6SpbsGVo5ozcVLZDdBYQkmMCbE7LHEgGfIHDtxl1qGByjPHyIlCL5c5JgfIKsLEQiIBGgK9duU912Y1yvbkqCkZNCIiAR8BkCkmB8BqUsSCIgEWARkAQj54REQCLgGAKSYByDVhYsEZAI+JVg/n74kL77/hx9suob2rzjqMsQhGFJmeJ5yp87KwXVLk2Na5WmFzOmFR6tyXM20LBJy13558V2oZqViir//vOvv2nLzqM0e8lWOnD0LN2680D5+7PJnqHXc2WhpvXKUIsG5ShDulTC9bEZH/z6u9KfhSt26OpAvmwvZ6QKpV+n9955i0oWeY2eefppj+vx9MObt+/Tui0HafkXe+nw8fMuDJ5++inKkS0TVa9QmNo2eYvy5nyZnnrKMp6YWzPU8ucv30HHf7hIwENNav/bvFORShR6TcFdJLETtVypvDR/fDilTPEc7T5wmj6auJz2Hf5RKSp92pRUuWxB6tSiqiXGRm31FRYifTPLc/HKTVq+fg99tn4PnTxzSZm/SNr2tQ6qoMxd0bnErg8Vy1QpnxdqrtX3h46dp0adxtKvv/0hVB4RNSSi1UaZLWcgG9EOxPL55gP04fjPdKRi1poGNUrS0F5NKUvmDJaNNiKYb/adosihH9PZC1cty+j63tvUt0sDSv78s5Z51Qy//f4nzfxkC42asso1Ecw+fu2VFylmcGuFcDxZyMIN+zcj/BHQttlLvqKHD62jmJYtkYcmDW1Pr2Z9QagqOFKNmrqaFq/+Rij/CxlS07A+zahhzVKWi4NHMLOiO9PEWetp6vxN3PqA6eK47lSpTH6334HFyLiVNGvxV0JtLV4oB00c0k5ZyImRQCbvj/2Utu46JlRdntdeplH9WwjNJSuCsKrQ6nu/Egx2swGjFwtPQm1nM6ZPTdNHdaKKb+QzxYAFYO64LgqRDY5ZIrSw1MKrlS9E8SODKW3qFFaY008Xr1OnfgmKRGY3dWlTgwZGNBbeze2Wj/x7D52htpFT6cate7Y+T5cmJSWMDuEuUm1B2/ecoND+M2yXjzKaNyhHI/s1V6RWo8QSDMivbMm8NH7mOsNvShbOSZ/ERbiN38Gj56hj1DSCdGAnQWoAIXZoVtmxDeHRo0e09PPd1Ouj+UKbFNv+fl0aUrf2NU3nkhVBWGFi9b3fCObOvV8pOGo6ff3tcbc+QFTGhIGI/seff9HOfSfpwqUb3HxY9PWqlTDEgQWgUc3StHrTPh25vJIlI5Uv9To992wyOvfzNdq1/xR3QAdHBCkDZpbQzmZdJ7icibR5X34xHb1ZPI8yydF/9AvOR2xqE1SRRvZv4QjJQBVsET6Jbt/9Rx3UJhEcQDJLp/WkIvle5cIAaTSkfwKXvEXKR6FWZM4SDObLX38/JCxIJOBcqUwB5f9v232MLl+9TSP6NqeOzavo2myGBSTKN4vnVubEtZt3advu4zr1Ti3o/e5B1LXt2z4nGfRlyryNirrHS/lyZ6WShf9Rq4+evEDfHTvHxbxd00o0LKqZ4VyyIghvCQab7bQFmwkSPQSKNZsPKGsaCVJl7cpF6YUMaVzVzF++HYv5oFG9QioSJsOgMYtp7tJtunIglUQPakW1qxRzE5Mhcg+KXkJrt+jrtprwLIDaCutWLU5DIptQ9myZdO0AGHHzNlB0/Brd3yF6Lk+INLQBGZGmUT2YRMdOX6TuH8yhIycu6Or6ILIJQZrxZQKG73adSKfPXtYVa9Q+TAhgwKodb79VRJFkWJXx8ImfqGnn8W7kFVT7DQXnzJnS6eqFerxqwz7qO2IhQU3RJiyM4X2bU7Jn3O1SLMGo30Gq+LBXU4VIVBsEMD5/8TqlS5OC0qd9bEsz2gjwbVTn+m52N7QVklnkhx/rpB3UOTsmjGpV/seu56v0xdZD1KFPvBtpYIPr0bEOpUmV3G3OGqnkZnPJaYLRNjLRjLxbdx+jlt0m6cArVjAHzYnpbGpXwWSBQZZVb4wmPDpnRDADuzWi8HY1DfV9ox0EKpbRZOLVZVUP2oiFjEW2dO23rvHAYlwxozflfPVFX81Zipm2hmKmf64rz6p9PByw8yycGE5VyxdylQVSDumXQBu/Puz6m6gaAeJr32eaTqXEt4smR1DlfyURs4mq/oa+RLSvZSlNYIMD3jC8a8lp6vCO1PDtUqbf/3L9DoX2S6BdB067voVNZtGkCK8OA7T9g1TbJGw8nfjhouvP8HoFqdeoWNh0PmCjahs5RUeC2LiXT+9F+XK724yeOIK58G3co27vz6YVX+z1aDFhwo+JX02xMx7r22aTkbfozQhJO3pXb9yhoJBY3Y4/ILwR9ehY222QeXntqDo4wWgZMYlgE1BTn9B61KdzfZ8QDK99sHfAsGx1enP/we/UpmccwTCuJpzmTf6og0vCgF3nndBYl/iLfJGd6lDfsAaWCx55f/zpKjXuNFZxJ1eT0TjxJJgsL6VX7rWw0igPPJxmNQoeq5O07Kg6vLbGjwhWTjh9kbA2wgbO1BUFrJvWfVOoeJykvdtlgm4sQlpWo6G9m7qNxRNHMF8v/eBRg+AYunX7vgssnn5shqTRYhn/wXuWAKJcMylEWy92urABMxS9UU3N6peliR+2c2seOyk8kUDYMnBKsSKhF2EH8jZB5G7Xa6puR2RvsprVwbYN9rE1c6Jc6iLUV4joarJSJ3l14RRn4JjFlm3kEUz96iUofmQnrkrF1sVKcp5IIGwZ1SsWplnRofT8c+InjTwMfv/jT+oYNZ02bz/i+rlquYI0e2yY8CkmNmHgCGlfTbB/rZ4VRSBibXriCCZuWIdH4YNnu/r40gtpafXsKMWgayfhWHHCrPWuT4wWIwug3frYengEgwHt+eHHutMwSAc8wjPrIwzMDTpEE8RwJNgflk2PpDLF89iBhpuXJQC7kxZta951otKmkkVyKgZQLGrYNW7feUDNwifqVBxIeZD27CS2//h2zMBWih+ONvEIxkiyZOvnSWMixnu2nN0HT1OT0FjFuIxkd14Z4cLDIG5YB2pSR0x6Uctl22d0TP/EEUyrxhUeaXVfT5kfVv3m4RNdJwe8m5kAmwUQi3XBpHBKnVJvJDMacPZ7HsHwFhiMmmE2jbT3HvxGrSPiCJNDTbwFZmfRIi9sPO16xetO7EQXpEhdOMWAM9W9fw21Zj4nZuXxdm8e3jyC0TpQ2iFx5F04sRtVq/DYniTSZ9iN6rYfQ5d+uaVk97TPbF1f7jhKrbpPdv05fbpUtHpmH8XR0U6CC0LjkHGKQ56aeET6pBFM8uoVC/+qFf+CW1Sl4VHN7GCn5OUxPW+SsQC+9WZ+mjsuzNTPQtsYEYLhtaVy2QL0ahYxpzS1Pnhmwkiq9U/xRBJgweQtyJnRoabH+3YGhCV7TxcF6mQlRp5nKdsfO5Ieb2dnj0pF+s4eueIbTyQNtq55y75WDNBq8lRNFlXvnzSCSV2uVN67WmOhJ+IpwBfdxUQ
IwmxCiXzvgTORyBxW8hjZfIQLICJ2t/V01zaqc8O2Q4rjnppwBWDt3L5ux9IibRaZ8CLHnaJtFWmTaB5P57LZhmbXdV9bVvcP5tKSNbtcf+LNJRG87awPs/aKjJu3Ee0cJRgREdDugn0SCIYlQCN1UnQhsfmcJBgeWYlMVEkwRCzB8Ba/JBiDmcKTYHgiqghB2GFoHkH91yWYpEww8I9aEted0qVN6RomSTBiWwFLMLyTNkkwNghGxAaTGBKMryUEselknItnI/LEsCkqFSQlFcmbtno7LrzvvV3wga4i+czIe+rHy8T60/AWTWJIMOwpiq9tHN5O5KRk5GWP03lGeW8kGPaUJnWq5IqDnvbpDG+drxo3AAAZmElEQVTx9uZ7Xxl5eSdyrRpXoHHvt9E1z1tCs/O9yLh5a4Oh4BZVH2kdsuye6qjoiE6UxCAYOP7Vbx+tnGypyRcGP28mqvZb3vG3J8fUPYbMo03bD1OObC9S0fyvUrMGZQm3lEXJ3qo/vON03qIQmahGdfE2A1HHS6v2++J39kTOUwLkzUme6wRLaHaNyrjprXU78beRF0d5fnW0c0JF8oX3JSYnJkXTsAmKlzOOJ/G/Vo3K+2R3ZSeCXf8jnq+PavPyt6OdHZWU5x+CkAvwJrcThwfXDZp1mUDPPPM0wU6UK/tLFNy8ihJEzJuU2I52rIHejp8Yb0PwO8HwrgrY3e0Ru6NJ51jl/oqajDxnE0OCQRtYN3dc8/90ag8qU0LcCxeXHXFPS02+ct5CeayrP26hr5zZW4kUKJLYu0asrwur2uCS5rJpkZRVICiYWj87VkbE4Y0Ew3Ojt3utg3cfzhvfHy3+vM3KrlTBu8xpdFWAJRhcJVg7p69QMDee5Op3gnHisiPvdq/RpHVCgkFdJ364REGh43ROclD/4NAmEqSKF0qhYN5syiL1JmSnisP5n68p3raq5yn+LnrZkTdh2asGTlx2NLrO4A3BoN+82/x2LqbywlLUqVJMCUjm7V0k3maAv3l72dFISuM5HrI35XkbEI9kkc/vBIOQmbwBRgCdBRPCCUxrlIzCNcBrFiETeXFEE0uCQdv+N3YpJSz6Utd83ICFu79ZhDaEOsDlNK0ui0JGD2hJiIviq8QL12B1ixj9WrVxH3UZNMsVXgOEPmNMiM4TmBeuAe1GLJKw1tVN1Q+jcA1GcVa8JRijtuL2eo/gOqa3y2/duU/BUQm0Y+8J17CY3eb3ZOx44Rpw490quBrq4oVrMIuZhA2nQcdoXUA3q42RNyfUftohGJ4HttdGXhCMUcApBNHBYuTFZcWkGhK7TAl6rE0AD6EQETSalxKLYFC3UUCnwvleUfqFNrJ6Pu6KdB8yVxemAWVZDbKvJi7Kad+0EvXr2tBNUoKOPfXjTRQ9TR94y27AKQS0wnUQNoYygjit/+o7ihq+0C28pplE4S3BoM9GwbFgm0K859w5MusgxqLasfck9Rm2wC2Osx3pR3TcjAJOISAWDPS8gFOfrNpJ78d86haN0WwT4V3URRsR82hUvxZuY4awIpgPRvGLzQiGZ/+CER/2LzV4mU8IBh3wRchM7BwJo0KoXnXxkJlOqUjqxDELwwgpBkGnM2VIo8TqUMM5spMOdosFE8OpQJ5sovNROJ9Z+0TCMMKYuWRKD0NJ0xchM63I1RcEA8DM2popYxoljKoa3hQBt3lhRrFpzBkX5tG1CLNB81XITBHyM3MUhQEb4VGxGew/clYXBAvtB+HhVQpV9TYjGJ59CWVAOnvxhX9eCrl45WZbIvrYCBuhkJnqxyAZ7F4rNzwOPiW6UsDg00Z2srwFm5gSjNp2npgq2i+Qy7zYrgSpx6nkafsQUmP+hHDLm71wIeg8YIZbGEyR/iBmMsKmmtmtfEUwaM+m7UeUSHw2ntVwdUMkCqNIn43ygGQQVpYnlYiUi4iN/bs2tAwoZmR6sKoDKmWLhuUV2x4C6SNZGaTRn34jF5kVPeL/XxQa5BOCQSFqXFaEwbx+UyzKPewS/bo0EArE5A+CQb/sPgui7gY88ddqoD353e5THUaiuVHdUBf/N24prd60X6h5CLKN4NTVKxSyPC72JcGgcXbbih0XCxeRAOw8ZSMEBCcT1Gg8saO+9WRVDgJoje7fUjk+F01mKiBbBjZ3REJEeFHELtI+aG9FMLzwsEz5y4ioBRH9ExmcSbYkGO23CFUAHfezdd8q/1UZEXnweFaR/NkpqFZpqlO1uK1TFX8RjNo3SGkbth1WglF9f+qCjkTRr1zZM1OboAq2+yU6cazyQadeunY3LV+/V/cwGtTPfLmyEuwneMzLznGztk64FKzcuI9Wb9zvVr76sNs7dd5QFoPoY2G+Jhi1vfBDwnE+sDhz/orrETr8jnebiubPTrAZgATNjPZWmHvyOwjgzPlflNvRkLq0D6+pDwUiXi9MAFBj7fj0sOuQ9xih+vghNpq6VYu5+m/0CJ7Zw22K7W3LdxQ3b6Py2J/6gNy/7YA6g6c7uO/IeEwwnoAuv5EISASeLAR8ZuR9smCRvZEISAR8gYAkGF+gKMuQCEgEuAhIgpETQyIgEXAMAUkwjkErC5YISAQkwcg5IBGQCDiGgCQYx6CVBUsEJAKSYOQckAhIBBxDQBKMY9DKgiUCEgFJMHIOSAQkAo4hIAnGMWhlwRIBiYAkGDkHJAISAccQ8JpgHGuZLFgiIBF44hGwvOz4xCMgOygRkAg4hoAkGMeglQVLBCQCkmDkHJAISAQcQ8DnBIMg4Y61VhYsEZAIOIqAldHWbuWSYOwiJvNLBJ5gBCTBPMGDK7smEfA3ApJg/D0Csn6JwBOMgCSYJ3hwZdckAv5GQBKMv0dA1i8ReIIRkATzBA+u7JpEwN8ISILx9wjI+iUCTzACkmCe4MGVXZMI+BsBSTCCI/Db73/S2i0HacaiLbrX6PACYuZM6ahKuYI0JLIJpUuTUrBEsWznf76mvP2rPi6Or557Nhl9Nj2SShfNJVaIF7nwCt/RExfo8y8PKC8K4sXNW3ceuErEi385X32RKr6RT3lKtGiB7JZvIfOaw77AabfJ6gPquXNkpiplC1DtKsVsv3DY/YO5ysuJarJ6BtVuG+3kx4uYwBtP7+IVR7ygqCbMuWwvZ6Q3iuZSXgStXLaA6Vveduudt+xr5b34cz9fo4cP//FzVV9Xbd6gnO5lR6uyJcFYIUREp368TB36TKPTZy8b5n7rzfw0d1yYz58UnbX4Kxo4ZrFbvR2aVaYRfZt7/ESoVbfxpOzkuRto5idbCOQqmvBuMd5tbh1U0da7zd4SDK99eV57mUb1b0EVSr8uhJO/CQZPqH6++QCNmrKKzl64Kgq5kg8Lv3+XBpQlcwZb36mZ8cZ0/5GLlE3UKtl5m1sSjAWa127cpSZh4+nEDxdNc+LN4nHvt7EaG1u/Y2F36B1PW7753u27LC+lp5UzelP2bJlslWmVGXWCVDDJmTeDrT7V/Y7H7ONHBBMeYhdJThCMWm+XNjVoYERjS8nKXwRj5+F5Myyx8D+IbELYfETf+UZ5R05coLaRUwhSk5
[Embedded draw.io SVG flowchart (base64-encoded PNG image data omitted). Recoverable node labels: "MP0/MP1 boot process" -> "OS checks all wake sources" -> decision "ACPI fixed event active" with yes/no branches.]
Qo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-16"><g><path d="M 371 301 L 421 351 L 371 401 L 321 351 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 351px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">IRQ other <br />than ACPI SCI active</div></div></div></foreignObject><image x="322" y="330" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfQmYXFWV/++8qupOQAUGZd9c/4IbGkl3dXeF1rAEBAKB6k6AGBQV1Awq4wqM4Ci4jaIMCiIoyJauIkAQZRXbdG0dzAwyDCKDoLImyBCIZOmq986fW13VeV11X733auuq1Hnfx/eRfnc593df3d+9556FII8gIAgIAoKAIKBBgAQVQUAQEAQEAUFAh4AQhHwXgoAgIAgIAloEhCDkwxAEBAFBQBAQgpBvQBAQBAQBQcA7AnKC8I6VlBQEBAFBoKMQEILoqOmWwQoCgoAg4B0BIQjvWElJQUAQEAQ6CgEhiI6abhmsICAICALeERCC8I5VQ0r2Roa+SIxv2xr/c5BykbGxm5/VdTg4GH3N1iytAuGDNQpkAniu8F8G4Nu7gzuOjo5evaXGdqdVnzPnE6FZs16aw4Qogw8F4QAwdp0qRHgBjL8Q6HfEiG/ZstPatWuvyNZThnZvKxJZtGeOg2MA3lwYy4sW+PDxRHxtu49N5G9tBIQgZnh+ZpAgdCPfCOKvI4dL0+n45lqgKSxq5wL4KIDZPtrawMBVIcp9z4kkfbTV0kXD4ehsGDgdhIl0In6Fk7BCEC09jdu1cEIQMzy9LUYQeTSY8OtsN5+89t74S37hUYseB+h8As7ySQylXdWNrPyOodHlo9Fo4OnnjGOYrW8B9HYmfCkzFvuOEESjkZf2/SIgBOEXsTqXrxNBbAToeX+i8RsAvNa5Dl0xsXmn5X7UPb2Diw+gnLkCoJ4KsqiTyXMA8WQZNzl4nIOBxZnRFX/xN77WLR2ODF0GxplFCYUgWneuOl0yIYgZ/gLqQhCEa9JjsdP8DmXOYdGdQltxAjGdZ9NvF5sxiejU1NjICi/thuctfi8svgngN5WX50eY8O1sIHj72tEb/176vqdnye5G0DoeZH1W7ajL69PjMOik9OoV/+VFllYvE44MXQ3GMiGIVp8pkU8IYoa/gZkkiOLQ8xffOePHAC+dDgenJmbhaDdVU1/fov3ZCP4awEElcD4D8On77Il74vG4uhSv+CjVy1PP0hCAHwJQJxz78zBZuaNTqZv/6tZOq78Xgmj1GRL5iggIQczwt9AKBKEgmDv3hF2NrtDtBPTaIHmZLF6QSsXTTjAVLlpvBNHCEnIZzQWx5P7RuLKU8vU4qqqYV8HCklov0H0J04DCQhANAFWabAgCQhANgdV7o61CEEri3v7hU4j4umnSM52ZTo78xGlEfZGhU5lxNYCAbddxz9ZZHHU7eVRCyeFUYhLhtNRYbLqM3uFuiZJCEC0xDSKEBwSEIDyA1MgirUQQPQPROQbonldVPLsUx1zpArWv74TdLCN0NwHvsWH0N5g8P52OP1YrbuFIdC6Yfglgtyl5gD8YVvaIVOqW9bW2P1P1hSBmCnnp1y8CQhB+Eatz+bYmiMjwYub8iaN4ejCZsTyTjF1eJ5iod2Do3whQl+jFx9flebVyqPuQJ58NvJ/IOg2MwwHsDyCYby/v3Md/ItCImQ2MjI/fuK5SP76dG0uMDtz8IPLvEThVqd9A+Uv+ot+Jkus+Bn627578Wy/3QLpxVMQCWEfAgyD8oivAt46Oxv/hBXMNJlMOoj2R6EFG3nmU5ufHMon3GgZdlt28851+LOu8yCJlnBEQgpjhr6OlCKI/OmAQ3QlgR7cTxODgabO25l6JA3SMDcKHrGzgMLcF0w/k4XD0LQjQbwDst60e3wETJzbiLiK/GD5DJxLhIo1ll070HBGtzBJ94f7VK57UFWgUQcwO4k9bcnQhAZ+aIi9HcPkRsow
Pp1Ij9/vAn/rmRT/AFn6kty4ra8mz74qOIGDyoQjQ8a9+f9919KFxUXn6GJsU9YCAEIQHkBpZpIUIQrtbZ6ZjM8mRO0ox6J03fCBZrBbuPafIBLgkk4h9pp54DQ4OBrfmdr8G4JNt7T7LBs3PrB75Y337ir5mS44uIeDD9jsVj308QwYvTa2O31davhEEAcJHwfxlF5+TUlE2MtNpmeTIzW5jUsYHFDC+wWA1n1P3S271Jt/zuBU0Txofvfkpp/I6giDgEga+VcHB8gnT4PlrVsef8CaHlKoVASGIWhGssX6rEIRO3w/CY8jxYDodf7qMIPqHFxHxStvfTScyqREi7eU5M53oZaHz2rfyCeneQnFGXp1UtrASkGHQEyDuBtAPxhs1C6d2Ac6ftrKbloPorZMNWx8Aiv+f/8MagB6Y6pStNelk/KrivzUqpgkQNk6LaQVsBuEBMP13JRkZeBRB48hKjocqflbX7JcuBfgTZUgQXiDGf+axAAIMfj8B79CcYCqaJWsIYgLAVpvz5noG7iPQK4DVmz/BEF2WHhtRHvoFJ0uvsyvlqkVACKJa5OpUrxUIoq9v+BA2sKLUyY2Ar6USsQt0Q+3rH7qQCefY3jVkV6/a7+uLHgyD7mHg9cX+iHFRKhlTsZ5qftQpZSK3+yUM/mRJY88w0z/vu5e1qlR/f8i8xfsGTOtiIiiViH2HvR6GsaCSU18dLqntYjqqdQq6/GsAer+9QqV5VTcs4Uj082D65vRx8SMW46z99sJ9pVgUTKTVXdEZJVjc1R3kk3T3EhVOVSaBfsimdZ5dhajwhmFmqzGbrvkD6eAGhCBmePJngiDUjnZi4uXdEQhGWDnHManIsJMXsIWHAEdT1Um1z243AIhuq8EPTgSD83We0rVCrDytKWSuJuBttrbi3cH1J4+OjuZqbr8/erhBtGq6asNdTZJfTAeGlP5/us7cxV+jjgSxHsTHpsfia5ww0JoLM+7rDvFC3cKts2QDcC9MHk6n4/9XAWvqjUSXEdOPbTg6miU7EQQDP89u3vkMuYiu9auuT30hiPrgWHUrdSGIqnvXV+S8KWluoZPXsvbHXWHRqVW8RvbncMfhx1RXd3ez2WJeOJ6MK5PhsqdOBDHBzB/OJOMjbviGB4Y+DeBSW7knrGBunuaOgMKR4UvAvNxW1o8HuwYLvTe+A0G4Oma6jVXe1xcBIYj64um7tRYjiBxAMZjWP1faLQ4MnLyLiewd0y5I25QgdJftAJanE7EfeZ3MfCypkHkvgHcW66idcCYRO12nL68PQXgLg6LkmVQh8t0Adi7Ip80nMXde9I0BK28xpu5X1OPbMVHTxgQDH8okYgqfqUdPEDweQOioROKGF71iL+Uai4AQRGPxdW29RQhiPQg3Gxb/IJmM/8lNaM2lqbo2dFRbuLXn9t5hMamLSqvUe5yBJ4MwBxOJlY+7yWV/X34n46xyqwdB+LmD0czXBrLoiFKT197+6HFEpCycJu9UKhgpOGGjUz/qZNXNKTOuyyRjyoJMLqH9fHwNLCsE0UBwvTRdJ4KoEO6b1d3CXhork00E/Fs2yNfcPxpXDlWef5RzBpe8viuX+w1A754aY9MJAr8zeMuxyeRtG73g7FSmr3/op0z42Lb3+cx6Ub+Z9XoHhg4j4FcAuirt0tW7ehCEHysuHaEzcFwmEVNe6lNPKcmpe6h/7NB9woN3X/uKH4x7I0PnEOPCSphqCcIlL4YfGaRsfRAQgqgPjlW3UheCcAn3nTdb3GHDh8D8zRKHJxVh9fLuIH/ZqwesGmgj7wR0QDaqP52zH1fpyzEwcOKbcgiMErBvcQy6RbhuBKFZ4J0+Qi8E4WB48L+A8Vv/HzcfrOI/2giiTHWkP0HQqZnkyPX++5MajUJACKJRyHpstxkEURRF2fp3bSGV2lKF1LY97Cvy6vZixVTPXayXRbgIeB1OEL5yUnuRzbczn8fvu1CsLM+6FnsfpOeveyldLQJCENUiV6d6zSQIJbKTQ1gls1bdUHsHhn5YSCtafN1UP4hqd/r2sTScIBxUJkIQ+lOo04mrTj81aaYKBIQgqgCtnlWaTRB5FYc2vpF64z3NqCY0uMnMizLJ+G31xEe11VvutQ3m2tURDScIBxmFIIQg6v0baVR7QhCNQtZjuzNBEJOLbnSYiH5hu1RVf95MhKWpsZg9hIZ2JFrzUKJLGxAKQdnWX0XAR2yC1OW00nCCcFCZtAtBuOXK9viJa4uJiqkW9JpXVwiieVjrF9rI0BcpH9p46inT17qpRVBFTmp1cR2aveEnJQuvMmVyjdWj5HGI5lr3YGoau3plcFWXaK71vKTWEKZjbKpWJIh3H7F0x9ds2nqLPRYVMa5MJWMfb8RPRAiiEajWv00hiPpj6qvFmTpBVFI1EeiyruC6s9zCWPSV54OAn11nb//QmUR8GhFd6pBLQOelrET35chWaUJK71KqNu3sHz6KiJXZaN6HgIC/w+LDU6n4tiB8BUFakSCUaGUmv002XZY7CF9LR1MKC0E0BWbnTmaSICZVTWqRzodhsAecqxgqojgahzzWrvGBCuQ0GwGsBOioQnsbCPiemQ1dMj5+/cv5Mk3IKNemjnJ1t2LK490/fAaI7cme1pNF81OpkYd8/EyK4TpUnK6nVJRaZuueWaHnV9o3HHKC8IHoDBYVgphB8PML9AypmIrDzv9Qc3QTgCOnQ8GjE7NwvFteaf1dhnugu76+aJiNfHKi103vlx4HWR8l0/wLG8FfAzjI9t536Ae36dXmtfDpsKUijQYt6y4ABxb7a3CojYYQRF/f8DvZyOf4sKd4/UYmEfuqV0dKnQGEzuJMCMLty2yN90IQMzwPM00Qavj9/YsjFlm3lyzWJoO/mEnEv18JIqe7DJU0hoOBxU55B/KZ256jXmL8K4DDSk4wKkLrplLyIKKRrZt2WlrPSJ8NCtY3AfDCdCKuCLDsaVUVk0oSBAM3gmihTWhPJ0JV3iFs+maGcXQmsWLUDoQQxAwvPB67F4LwCFSjirUCQUzmABj6PhifLRmnp6im2pDSkw09D+Az++zJsUr5kFXeAmJSlkq9FXD2E1XU13T1aMJ9u0W0LXSgD/cNOOZBUPVKCcItrpJbTmq3wXpxlCu24YDFozDoeJcMftTbHx0iop9PC5vuEPpcCMJt1lrjvRDEDM9DixAEegcXH4CcdVdJzgVl1eQpPn8FklDaiUeY8O1sIHi7Ll9EPhpqMBclovPtSYFs6ppH2TCPGV+98n8bMV0VEgY5EpxadE0Ev8uMxWUJg1xyNGicDB+GyQvT6fhjuvE1kyAqYPEimL8ACzeU5gLv6TnldYFg9htMUAmX7HlFHE8fQhCN+JLr36YQRP0x9dViqxCEErqWC2tV/5DB6B6hHP3CIW1nEZfNAJ4DiKFsnoA9KuQgLtYxmXGrGTA+d//qFU/6Athj4TqlHPXkR6K5DFZSKrXaMwDliLG6KzT7k8WAgc0kCCWICxbbUpvmsbXeA9AcTTDIzUz8L5mx+GW6KRCC8PhhznAxIYgZnoBWIginC2sGMt
ZE9pg1a255wQ2uvB47iOVgUncLr3Ur7/O9Y3pNn+1oixdiVakFTcWqslt1eWn+eWZalkmO3OFWuL//xHdYFFD5GVSUXc0zPS9CswmiSBL6uF1uo8u/V5uAL6QTMZVdThslWAjCE44zXkgIYoanoJUIQkFRy4W1HcqCCewXCnmKi4lqvKC9EUTXWIHst41c8IhXk9z8QEM0T4P5/O4QRvxEofXSubo8f+pZUgSh0oju7aFOjkA/3Ro0vuoj3aouPedUV6U5KWaCIJQwVWCh2CDDxKePj8UfroSdEISHL6sFighBzPAktBpBVLiwfsZg84hkcuX/+IFMWTnNmvXSHCZEGXwoCAeAsautDZWL4ikC/Y4Y8S1bdlprt1JSJqQB07qYCMeX7OpNIjo1NTaywo88Xssqubu7X/oACEuYOAJgf5saZR0BDzLziJnN3erlZKXrV13OG0znAVA5wXe3lXnFYl4wnown1N9miiCK8igsgrNe7AnAOJ3J6gdoH5tacCPAf2PQ7RboZ2sSI+qeyDW3iBCE1y9xZssJQcws/tK7NwSob170A2zRD4tpPf2ovbx1IaUEAUGgFAEhCPkm2gaBou8EgHNg4ZeZZMzu9ds24xBBBYF2QUAIol1mSuQUBAQBQaDJCAhBNBlw6U4QEAQEgXZBQAiiXWZK5BQEBAFBoMkICEE0GXDpThAQBASBdkFACKJdZkrkFAQEAUGgyQgIQTQZcOlOEBAEBIF2QUAIol1mSuQUBAQBQaDJCAhBNBlw6U4QEAQEgXZBQAiiXWZK5BQEBAFBoMkICEE0GXDpThAQBASBdkFACKJdZkrkFAQEAUGgyQgIQTQZcOlOEBAEBIF2QUAIol1mSuQUBAQBQaDJCAhBNBlw6U4QEAQEgXZBQAiiXWZK5BQEBAFBoMkICEE0GXDpThAQBASBdkFACKJdZkrkFAQEAUGgyQgIQTQZcOlOEBAEBIF2QUAIol1mSuQUBAQBQaDJCHQEQfQODB1LwG1T2DLu6w7xwtHR+D+ajLd0B2BwcDC4Nbf7NQCfbAeEgK+lErELmgHS3Lkn7BrsDh3FjOMAvB/AHgBmF/rOAXgGwFow/8rKBW8fH79xnV+5yr47vw0AmwE8B/CzYNxrEW7bb088EI/HTa9N9UaGvkiMb9vK/zlIucjY2M3Pem2jlnJz5nwi1L3jixG2jOMA6zCA9gPw2qk2CS+A+TmAEgz8KjuLV6+9N/5SLX061S2ThWgPMHa1lV8H8J8V1hwwVuy7u/WoH6yL7UQii/bMcXAMwJuLf2PClzJjse80YlyNbLOtCSIcjs6GgdNBmEgn4lc4ASUE0chPyH/bfX3D72SDfwNgt5LaD1nZwGHVLMZepIhGo4Gn1+FQy6ILCTgEQMBLPQBqQb7XIj57fCz+sMc6qANB6Lp6GsAX9tmTY14Wr5kiiJ6eU15nBHPngfjMaYTgDl6OGasC4HOTyfif3Iu7l5g7L/rGgGWcW9iQFDcB7hWBPzPjnH334pVesBaC8AJpE8rkf+jPGccwW98C6O1u7CwE0YRJ8dFFeGDo0wAu1VQxiejU1NjICh/NeSraE4keRExXEdDrqYK+UA5MF8Oyzk+n42p3X/FpEEEU+4xNzOJPuO22Z4AgKDww/CGAfwbgDW4YVXifA/HXkcN3vWCta0eRVCCY/QYTPgkgWL0sPMbBwIczoyv+4qUNOUF4QamBZcKRocvAUDuT/CME0UCw69z0nMOiO3Vtwa8B6is0nQVgbNvN8x0wcWK1i4JGXLVgLQVYEdI21UZ5wXUAvVL4onYEsHuFod8Jk09Jp+P/VwmeBhMEALpiYvNOy9euvUJhqH2aTBDUG4kuI6Yf29R1NX1BxLiSLT7L7/fQO2/4QLI4BuCdFQTYCNDzhTlXBLJXBSJ5hiw+KZWKp90GJAThhlCD34cjQ1eDsUwIosFAN6D5nv7ogEF0JwC1CIOIRpitAwF6d6G7l8niBV5+iB7Eo3Ak+nkwfVOjTtoAwtVMdIVO1zw4eNqsbHbLIRZZqq46dUxTRxFwz9ZZHK20g9cRBAPHZRKxX3qQHUpn3tX14m4UNCLM1vnqtFxSzyTCaamx2HWtQBDhSHQumNTYSlWHTxNwOSy6tatr61Ojo7duKMo7OBh9zcSEcQAMPp6R3/TtXTYW5q+kk3F1j8JecKsgR44JdxPzf3QHdxwdHb16i729vGbiaeMANvhzAD6qIbn1ID42PRZfU0kOIQgvs9TAMkIQDQS3sU1TODJ8CZiXF7shwlKL8UECPjL1tzpdVvdFhk5kxrUlP3STgZ9wNvSV8fHrX/YwXOrtjx5LRFcD2GVaecIP0mOxs50WrloJwt6Xum8jgy5hwsfsf2fgD4aVPSKVumW9bizNOkEU7gNvBNFCmxwmmC+EhW95OQHkF+hn6dMMfKtkzl60wIePJ+Jr3earr2/R/mwEfw3goOll+fcWYZnXO6TewcUHUM5cAVCPH7xVWSEIt1lq8HshiAYD3KDme3qW7G6EzHttx/78D58YexPRzbZdes2X1eF5i98Ly1InFftudjMTfyozFr/G6260CIXDrvRlg41jkskVymKl7KknQajGlXquewvFGTjcvggz86JMMr7NSs/2slkE0dcXDbORPxm+rtg9A9/IJGJf9Yl1UU115bRTG9Gl6bGRsyq1pU4jW3N0E4Ajp08GXdsdtD7l12rRAW91jKk4LiGIBi0gXpsVgvCKVGuV64sML2ZmpQ4pqGt4PIDQUaaZ3QFBGgXjLQWJa7qsdtrNEuisVGLkMp8L1hSIfZGhU5mhThLb1E3Mq2BhiW6HXG+CUIKEB6ILAFoFoMu2EF+SScQ+M5MniN7I0OeI8X2bDFWTvHb+CI8hx4PpdFxZcWmf3v6hM4nyxg9T80OE27oCfIpfcpjaGISjb0GAlMWdMs8tPs8YbB6RTK78H50gQhAzvO4IQczwBFTR/aTvw243AIhqFjbqHRhSFkZTaiag+stqh0X059nNO59R6ULXbVj5HWqWVoHwQVtZxzuTRhDEnMElr+/K5X5ju7NRmnlHv55mnSBKf5MA4t3B9SePjo4qnxLfT/lmAhvIoiNSqZH7dY1pTqdqp/8ogsaRXq2PHImn3JcElXx2hCB8T3dtFRx+mM6NEq5Jj8VOKxZwMXOlnoHo+wyij8HCAhD2LexAcgA/xqBVZPKV6XT8sSpHQb2Di/dHzlxkgBbwpG5UOWUVdznKxv45Ah5m5hEzm7t1zZpbXnDrS4PJNAco5QhmdAWXEUg5o73NZsGjnIH+k0GXZTfvfGctC6abjPb3Gt+HCQY+lEnElMpJtzOu6rLawQlvPVk0P5UaeciPzLqymoXLcbFoBEHofwuTJ7FE4oYXS2WeKYJgxnWZZOzD1Z7WClZIauduKOsyBucCbHzMSZ2nmxcAy9OJ2I9qnfNJP4r8KeKN29ri1MQsHK0zUhCCqBVxn/UbRRCbcsZeBvgaD7bxSuVxE+esT7mZNtqGRn3zhiJs8fcAUp66Xp8cMS4zc6HzKl2iOhHEpk2v/3vXrA2fAUHpfiuZd
Sp5ngfoo+nEyK+q/SF7HZRmoZqmgnDYATqqThx3e5PmjerHvKetTE27WXtfk3LmVhHoZWZaSYx7997b+ovOkap5BAFH7+hmEURf/9BPSy7Qq1Yxef2miuWUxdnW3CtxgI4p/o2BJ4MwBxOJlY/7bU9TXplKX8zgQYPxKzZ4VXdgxwdLraCK9YQg6oC4nyYaQRBk0E+ZWdlrT7dMqSDYpMVIbmEqdfNfK8mfN0/cYcNFYChzOa/eutOadOtLRxAwzWMQCHwNwJAPfHMM/lImEb+4USSh8X1Qx/+yxb93YOiHBKiLyOLje5Hp7R8+hSh/zzH1MNOpmeTI9T4wqUvRphFEC6iYNLibIP5Keiz+7436roqTNDBw4ptyCIwS8qf/4lO3TYHfj0EIwi9iNZbP7xCym5aD6K2TTVkfAIr/n//DGoAe2LYiWGvSyfhVxX9rVEwvgfKelXlb/PyTjwmD1ZOOM/wGEOaVxGnJF2PATZftbHs/GXfmT4Axqeog7gZzD0DqcrbM01M5CXWF1n9Sp8fVEMTTAD8I0FE2uDeD8ACY/lsRFYPfT8A7NH15NiOsZip7B4YOezW0hTqlFC9WX7GYF4wn4wl7e6U+EirEhV/P6tKdLAF/h8WHp1Lxbd9HNYOook4jCCIcju5dcqGvJHNcDJt1gtCrYZA/Deey2a95UZtWAXG+Sm//8FFErPwvpjZjTDg7MxZTm56mP0IQTYd8eoc1X1JPa44fIQOfTq2O/9a+01E22U89g2EQKasI+ymjom68ZyA6xwDdM70OPW6xdeZ+e+E+nSoiH7cmlPsKwJ8vWbwddecup6qNBFzYFeQflVpvHDJv8b4hy7qqxFRSeay5mhFWOe1lvg+AXn+rO2n4uax20M8/OBEMzl87euPfq5S/6mqNIAidrr1SJIFmEYTa7oT7o18C5Z0SSx8VNuM+sHHlxCzrbrfwIH4B14xx2v2W3/ZqLS8EUSuCNdavF0F48YbVOVtV+EFqFkP8DSbP93DJrbX/dlKPVCCI9TCME9KrV6ScYNbad3swI6xm2nQ73koLWt/A0AUMnG/ry/Nlte6HCfDt3cEdo0764mrG5LVOvQmir++E3SwjdDcB7/GKTxMJIu+nEdpKNxDjaBeMniImFfzw5mwwkK6VvDUWVM+yQfMzq0f+6HWu6llOCKKeaFbRVj0IwqsJXN4mO4CVdtWNk4WG1gTRhyXFwMDJu5jI3mH33mTCuZmx2EWlMDkQhMmM5Zlk7HI3WDWmoBXNCN3ac3qv2fFWtCjSnsA8nm60BFFi0VbtOKqpV0+CUCe/oGX+AqDB6QdgZz8MVa6ZBKH6U9/llhxdQoCyYPJ2/zap3r0PoHg1JwwNQTQ1pHnptyEEUc2vpY516kEQfnIP9EaGziHGhVNDcLgUDIej/0QB+hQD7wNwMAhmgM0j/VhSlH3sDgucliB8nAJ0F3t+4gR5mU6ddYmbykhHyAA8XVbryEXd46SSsY97kbfeZWokCJozuGTXUM48mAingvOGB6Whql9kGEdlEivGnWRvNkEU5KC+edEPsEU/dAmWpxNbhUK53yD+xt574E63MNu634Ha/HE2MK9RYePdvhMhCDeEGvy+DgShvSR1/JGVJhwCfmfwlmOTyds21nuoNRGED+ck7WnFRyA5L+PW5n1gOjOdHPlJpfrh/uEzQGw/BXm6rNYRhFukXy/jqLZMg6O5erISmiGCyEOm7vGefDbwfsA6g4CTPJhdl0DNj4BwZnos/junOdBulGY4IZgQRLW/mDrVqwNBPGEFc/PGR29+yotImh96w46wtRCEkzpKN0aHnZfnSKNecCu9T/Bqm643W3T3rLY5V23zgWgxFZMX3DyUUZe+5+yzB77vtsOeSYKwj6MYKdUyrIVEdBIYB3sMCV5xrPpTqrNfiAdsay4iBFEzhLU1UAeC8LXAN5ggqL9/8Z5sWB9kxikADp32w/GhYvJj2tdogtBbJNEN3cF1y9zCLzh4Q7teVrfDHURtX34+s13GIv6E56ikM5xy1GkpUNLrAAAYg0lEQVS8U6G1CYfB4GUuhFHxtCR3EDV+VRWqt2XK0ZoJwucRtB4EoRZk5bkdID4I4Lcy0/sKaS/3r5jtyg9B+FARNZogNL4PntRExW9VGzrB5bJapzZTlmr/2KH7hAfvvraQDKhxP6bSlmtWMRFeYM5f4K4xDNxlTgTu8atXb5UThBvqyrk0OOvFHjLoK8Q4Quer43Tf0tc/dCETzin2MZO+L0oGOUG4zXaD37cJQVBPJHogMX2eCMfpnO48wdSeBKEz9/U0XJdCT5gGz1+zOv6EXm1WHhBQOQ9uT34QfkFsF4Kwj6snsuTdBpvK870kG5z+BKrx4hY/CL8fikN5OUF4ANLvCUKZJAZM62IiHO/Z1G9SDpX0Rc3Jtly+bUgQDl61HpB2LeJ6Cmml3WSNVkyuYHgp0I4EocalS/zjdIel8cBXaYjFk9rLB+JSRgjCA4h+CGIyUQ3fBPCbXJpWUVX/TKCExTSaDRn3K4ehmi6pW0TF5BBZ0wPSXopUvqzWqLZQ71hM4YGhJQCfS0yrKgVu6wSCGBw8fuctuS5lAq7CqYRUpOIC3ioBVE2P5jvSngwc/I/qGotJGU6YbNxEMP7g5uAnKqaapr32yq2qYipczN5a5swEPM3AdYBxZzZID60dvVGF89bm1213gnDwY9isQpqr4FP+Z5/VacoelbbiZbXfWEV+5dFdoDvpvDuBIHSLMzEuSiVj5/rFtrS8zirNwVenLJ+IV4s5rzI6hDjRnlKEILyi2qByrUoQmo/IBOGnyPHZXnLyKrjanSC0vg8ePaF1n0t4YOjTQD5L2Lancnu6+4+6BSPUjs8htlQnEITWzNSnEYjTMtEzuGgfIxdcbc/D4OTMqU0SRfhSZiz2nVqXIYdNj+NGRQiiVsRrrN+KBOGQMS1jTWSP8RrJsr//uNdaNEtFpVSmrpNPm91BaGIp+XJK9LKLBFDxslqXH5mIRrZu2mlpjQmSKBwZ+j4Yny2RU5uYphMIQuGgmfPNFvPC8WRcBa2s+tHcLTiGg9GbVXuOg1ZRxp7+6OEG5dO82jzZnVWdQhBVT3l9KrYiQTh4JvtKeKPffU/PjldEsFYz1Vrr62ZSh4FT5FavX4KDT0TFewVlMtm9w0vXMvOwrR+TCKelxmLTckV4lSO/EEaGTmTGtSUOXn/MGcaR969e8WQZuZV74Cu9Yl2dEd3kb8YltY6QAR6dmIXja4jcqiHjyhZpupzhtW4MdJflACaYaUkmOaK9ZxGCcPsqG/y+FQnCQRfrOQ6Qw6LWVieIRh3zde26xnTKGwtYdwLYzfY5bmbmj2SS8ZjfJDbhSHQumNTpzt5exeCInXKCcPx2gZtg8hk+sjBOTZWOjN3ip+U3PTm6CcCR9iVIxeNii8/yquYt1tVGPVYvuXKARCGIBhOAW/OlBOF2KeaSk9qtO3ixYnr3EUt3fM2mrbfY8yz4ihgbJKW6UEHlpkXAdIocW+sJoNb6GtB0uv+65ILWn0zg5lntlLjJU0rX4vjyeUGe
pWUAfqCJJXRXd5BPKs25UazbKQShxjtpvVdGyGo1fYTI+PLee1i3u4UFybcTjs6mAJ1VCPluD07oKWy+A5Grk1uGiU/36oHu7IsBFU5/QXr1iv9yWjiEIFyX1MYW0KSmfBgmL3TKudAMgsirIMpzGagfyO8twjLdh1lITfohMH8ToLdrUXO48Kt1ga+1fqmsDond65aLQYuty+V3Ht/ZL10K8Cc02G4A4WomumLf3a1HSxcvdSIMmbljiPElh7l5mKzc0ZXSz3YSQRRUcB9nhkrjW5YdEQVLPiK+uTuQfXR09NYNhTnJR63tsvhtYGspGItfJeOdS+ZrMxGWpsZiK72sLA6qQFU1x4S7ifk/uoM7jpbmCFEX7tnslkNMw/qygze3JzmEILzMUgPLaKJ95icfwDMAqR3i6q7Q7E8WP4BmEUQ4HH0LAvQbAPuVDN8E40kmZFSy+/wpga33gfKkYN8lqVg7jwJ4sy09pzZybK0LfK31S6dXl5PYb7rQSp+M3nqo8mV1cUcKh9OZrb+pb2fyb2WmtdNEU/nCyeST3JJAdRpBKJOK3oHo5wj07YrhY/ytDdXkTKfwwPBJAKu0w3YT6dKe1wFUCMHCKv3w7hVE2wjQ6enEiFJhVTTXFoLwN8F1L93ff+I7LArcDWAvfeM8HkDoqETihhfV+2YRRL6v/uFFRHy1/9DG2MhEynY8aTDfxcDrC2PTRp6tdYGvtb4ddwczwIqWRn4/Cp2VWKENrQWRvX0XNZEfUZTZ8nXWROis8fHrFdFXfDqQIBQe1NMfPcwgUr8Bh9+nG3JT759n5n+u5s5ItVBBTeRZgELBhywKnDI+duODXioKQXhBqbFliuk51XG2NImKovcngzAHi4l6mkkQatjhSPRQMC53VBtNxybHwLWmYZyvLGE0l90mMy/KJOO32avVusDXWt8uSy1Z4Px8JnoPbfcw4MU+5s49YVejK/QFAs7QqDEqiZKPosow/iWTWLHGbQdZbKhDCSI/fPV9TeToowx8EcDefuYZwAYGfsLZ0EVeiLhS20rNGJr14iIiXODx92hv7mkAX4XJN/q54BaC8DnbjSreE4keZDCdB+CDJcfDabb3zSYINd78hzl7wwIClqrrCRWCwHYBvY6AB5l5xMzmbi3xkyi77NWZ6tW6wNda3z6nmvuBCYAXphNxZUVUt6enZ8nuRsi8tySIm9tldVn/am5mzXppDhOiDFY+J2pu7PNTUDlhLZh/ZeWCt/uNoqo67WSCKIKeTxq0zngbsXUMmML5TIuTWNs3dhsB/htg3EuGddvWV3YZq9FfRffNUe/g4v2RMxcZoAWvRjZ4i0aOfPgbMO7lgLFCdzfl5WMWgvCCkpQRBAQBQUAQaGsE2jJYX1sjLsILAoKAINAmCAhBtMlEiZiCgCAgCDQbASGIZiMu/QkCgoAg0CYICEG0yUSJmIKAICAINBsBIYhmIy79CQKCgCDQJggIQbTJRImYgoAgIAg0GwEhiGYjLv0JAoKAINAmCAhBtMlEiZiCgCAgCDQbASGIZiMu/QkCgoAg0CYICEG0yUSJmIKAICAINBsBIYhmIy79CQKCgCDQJggIQbTJRImYgoAgIAg0GwEhiGYjLv0JAoKAINAmCAhBtMlEiZiCgCAgCDQbASGIZiMu/QkCgoAg0CYICEG0yUSJmIKAICAINBsBIYhmIy79CQKCgCDQJggIQbTJRImYgoAgIAg0GwEhiGYjLv0JAoKAINAmCAhBtMlEiZiCgCAgCDQbASGIZiMu/QkCgoAg0CYICEG0yUSJmIKAICAINBsBIYhmIy79CQKCgCDQJggIQbTJRImYgoAgIAg0GwEhiGYjLv21FQK9kaEvEuPbU0ITrkmPxU5rq0GIsIJAlQgIQVQJnFRrbwQOmbd43yBbZ5PJ16RS8QecRiME0d7zLNLXhoAQRG34Se02Q6Cn55TXGV0TnwXT5wHkLPDh44n4WiGINptIEbcpCAhBNAVm6aQVEOjrO2E3NkK/BXBQQZ4XhSBaYWZEhlZFQAiiVWdG5Ko7ApHIoj1zHBwD8GYhiLrDKw1uhwgIQWyHkypD0iNQDUEIloJAJyMgBNHJs99hYxeC6LAJl+HWjIAQRM0QSgPtgoAQRLvMlMjZKggIQbTKTIgcDUdACKLhEEsH2xkCQhDb2YQ2YTjUO7h4f+TMRQZoAU9aBO0BIFDo2wTwHAEPM/OImc3dumbNLS9UK9fcuSfsGggFjyeiYQbeDWD3QlubAfwFoLstsq4YH4v/EQCX9tMzEJ1jgO4BsIsXGRg4LpOI/bJY1s0Poq8vGmaD7gTwukKdVyzmBePJeMJLf8UyAwMnvimHwCgB+xb+NsHAhzKJ2L2V2hkcjL5ma844CrA+AtD7bPjkAPyVmMYsg6/cdw/OxONxNTfyCAKeERCC8AxVxxekvnlDEbb4ewC93wcaOWJcZuZC542PX/+y13rKX4FC2a8T8HEAs13qmQCPcTDwkczoir/YyzaaIOYcFt2pawt+/SpR9RX7JcZFqWTsXK9jVeV6+4dPIeLrttXh1MQsHL323vhLunbC4ehsBLEcTP8K4LXuffEjZODTqdVxZeZbRqTu9aVEJyIgBNGJs+5zzHPmfCLUtcOGi8D4nO2k4KsVBv5gWLmFqdTNf3WrWNiV3wRgL7eyJe/XM3CKfdfdaIJQ/YcHhj4N4FLb4j4eQOioROKGF73IPzh42qytuVfiAB0zRTLA11KJ2AW6+r2Diw+gnLkCoB4v7dvKmCBcPLFp53PWrr0i67OuFO9ABIQgOnDSfQ6ZwpHo58H0zTJyILwA5j8BxkP5Nom7wdwD0FsABEv7IcaVXaH1nxwdHVXqD+0TjkTngkmpeHYrKaAWtycAJMG0lcBvZKBXs3teD8NYkF694r9U/cnFlD+rTiHMvAMRjrOpgyYAvh0w/j7Vl0GXF+vm63uIxdTXN/xONvg3Npk9qYeKffbOGz6QrHz9PQt/c3Tg6+tbtD8bwV/bnP2KzZhgPMmEDIFeZvDrCBgAsE850HTFxOadlgtJ+PwldGBxIYgOnHQ/Q9bvwOlxi60z99sL9+n02vlwFqHcVwBW4SzsRLGeLJqfSo1MEkrJ0zO4aB8jF7wDwDvtu15m3GoF+AtrVscVQUw9ef27SeeDoQhgWz/Mq2BhSTodV/cUU081l9ReCEJ3AmDgkkwi9hkvWIf7h88A8eXbyvIdMHFiqfxKndW9heIMHO4FH0XZPZEl7zI4d1WJWtBk8Bczifj3vcgnZToXASGIzp17LyOncGT4EjAvtxX+G0yen07HH3NpgHoj0WXEdKX95MFMp2aSI9fr6vYNDF3AwPn2xQ/M5+2zF75b4YJVd8JRJ4OF6URcXR43nCBUB32R4cXM+TuEwmU9PzgRDM5fO3rjttOJZtD5u4QAVgJ0lO318nQi9qPS4mVkBZge8EHhIvvHAC+tYh69fCdSZjtFQAhiO53YegxrzuCS13flcr8BSFkPFR/t4qXrb2Dg5F1MZO+w68qZcG5mLHZRafmeniW7GyFTWexMnR4Y+Hl2885
nuKlC8gtgllaB8MFt7dIN3cF1y+zqrEadIFSfc+dF3xiwSKmJ3liQwZOaqVQ9xcCTQZiDicTKx+0Y6fABvKuKlDWY0RW6nSbVcvmHKtxz1OP7kTbaHwEhiPafw4aNIByO/hMF6FMMKPPJg0EwA2weWbp4VRIgHBm6GoxlU2Uc8in09kePI6KbbacN10B69n5Ld/AMPMrZwLzx8RvXFcs1kiDUets7MHQVAR8p9udFzVR+wV1ObKq98hMKvJ7kpmCq9pTTsA9MGm55BIQgWn6K2ltAzwQxMPRDAs7aNlq9Ht4JDeVHYML4FZTahej3BCQ5xyvT6fj/NYkgEB6ILgBoFYCuQp8PWdnAYXaSssuvOfmYRHRqamxkRck4deTz80widrofk1WNr0VVPhvt/UWK9H4QEILwg5aU9Y2AF4J49xFLd3zNpq232C9fnVRRvgWwVWjwCQIalZz2LqQoUpkBAOEx5HgwnY4/bR+ng6ru7MxY7GI/ePT3H/dai2YpC7FDp+oxnZlOjvzETztStnMQEILonLlu1kipv3/xnmxYH2TGKYXFaJujm0bFpFm4wUwnZpIjSuVUt6fRBKEE7esfupAJ50wJTXRpemxEnYzKnNPKLaT0ZTU7f9X8XQC5+pTYwWNwiIAj7f4l1Tj11W1CpKGWR0AIouWnqDUFVOqRTTljrwDxQQC/lZneR8AhAPbX+UBsWzDLczoXzFtX2y54TWY6NpMcUSavdXuaQhDloTe0aiaNB7bjpbZfZz9fgEmObV9wdVphIYhOm/Hqx0s9keiBxPT5vLMZY9eqmtIsSJoF0NcFtVc5mkEQmnsFrZqpPIYTO3pfC0F4nWEpV28EhCDqjeh22N4h8xbvGzCti4lwvM9QG+snrSnxhooniPKAem1LEGqcZZZJGjVTqc9HJZNTIYjt8EfVJkMSgmiTiZopMcPzFr8XFt8E8JtcZFj3qiPWnwmUsJhGsyHjfuUk5uWSWqNjb1sVk8KoLPRGyeWz5tL5ZbJ4QSoVT+swbtYJa6a+Mem3dREQgmjduZlxyQp68lsBGiwR5mkGrgOMO7NBemjt6I0qnLc2QqgXgtieLqkVToODg8Gtud1uABAt4GYy86JMMn6b+ndPf3TAoHyI8B0n31c26e3rix4Mg+5h4PXb2qv/Hc2Mf3AiQMshIATRclPSOgJpnLNUwLyfIsdnl8YJcpLaC0HozC+rMXPtHRj6GQFHAPxXBj0AGL/IJFaMF2Vrxh1EsS+N496U30Jvmc8HKnqnKy9qCpmrCXhbsX0mfCkzFvtO63wtIsn2iIAQxPY4q3UYk2YXrI4IGWsie4zXBEBau3sHq5m+/qGfMuFj20Tn27uDO0ZHR6/e4mU4Wl+BkrhPzSSIstAbBTXTRHdwa0n4kmfZoPmZ1SMq4ZH20QUDdDt16BoqEM1dBOxCwJ8s4I+GxT9PpeIPeMFYynQeAkIQnTfnnkasXXB9RChVnWjCYKsr62vSY7HTSoXQnFYqRn4tq19uXlq28DaTINTlfEmgw7wZKzNvma5eQrw7uP7kSiHQ1VjLQ3LgZYONY5LJFWOeJjQfrmPoVGZcbTM08BQvymv7Um77Q0AIYvub07qMSBeoT+VzSCVjKsOb66OSDHXv8NK1zDw8rbADQWiC3akTi8dgfYPBiexul5WcQMpCZjeZIMpCbyinNIWFzZHOKbRGGb79/Se+w6LA3dOc3IB7ts7iqFPWOXsjDqHU/9OcyB7h9UToOulSYLtDQAhiu5vS+gxIG/4CeBRB48jStJ6lPU6mw6Tvg/PpQou5qvPFmHFdJhn7sO5SWxvum/gr6bH4v1eIOUS9/dEhIvq5LTXpBDMtKfXE1hCEaywiL/kgnBAvJVmVVY+YLRC9N1/HIbSGQ3sUjgwpTFXuC9tD13YHrU+Njsb/4SSHmg8y6JLpBAqTGcszyZgtD0V9vh1pZftBQAhi+5nLuo9Es2Crdfr3FmHZ+Fj84dIOC6lJPwTmbwL0dq1AjPu6Q7xQt6D19Z2wm2WE7ibgPba6JgM/sSayXy3d6SqntC05+hcCzvOSMMjpVMQWn+V06V4LQagx6DEsjK5CGA4ddg6ngPycMALLM4kVa0qJtCcSPYiYVJTZqTDfk9zk/fRR9w9LGmwbBIQg2maqmi9oOBx9CwL5HAf7lfQ+Lb1l/pTA1vtAeVLYFndJRVZVpw7gzbYIp78zeMuxyeRtG3UjqpByNMfA/xDo95MRW/ldYBxc0p9q8mGyckfrcl/rL3vzUmwE6PnJhdO6IJWIX1uUrWaCKL8bKTZdMZCf42nAOSWrEv4FMFbnx6LSv1o4FIQDytuix2FYx6dXx/+7+V+V9NhOCAhBtNNszYCsvf3Di4hYXWy+1mf3G5noXJVD2mC+y2bD/4QVzM0bH735KcdF0Ltz3rQmVA4IGHR8JYug3v6hM4lwqZNHeGnwuloJQp/MSIntLeOclkSrxGeyLUUOdJI977bPeZXiHYSAEEQHTXa1Qw1HooeCcbmj2mh6w2qnf61pGOffv3rFkxq1zjSnMSeZVF7rQDD7DSZ8smLwv8kGcsS4zMyFzhsfv/7lSuN0SL9przLNqqhWglANayyQas7mpvChUPbrhPw9j/3U5jR8zxhV+51Ive0PASGI7W9OGzIidb8Qmr1hAQEqr3EfgD1su/B1BDzIzCNmNndryV1BWV5rIhrZummnpW6pRNVAVKrMYFfoFAYPg+j/2YIEbgbzIwB+mQsErlRk5HXg0Wg08PRzdDwznw3Qu6afjqYHzasHQfTOGz6QLFaquj0LMlYMreF1HKqc8nbv3krHgvFhBg4qnRcV/gSMn2nmxU83UrZDERCC6NCJl2ELAoKAIOCGgBCEG0LyXhAQBASBDkVACKJDJ16GLQgIAoKAGwJCEG4IyXtBQBAQBDoUASGIDp14GbYgIAgIAm4ICEG4ISTvBQFBQBDoUASEIDp04mXYgoAgIAi4ISAE4YaQvBcEBAFBoEMREILo0ImXYQsCgoAg4IaAEIQbQvJeEBAEBIEORUAIokMnXoYtCAgCgoAbAkIQbgjJe0FAEBAEOhQBIYgOnXgZtiAgCAgCbggIQbghJO8FAUFAEOhQBIQgOnTiZdiCgCAgCLghIAThhpC8FwQEAUGgQxEQgujQiZdhCwKCgCDghoAQhBtC8l4QEAQEgQ5FQAiiQydehi0ICAKCgBsCQhBuCMl7QUAQEAQ6FAEhiA6deBm2ICAICAJuCAhBuCEk7wUBQUAQ6FAEhCA6dOJl2IKAICAIuCEgBOGGkLwXBAQBQaBDEfj/6ItS8xKWbU8AAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-30"><g><path d="M 371 701 L 371 726 L 371 744.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 371 749.88 L 367.5 742.88 L 
371 744.63 L 374.5 742.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-65"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 715px; margin-left: 382px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="375.5" y="709" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-36"><g><path d="M 321 651 L 257.37 651" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 252.12 651 L 259.12 647.5 L 257.37 651 L 259.12 654.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-45"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 662px; margin-left: 302px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: 
rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="293" y="656" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-17"><g><path d="M 371 601 L 421 651 L 371 701 L 321 651 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" 
requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 651px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">GPIO<br />IRQ shared<br />with SCI</div></div></div></foreignObject><image x="322" y="630" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tnXmcXFWZ93/Prep0B5SAIGGLiPpx3HUmQO+x2QMCgYTqJEAgyogoTFhGZARUVMBxUBl4WQQHJWzp7jJsRnax7a6tw2RkHERkQFCWLOokgNBb1X1eTnVV5/atc6vuvXW7uqry3L8gfZbnfJ9b53fP9hyCPEJACAgBISAENARIqAgBISAEhIAQ0BEQgZD3QggIASEgBLQERCDkxRACQkAICAERCHkHhIAQEAJCwD0BGUG4ZyUphYAQEAI7FAERiB3K3dJYISAEhIB7AiIQ7llJSiEgBITADkVABGKHcrc0VggIASHgnoAIhHtWktIlgfmHR+Y0jNACAj4DcAeI9gJjd0v2YQCbADwJpofTIXrgiYGel1wWPyVZZ+fivdMcHgTwfj/5AWQmbOFtAMUY+HlTmH/Z3x/9m9vymjsi8w3QowB2y+dh4PhUrO9nbsvQpZs//8yGpqbX5jMhwuBPg/DeKRwJfwXzJsB4jMHrmsI7xfr7bx0pp07JKwSsBEQg5H0IhEAkEgm9shmfZpO+BaAZQNhbwfwMMy4bH9nt7g0bbh53mzcAgdBVNczAj3i84WtDQ3e+XsqWoAVCtWmcw/9MwBkAdi1V/1ThpbsyhnnF+oHoCx7ySVIhoCUgAiEvRtkEmjsjHyGmWwhoKbsw8DNkGqclEr1PuClrmgQiVzX9AQadlBzo+XUxW4ISiNbWyGyEcQ6YvgbgnW7a75AmzcANTWG+xMtIqIz6JGudEhCBqFPHVqJZ2VHDRjqbgX8FMLtInbkpJeJsGuI5tikne9Y3ADojGev9KYCJPA7P9ApEttKnyUwfk0jc/UcnG4IQiIO6Ins1pOk2Bo4o0tw0gFcBSgOsfrt7leD+FBvUnRro/V0l3gepo/4IiEDUn08r0iI1Pz5rp21XgnE+gJCt0jQTHiHm/9cYRkz3FdvVtbJpfHzkIJPMcwAs1kxJDRNhRWKwb61ngSCsTg72rXQJguZ3Ld99lskfhGleCOA4e3uIqHf0rTkrnKa+yhWItrbF+5tG+D4CPqmx+RUQX2OYoTvj8Z6NdsGc37V8j4ZM+lhiXATQhzT5XzXYWBSP9/ynSx6STAhMEhCBkJfBDwFq7Yx8GUzfsXWmGWbca4b4Qi9z4ActWDYvbJrXvr1ofYLNmKfMcProof67X/Y0gvAmENaiqaU90k1EP7F9mQ+bzIuG4lG1EF3wlCMQakG/cYSimpHDViKsGn1r1143azJqNPfyRjX6oFsA7GMzsuQoyM9LIHnqn4AIRP37OPAWtrcv6zTJXAdgF0vhwwT8y7578/XRaFTtDPL0ZEcks1+7DuAzrRkZuDwV6/u601STdorJv0CoqrXix8BPUrE+tWhcMOXlVyCc2gzwEIdDy1L9PS96gghATVWF01gDUNcU5QMeHW3iyIbHoq95LVPS77gERCB2XN/7anlXV+Qdo+N0HwiHWgrIgPirycHo90qtGRSr1OFr+oWMwYc5jUimQSDQ3Lx8rtGQeQzAx7bby78ZC4cP29C/5i/2NvgViJb2yPFE1DN1tML96TCWP9EfVduAfT0OHDMM/koqFv2Br0Il0w5JQARih3S7/0a3dS5dxsx3WKeW1Nf1+PCuX3AzFVKq5ub2yBEG0X3WTpOZTk3Fe+/U5Z0OgVD1tHR0X0PAKkudW03wEUOx6IYgBEJ14rNG8ABAbZbytsAwFpbaNVWKofp7e/uSj5oUesQ23VRUbN2UK2l2LAIiEDuWv8tqbXYbZghrATraUtCrBmeOjMfX/raswnOZdXUw0DNvbz5VN3U1bQLR2f0VYnzX0qY3TeaFQ/FoLAiBaO2ILASyQjgrXx4TLkoN9v1bEByzIlfYBvXP5yRjfdcHVYeUU98ERCDq27+Btq6tLdLKBj00Ze2B6LrkYK/60i66HdWLIW2d3Z9n5nPBdCeH6N55c81nndY1KigQqoHa09Fep5i6urrCo+m5qwE+2cIl8K/73OL/wwA+vL0eTow14RhZi/DyRu64aUUgdlzfe255W3v3FUy42M1XtefCfWaooEAENsV08ILIASGTfgHggO3Nprsaw5tP7+/vV2cdAns0U2Wvk8kLE4loMrBKpKC6JSACUbeuDbZh+sVpHgqh4ehY7K6twdbmvrRpEghq6ehWJ8M/Ozn9A7wURqYrFlv7B7t1XkcQLR3dhxPwc+v0EpjOSsZ7b3LfcncpK1mXO4skVS0REIGoJW/NoK3NXYv3M9LhAetXb7Gtn5UydToEQvuFz3i8sYEX6Q79eRaIzu7ziWHdTeS4vlEux46OJe9LI9RPwLzJssrbBlyuSZK/hgiIQNSQs2bS1La2pQexwWpXzGTwuKAXVf20bxoEglo7u38AxnlWewj4ZiLWd5nORq8C0dbe/SMm/KOb0YkfJtY87e3Hv9OkJhVV9tOT/15E7MqtT/LXFwERiPry57S1pqWj+zgC7rdWEERI63INDlIgsjuoDPwLiC6xnRAvulPLi0BMLFDveReAiKXtz4cp3Tk4eLcKpRHoo0KajKbfjAJ07PaCnc90BFq5FFbzBEQgat6FlWlAS/vSU4iy5x8sH6Ll33lQrvXlCoRaWxkbM94Lg09g4CwA+xbYxPzVZDyqtrxqd2p5EwjNQcNp/qJv7ey+FYzTKyFI5fpT8lcXARGI6vJH1Vqj21PvdQSh60jdNtiprumO5kouQlSIQLj1oqSrNQIiELXmsRmyt7V96RdA/MNyRhA1KBAPIcOn
JJPR/yuGXQRihl5KqXbaCYhATDvi+qggiDWIGhKIVwBcuN/e3Ocm8KAXgfjEkSt2fsdbo/fYorf+yuCR4+Lx+98I+m3RrXkw8CyPhxYMDa3ZHHR9Ul59ERCBqC9/TltrdPvpmXBBarDvareVVqlAZC8zIuA5Jn6UyVhX7OS2rq1eBELlr+SagOxicvt2SjodAREIeS9cEWhZsPTDZLI6/bt3PgMD16Zifee6KsBDIi8dbrmL1B7MckzqxV5ViH09h4C/wOQjEonok0HYYy1De36FcUcq3ndakOFRgrZbyqsOAiIQ1eGHqreio+Pk3TIYfxCg5kljp2n3jZcOtyYFQnOSuljE2nJejiBGfuXUL3lrm4AIRG37r6LWa+L6bCGTDkskep8K0pB6F4jW1si+CFM/GB/Yzm16YjFVY/ysIN8VKWt6CYhATC/fuipdF6K62Aljv42vd4FwiOb6J2T4sGQy+pxfbvZ8DhcfSTTXoADvAOWIQOwATg6qibpLbtSOGISNo/xcj+lkV70LhGq39j6IEterevVjS3v3WUS4bsrlTgHfOeHVJklfWwREIGrLXzNura7TAejmseE55wRxo5xq4A4hEPrLl7YyjKNTsZ6hch3d2hr5AELZkOLvsZQV+J0T5dop+aubgAhEdfun6qzLjSLuBajLYlwgd1Lny9sRBCIrhJrrVQE8TWb6mETi7j/6db7DndSohuCKftsk+WaGgAjEzHCv6VpbOyMHg0lFCN3T0pC0Wo8YHd71u+WMJNQtaKGMeTURTrDde629za0WdzHlmam1iLH03GsZ/EXrC5GdtjPohNRA7++8vigHdUX2CqexxibgcBMyxGtdkr7+CYhA1L+Pp6WFbR2RFQxSF9zMtnVuKcA4LxXrWe9ln33uesyLAHzOXqYq31Mspgrfd+BlxGN3htPXPoCtYD5nv33Q6+Y0NwBqWxA5hE3jRwC/z1bPFhAflxyMKp/IIwRcExCBcI1KEtoIUGtH95cAXKXr0AF+Bmz8uwH6WTzeo8JYT4mEGolEQi9uDu0TzmSOBKkoqvT3thDb+erSDNzQFOZLdJf11PIIYnJKLXsZU+inU86YbIf9PJiucuI4v2v5HrPS5iGA+RWADtS8pVtgGCcmB3oS8gYLAa8ERCC8EpP0VgLU2rH0MwD/GMC7i6BR9yy/ClDuvmXeGcDc0ij5GTJwdmIg+kun0Ug9CITi0NoaeRdC2RHZSR44KubvLJL+KTao289UVWnfSIodgYAIxI7g5WluY3PzKbuEwuOXM0HNpYcDqO55Zlw8bx9eW2p6pV4EQjFTo6qXN1I3gGtKCG4pxGli3JhJN1w6NHTn66USy9+FgBMBEQh5NwIjkF1gNs1zCTjDejWpywrSTFBXml45by9OlRKGfJn1JBCTU05KcBvGVzHwzx45qsCDPw5T+orpuJ3OpR8lWR0REIGoI2dWS1PUl/BLm40PUsZcRkStDPwdgH0so4uMiqAK8B8JFDOBR5vCO8X6+28d8dqGehSIPAPF8U8b8akQ02ImHAVgvylTc4S/gvEigX5FjOjIyJwN5ewg88pe0tc/ARGI+vextFAICAEh4IuACIQvbJJJCAgBIVD/BEQg6t/H0kIhIASEgC8CIhC+sEkmISAEhED9ExCBqH8fSwuFgBAQAr4IiED4wiaZhIAQEAL1T0AEov59LC0UAkJACPgiIALhC5tkEgJCQAjUPwERiPr3sbRQCAgBIeCLgAiEL2ySSQgIASFQ/wREIOrfx9JCISAEhIAvAiIQvrBJJiEgBIRA/RMQgah/H0sLhYAQEAK+CIhA+MImmYSAEBAC9U9ABKL+fSwtFAJCQAj4IiAC4QubZBICQkAI1D8BEYj697G0UAgIASHgi4AIhC9skkkICAEhUP8ERCDq38fSQiEgBISALwIiEL6wSSYhIASEQP0TEIGofx9LC4WAEBACvgiIQPjA1twRmW+AHgWwWz47A8enYn0/cyqutbP7VjBO91GdPctmgP8CGCkAD481mY9seCz6WgDlWouglq5l+yOdWWyAFjLwEQB7AQjlEg0D/DIzDRHR/dNkQ8BN8lZcV1fkHaPjdB8Ih7r1sbcaJPV0EujsXLx3msODAN6fq2erCT5iKBbdMJ311lvZIhA+PDrDAmG3OE2MGzPphkuHhu583UdzJrO0tkZmI0TLAXwLwL4eykoz4RGw8a1UrGc9APaQtyqTikBUpVtcGyUC4RpV0YQiED44VplAZFvAwH8bZnpRInH3H300iVo7ln4G4B96FAZ7VRlm3JsJGec/MdDzkg87qiaLCETVuMKXISIQvrAVZBKB8MExIIEYBrAJIPdf28RzwNjdyWQCHh1t4oiXKSc1aqCQcTmDz7VMIRV0/DlbRyf+wDsDmFsE3VZmXpmKR9WUm/v2+fDFdGURgZguspUpVwQiGM4iED44BiIQjMcbG3hRf3/0b15M6Opa2TSWGT6K2bwUoAPteQn4ZiLWd5mbMucfHpkza4RuBtCtSb+NgVsMk27cd1/zxWg0mrGmUXaMpt/sYqJ/IsaRAMK2MpQAXpiM9d1QiyIhAuHmDareNCIQwfhGBMIHx5kUiLy58+ef2dA4e9slDFxq+/J/IWPwYesHoi8Ua5rKP2v2a9cBfKYtXZqBy5vC/H234tXcGfkIMd1CQItdJIiwIjHYt9YH5hnNIgIxo/jLrlwEomyE2QJEIHxwrAaBUGZnRWKn125n5qWWZmSI6NTEYG9Psaa1tHefRYTrbOKyhYFTUrG+x7xiKTJV9arBmSPj8bW/9VrmTKYXgZhJ+uXXLQJRPkMRCJ8Mq0UglPnN7ZEOg+ghAGpdIPswcG0q1qfWFLRPa2vkAwjRLwC8x5JgC4iPSw5G1S4kX4/TqISIekffmrNiw4abx30VPAOZRCBmAHqAVYpABANTRhA+OFaTQGh+CGpcuDo52LfSoWnU2tn9AzDOmzrqwMrEYN8dPnBMyaLWNRpHKMrAEZY/DJvMi4biUXV2pCYeEYiacJOjkSIQwfhPBMIHx1oWiIMXRA4ImdnRwwGTTWe+DyaWJ5NRtbBc9tPevqzTJHMdgF22F0Z3NYY3n97f358uuwJNAVlhGqXjwDiNgU9YdlmpxfVNBDzNzL2Z8fS969ff89dSNpQSiObmU3YxwmMRED4H0McBvDNbJuGvYF5PhB9zGj/3y7S5eflcI5w+FkSfUbOJAPaxbQRQByafZ2BtxghF3W4rbuns/goxvptvf/6Ap2oPNYxfTMBnAewJYBjMzzDhDnMsvboYs0gkEnppY+hAInMlOPthsL/F1s0E/AaE22aF+F6361o6/2SnVBtfO4QNVh8/6gDjxE66LHM8wjCuyZ/DEYEo9Ya7+7sIhDtOU1JVk0B0dCx5XxqhfgLmTRpZZATR2r70C6DseYf8MwbwomQsqqapAnkc1ka2kEmHJRK9TwVSSa6QbEeeoW+D8QUAs12U7epgoZNAjA/v+tCspm3ngvD1SVFwrvTPAH0uGev9ududXGrB32D6AYDDi2w7ttfo+vyJTiDIMF6GyT8F+H3
6pvC6xvDOkf7+W0dsf6e2BZFD2MT1AH3IBfs3QPxtpHGdR+F0W49af/spp80vhcOZRjlJ7cIjJZKIQPhgWE0C0dbZfSozbrV2Jky4IDXYd7W9abmtqVGAjt3+N/7NWDh82Ib+NX/xgcIxS1vn0mXMrKas8uE5AKazkvHem4Kqp7U18i6EqDfXmXoqloFnYdAJqYHe3+ky6gSCwKfxBDvdtmCn+tMMvigViyp/FDsTQq0d3V96ey3pKpdCp6vvVYONRfF4z386GWMXCALOZWTFVYVT0T3aTQ8uz884FMlDZjhz0lD/3S+Xclput95FDHxDs5XaKftjBmfOMyl0n4TaKEW4+N9FIHzwqxaBaGtbvD8b4QdsP27HmDPNXYv3M9LhAev0EjH+IxHv+7wPDEWz6EY2zLgjFe87ze3XdLEKimzTzU6NgIz/ApABcSOAdnB2Sm27WGUL5/6xJpygO1ioEwgAGwHsbbFLbQn+LYGyHTKDDyTgo5qObCvDODoV6xlyalNbZ/cSZtyuEYc3oMQM9GS2PaoNbP4DKPvFXjhiKnG+xi4QuemZ/OFLNcUYB+gPudFEO4DnzfHQ4UNDazbnbS/CPjvdQ4z/YpDaZh0qwuRpMtPHlDj5T62dkS+D6Tua0ZTi8hhAfwb43SAssB0iVTvxPmyJDCCxmHz8yEUgfECrBoFo6Vr2XkpnegBqntoE57l+3Y6noL/q87Z84sgVO7/jrdF7pi5W81AIDUfHYndt9YF9SpaWjmVdBFOJY76TzDDwnaYwf1c3z63WXsIm3WRbPHfcEuwgEHkbHKepDlqwbF7YNK8FcIJbv+TyPJzr0PLZthJh1ehbu/bqdn/lvuA/y+B/tU11FZ0yLBCISSN5iMOhZan+nhfz/6QYDGfwnqHB6NOWtjh02vyMyVj1nn3wuP1Q5cEHn7i7MavhWzQxUrGK9MONYT7JaV1Cv5aFN8B8/tjIbrdZuah1kD+9ikMNMn7oMFUmAuHjRycC4QPaTAhE9qtt1tY9OUT/8PYX5ckELNJ8QRb9KmtpX3oKUXbaJ/+MMfAZP+ce3GBra+/+ERP+MZ+WgZfCyHTFYmv/4CZ/sTQtHd3XELBqMg3RdcnBXvX/jtM4EyfHcS9AXdvL1s+vFxGI4VKH/7Q7uQjPIc1dyWT0FXu7Wju6zwayZ1Lyz+sGG8fG4z0qGmnRp21B5FA26R7rhgBiXJmI912iy6gTiOx0W9g4yioOTpXq3v3sl3yGlyaT0f8rYiy1dEZOJyZ1sn5S1In0u+eygSMNrAGRes/zzxYYxonJgZ6EUz0Oo2qVXASi1Muk+bsIhA9ogQiEj3pLZPE8/zzdPxpNZxTIj1Q3OikVbj3PrrUjshAgNTc9S/2bk2g5CgTzV5PxqNoFVDTGlL0eANvIpCMTid4nrH7Urgu5ELt8GV1dXeHR9J53AYhMCnGRqTydQHgIz0KtnUuvBfM5lja4mSrKJ6eWjm41klCn/3MPJ8aacIx9mq+tLdLKRvZ8T34nXMZJTOy/i5aOZc0E80FrOP7pfteD/zlXR4kiED78UGUCkQF4kMOhz5b6Amxr776CCRdbmhxIh+2EcLoEQrvDiOnUVLz3zlLunFiHCT0KkFqbeJJBTzeQec3gYPTPUzvuwvsgALgKY6LK0a336EQsF2L9cwx0APgHAuaYzCcNxaOxUm3J/72Ac5F1CI1P3jSZF7qpT7NF2nWnnbdVU4Z2FFv4ruqFxIGRTsim9V1366taSycC4cNjVSIQapFunXXvd6mmaC4tmtYfjaYzCmpKi1rau28jwqmT36EldiWVYmP/u8MIItoY3nKym7McDiK2JBXvvdurLaXSlykQL5jh9AI3O4pa2iPHE5Gyf2Idoci0mZPNuhGPfUpMu4OsyLSZri7Netu0vuulfFSrfxeB8OG5gASiSLhvVn5RN7jZd6mMg+gmyox/b999Qy/bFwNLNaVg3n6a52U1AqGdZillt+7v2m20wDAD9xF4dWN4537Nvn3XVWk7eMIlqcG+K90U4pD/otRg37+5ye8lTVkC4SGqsP2rXoWX/9tOjSf+5pHb3/Ro78XEuGJ7nqnrQJodcJ4/LNRBQ2rIDBDwwVw9IhBenJRLKwLhA1ogAlHih5k9nbqJWsD4niZK6r3pMH/xif7oJi/mT9eUj5MN01lfbmfMOg2bvDkTW1AZvRyie+fNNZ/1IqjldvDl5i/l1/ldy/doSGdaCVA3AKqzGRMnudXjbYrJ1ahI9+UP8P8Cxi9L2Vr4d/4UgIMtAjFld1tb29KD2OBHAOw6MVDBX2DyEYlEVG31dfVo1qlEIFyRm5pIBMIHtEoIRN6s7O6lnbZdCcb5Uw7D+ZhSqaddTIpPe/uyA00y1YKzCkNR6slOyZFJt46OzvllqcCBpUJtlKosCIHI3v0x9vpc0wh/wgDNY+KDwdmO9b1FD9N5EYjicbsmm1li228pHKX+/nyY0p2Dg3ercyZo6eg+joD7LZmm/L1UYfm/26ZURSDcgrOkE4HwAa2SAqHMK3IwycsOEvXDO5wAFfYhu4Mn+wR8ujlfrMM5iMBPbaszBKGMeTVR9tyB7SCco3PV9N6Pw5S+It8p2VPOlEBk22Oa5xKgQrjv5+P19DaCEIHwhXhHySQC4cPTlRYIZaLTlIqXa0a1p5uBn6RifWeU2rbpFZNuFw8AV9MZXutS6S0d64pcsDk3xbxKBq9IDEQfn2mBUMHyQuHxy5nwRQ8hJZTZamSkDh5uD90uI4isO2UE4eYnUDyNCIQPhjMhEMpMh/3dKpzEV5OD0e+V6uT1sZjwlD2Ugg8kBVkKdrxM42jFVjm1dC3bn8b5KBh8OhhqvrtYED91+GphcqDn19ZyKjmCyG29/WnhqXh7y7JRS9VJ5xRAvzKY4vF4z8aWzu4LrRFaPa1BlDGCYML0LLrLFFMQP8FAyhCB8IFxpgRCrde1tkcuAmVj01ifkrF+8ok10Vxd3UDnBdPEgubc1QCfbMk3LdFcS9mlpueamv76wQyFVhBjKSg7fz/10RxMq5RAKPsaZm+7KRdm22rXNhB62KT7G4zxJ/faK7TFaZG9rF1MLgVCN2U4XXG8NL8vz+sHmkV1z2WUerd2hL+LQPjw8gwKhONU09sj6qJxbfLN1N8HgccbG3iRm1j9bW1LP8aG+ROG8XDGoJt09xDoY+jwg8hgiccwzz68UzSLuizpSDBus05DqVATPB5aYA1IVymB0JwYVg14CBk+pUToismGVkIgVGX20CnFRirlOE4zPZlh5sWpeNS6cF20io6Ok3fLYPxBy6hMBMKHU0QgfECbSYFQ5ja3R44wKBsuwjp1ou4EOCcV77Pe9aBrnSbcQTbqqatpKlvcoDQz7guBL4nHo79XlVXiRrmWBUs/TGweC6ZWVSUDiabwlhVuDrApG1s6IyuJ6ScWOAWdR8UEovB0+5+Q4cOSyehzLl/NgkOD0zHFpGzRjD79jArzp5xVaJCXVZRaZvPRpoY/r837zyGUStFrdO2sghiFuORf18lEIHy4d6YFQg2fx9Jzr2WwWtCcfNwGXV
ORYJE2H7YcIlJlbCXiFYnBqNrlpH1ywe4eAKjNlmAYxN9oDOHG0bTxfYDPtP496Dupyz3R66bzqIRAOJwtcLqcR+uT7M1zDRkV2vpjlhfBcUSoCfdd7HraKXVOjB5Z3UaobpzLPgxcnor1qcuTisamyqfX3Yeuu0Ndc4bmd2nDOMrtzXltHd2X5e6QyFctIwgffZ0IhA9oMy0Q2a+51sgHEMpeHbp990r2UBHdOCu8eVWpr+mW9shSIlJTLdu3vAJbmXllKh79mdMPXu0WajD5PAarOyS2H86a4Lgt92/W7aZev4hLekSzG8tTTCDNAnpBuInKCMTKptH0m1MucPJ4Olk3GpyWba65d04fYZX4uORgdH0pxzl82AwzjGNSsZ5+a/5yxMjhtyECUcpBmr+LQPiAVg0Cocxuae8+iygbJtraIbsKFV3kbIWrKzldbsssGRrbB344LYLDRUelD/lduP22EgKh2l4wr69EusTlQiqfOmn/8iZcACYV+iNs4/grg0eOi8fvV1tgpzzljCBUQbrpzVK38+UMoJb2SDdRdmpv+9So833oar3oB2CcZ2nAMBN/KTUYXe30AVPklkERCB8/NhEIH9CqRSAcOruiN6VZm1v0ZjBgGwO3GCbduO++5ov2HTS5LbPqXoXLHLZnjgFYmYz19bidfvDiCofLZP7MTOePj8zp052UnrjvGasBOtDa6ZjMi4bi0Uet9VdMIHRXs4L+QIb5+cRAVIWxmDJ1UyIES74JjiePyxUIp+nN7FkM5gth4i77RoQiHxNbiom6w90OTh8w1Lpg2adgmmpUvH26bbtTRSC8/MDyqu4jzw6fpVoEwumLTl1N6XLBOntKu3H2tlJ3/qqrLjcBNJp1PvEc2/WODu8EP0MGztZ1dAG8RLovzHyxbwD8P4Dx1MQ/8LvVxzqAuYX10s1jw3POsQtKpQSiREwpdc1nYuJaTfWYHwPo4wVTeyqqKmfXBfJ3JzhGaC1XIJQVDhsR8miHQXgSTP+Ts/mTAM3XjHLUaOCfU4PRG4u9C62dkYPBpKY8J9c9cukn62HwLjQRLr3YyXMRCB/be3tsAAAUgElEQVQ/OhlB+IBWTQJR5IvOy9w/tS2IHMImrgeydx0H+SixujcTMs53u8DotvJsJ542bgBYnZ72+qgrSm9rCvMq3fbeSgmEMtpjTClrO9Ngupob6AbbpgPHqLlBCEReJGaN0M0Aur2CB6BCnVyYjPWp2+VKLm7nQsSouz7sIuFYNRMeMAC1y+74XCIRCB+OEoHwAa2aBEKZX+6CdR5B9tBW09bFRNlpIy9CkQY4xgj9i2FmRtmg2zXD/Gz8o7RhfDdIocjOxW+ks97+Qr08H/3ThUvVtZ8X7rc39zkdPqukQCh7D+5Y+kEDvLpIdFprs9SI7jGT+AJ1X7TuhLzTKeegBEIZk2OvBOIqAPu64K7UIMXEZ9juuS6Z1UPMraxowjS/gTDdCMbpIhAl8TomEIHwwa7aBEI1wWHBelg3v+6iydlwFUhnFhughQx8JHc/RX4x/A0GNoKxnojuH2syH7FeGZm9JS2Mc8D0tcKdTnRXY3jz6aV2WbmwcUqS7FRZ42uHgLCcyWwHSE035BdDhwF+mdiIZ2Dekh7Zbagao7nm1xcMk/6RiTsB7D85NUMqzAb/noG1GSMUtYus/V5r1RGbY+PHrl9/z1+toIIUCOuHRbhpa3MIxhka9mq6708MWmeCfrw+1vu/bkYNDv6n5s7Ihw3QP4FxhIWP+kB5jmGstR7elFhMXn9FhelFIMpnKCU4EMhuiWW+ipmX5Do6VzusBKgQEALVQUAEojr8UNdW5COtGkT7cdr87AyH26hr1tI4IRAkARGIIGlKWUJACAiBOiIgAlFHzpSmCAEhIASCJCACESRNKUsICAEhUEcERCDqyJnSFCEgBIRAkAREIIKkKWUJASEgBOqIgAhEHTlTmiIEhIAQCJKACESQNKUsISAEhEAdERCBqCNnSlOEgBAQAkESEIEIkqaUJQSEgBCoIwIiEHXkTGmKEBACQiBIAiIQQdKUsoSAEBACdURABKKOnClNEQJCQAgESUAEIkiaUpYQEAJCoI4IiEDUkTOlKUJACAiBIAmIQARJU8oSAkJACNQRARGIOnKmNEUICAEhECQBEYggaUpZQkAICIE6IiACUUfOlKYIASEgBIIkIAIRJE0pSwgIASFQRwREIOrImdIUISAEhECQBEQggqQpZQkBISAE6oiACEQdOVOaIgSEgBAIkoAIRJA0pSwhIASEQB0REIGoI2fORFNaOru/QozvTtZNWJ0c7FsZlC3TXX5QdurKOWjBsnnhDB9DxEsY+AiAvQCEcmnTAF4l4PdM/CiTsW7eXPPZaDSamQ6bbLb8HYB9AIRzdQ0D/DIzDRHR/WNN5iMbHou+5seOAn8Bz4cp3Tk4ePdGP+VJnpklIAIxs/xrvna/HXi2w2LzAsrw6kQi+qQTCL/lzyBYalvQ3ckmfx+gAz3a8QqAryPDa5LJ6LDHvAXJ588/s6GhaetiIlwG0Ic8lJdm4PaMYXzjiYGelzzkgwiEF1rVn1YEovp9VNUWeu3Am5tP2cWYNXYemL4MIG2CjxiKRTfUg0C0tkbeRWHjBmY+yTJS8OO/p0wKnTI0uOY3fjIDoLYFkUPYxPUehcFe3RtMdMm8vcwb3I5sRCB8eqxKs4lAVKljasUsLwLR1nbinmw0/BIT0y3q2VovAnFQV2SvcBprAOoKyHdbYBgnJgd6El7KU6OGxtnbLmLgG5YpJHsRahprE0Cj2T8QzwFjd4d6MiD8CGm+wM2oRgTCi7eqP60IRPX7qKot9CIQnZ2L905zeBDA++tJIFSnPGv2a9cBfKbNWWkmPALGDynD/zU2ttuWDRtuHldpIpFIaNOmzJ7jHD4QhLOIcaS9Q2fgWYSNo1L9PS+6eQmK2KGyvwLG9dxgrEn19/wRAFvL7OqKvGNk3DiSiL8J4GOF9dHNY8Nzzsnb73rEJ2sQblxXtWlEIKrWNfVnWL0KRHN75AiD6D4As/NeU507UyjidpqopWvZeymd6QGo2ep5YvzHrIYtX+zv71eL2sUeau2MfBlM37FNbw0T8E3O8LVuRgBKuF7ZSGcz8K/W9gDIMOOcVLzvh8WMkBFEff1uRSDqy59V3Zp6FIiurq7waHruaoBPtsD/EzJ8WDIZfc6LQzo7I+9OM35mE4ktZNJhiUTvU0U75vbI8UTUY+vUtzBwSirW95gXO1Tats7uJcy43VZeyXaJQHglXd3pRSCq2z91ZV09CkRz8/K51JAZIOCDFmedk4z1Xe/HedrRCOGi1GDfvzmVp9Z2TKPhEQI+aUnzOhGfnBiM/tyPHWploqWj+1sEXDolP+Hfk4N9F9inqPJpRCB80q7SbCIQVeqYejSrLgWiIzLfAD0KYLecz940mRcOxaMxPz6cf3hkzqwRPKA2Ik1OVzHuSMX7TnPqlFs7us8GcN2U+pi/moxH1fmUKWsNXmzKbkU2zYcBfNiS74WMwYetH4i+oCtLBMIL4epPKwJR/T7KW
tjcHukwiB4CsHPO5I1s0GGpgd7flWpCa/vSL4DYOndccvdQvkx7XgZ+kor1nZHveEotUjcXdqBFzWXg+FSs72eOX6SWg3hqvvyljXQIAZ8DcCiAubl8w2B+BgbWhJG5YzoPaWnat41MOjKR6H2ilF+c/t7S0X0NAZ/P7TRiYgzMapj9xf7+W0fseXSCAuApczx0+NDQms1+bcjna+voviy3Iyr/TxkiOjUx2KumswoeEYhyiVdXfhGI6vKHozXzu5bvMSud/gVAn9j+ZUmnpuK9d5ZogpoquIWAz1rTMZfOOzG/vuddatNNLm9B5zBTAtHaGfm02h3kYp9/GkxXwzS/4WaR1uvr0NYW+RQMepSBPSx5fU8xea2/paP7cALUNNKsfF5iXJmI913itSxd+ra2pR8zDV5LwH8T4X4yjcfj8R51Klo7MhGBCIJ69ZQhAlE9vihlSUFHb/+a1xWgExaVTu2OScT71Feq49PaGtkXYeoH4wMqEQMvhZHpisXW/iGfqeICAdwGYP3bI6mrbAuoRfkx4YHxRj7ZbwgJp8I7Ok7eLYPxB20Ly0+TmT4mkbhbbSed1qetvfsKJlxsqaSsKa5yjRWBKJdgdeUXgagufxS1pq1z6TJmvmP7NkYeCqHh6Fjsrq1OGdvalh7EBj8CYNepaUrnbZnYGXO3pb51jeGdI9apjlICMbF9k89TnTkz70SE4wHskrNlDOB1gPGXSdsM+mFyoOfXjgIEqLY22cThZQZiBHqdwAcw0ALgnQVMApiX135lF07DKDl9xmSses8+eNztKWSvr+Injlyx8zveGr2HgSPyebPba8dDC4KYXvJqj0ovAuGHWvXmEYGoXt8UWNbRseR9aYT6CZiX+2PJtQTN+kO+3JJrGLm58FWThjCdlYz33mQ1rJRAWNMGskhtKZCBFBOfMTQYfdpaT2trZDaFaFVu7nzybAKAogusfl8FJYJImw/bdjLli3uFgTtg0Oqgg/FpeCphKhBxv+3yk08Ewg+16s0jAlG9vimwrKtrZdNo+s0oQMcW67Tzf9OsIUwps9g6hGbqRLsff+YEouTJXt3BsQwzL07Fo/cH7faW9qWLifhW7chle2XDIDwJ8D2GaT4wMrL7s6VOJhezU7cBgIFrU7G+c4Nun9vyRCDckqqNdCIQteGnSSs1P8BoY3jLybqTtgV79Jl/DaID8tNNxTqTgs6H8XhjAy/q74/+beZHENw/1oQTSq0nqPYbDRl1SGwydAQTLkkN9l05HW5vWxA5lE1Sh8tUKG03j4qa+luD8XMzRHd4HWFoBaLEmQk3RpWTRgSiHHrVl1cEovp8UtSiwu2u/JuxcPiwDf1rts/j50qwpc0w0VnE5j9ZdkL9yuCR4+Lx+9+wV2rfW6/CNSRifZfZ083ACKLoNkubfdTS3n0bEU6d/PeA76uw81DRaqlh/Nu5barW6S03b9o2Bm5poPT33WzNbenoPo6AKaMhJlyQGuy72k1l05FGBGI6qM5cmSIQM8feV82aqR/HXSu2H+tGk0ILDc6ok7H5bavadQjNVJbbOoAiHXAQaxC6nVTFQNo7LC5x6MyXUzSZDj74xN1DDbNOAmUFWd3FkL8oyE0VamRxQ1OYL7GP2KaM3nQCYTtH4qayINOIQARJc+bLEoGYeR94tsC+eMyaaYWC9YfcFNFIhlYR44pcpdo5+cLFcOdRygyMIBxHPTqQBfY5TJV5doKHDCpS6mgaHcTGUiY+HMB+brKzOnuQ4ZOcYjrpzkDo3gU3dQWVRgQiKJLVUY4IRHX4wZMVbrafas4wZBcvW9qXHk3E6qRy9otWtw5hL7/YWkXFBcJjB18NAmF3bja0dppaiLAEDLXhoJhgOJ6pkDUITz8bSeyDgAiED2gznaXgC5/wHNLclUxG1ZWV2cf2dTk5UijIq+lwbSOUojt/Ki4QHtcQqlEg7O9PdjF9Vvo0MKndR/va/64ORI4P7/oF+44nzbZnVwcgp/P9lRHEdNKtfNkiEJVnXnaNmjWCMQY+Yw3rbF9/yMdtsh+uss/pF6xxaMTH2gARiLLdOVmAOr+BMC4E09dslwe9TiYvTCSiSWtt+lPccg4iOI9ISSIQNfoO2A/AWePvFAiIbZRgC8+QYabjUvHeBxUKTVBAx2202ZFKZ/dXiKGihk4807xIXax8nStrYQRhs1t78Y8uvpLunIucpK7RH3SVmi0CUaWOKWVWsXMKzV2L9zPS4QEA6sxDwTqDfR3C2vkU7vopHtRvRxaICY7mpYCxF8Aqyu4zTtuGS/nT+nd9hFb9yEBzUj7wWEyt7ZFvgnAUCGtLHfCTKSYvnq7+tCIQ1e8jrYX26QXrVJHT+kO+IPvcNQGP/m2nxhPfNRbK2E5qlw7HsQOPIDSh0AuCGfp9vQrDnOgPKuoWqoOM5qq9n6JIvCcRCL8er858IhDV6RdXVjktJtumkAo6ead1CFXp1FhP/CAyWFIsTPYOPYIoDLUdWCiPls7uiy3bkdUwUHuSPbtuEcJagI62vDS/SxvGUU8M9Lzk6kUqkqi1I7IQyN63PRlOHKC7GsObT9ed3heBKJd4deUXgaguf3iyxv7jVdtRm8I7XTRlFODQsdhEJLvIbRDtYY0W62ZP/Y4sEAcviBwQMukX+am8rPOY74OJ5eXcPaFdWyhywK8wym92WvHyVKzv6+XcKJcVHwNrQLTI8mKqCLyLkrGouryq4BGB8PQTrvrEIhBV7yJnA+1nHQAeMoGzCXRPPuKr0xmGwvMQfClA77dcLKTdOWO3ZkcWiImOfO5qgE+2dqDMfFoqHu31+2q1ty/5qEkhFaJ9e0wnTSTdfPnq1LYxq2EdTYQ5zz+vG2wcG4/3DPq0Q7tY7jSSydchAuGTdpVmE4GoUse4MUvzpbkVjKtAUDGT1JSA45SH5rT0f4Jo1/zlQEpsSt01oWzckQVCtb+9fVmnSeY6yx0X6p+3MvPKVDyqDiR6uhNazfk3jlDUeseDmzDlucON6hpQa/ynLTCME5MDPQk375M1TVtn9xJmqMCD1vKGTeZFQ/GouoNb+4hAeCVd3elFIKrbPyWt0+xiUZfJZ3cvAXBcZNZdNmOtzG3Y6DIFouSOGy/l62BN9zZXJdJj6bnXMviLtvrTBPpRiMa/7Sbwnsrb3Bn5iMFYDdCBNl+UnC6aP//MhlmzX7sO4DNtdrwB4m8jjevcTHsVOYsBAt04K7x5lW7tQUYQJX+qNZlABKIm3bbdaN0ulsm/lghLobmuMp+14OCd6y/GIucgdNefqqtP2eRVTp1XtQuE4tLWduKeHGpYB8ZBGk5pED9OoNvJ5IGRkd025k9Eq/MqY2Ovz2UKHQ7CWW8vBv+9Paif2mE22sSRUqHNVb0Oo4+8Sa+AcT03GGtS/T3qKlTryIba25ftbRqZU5xOc7u1Q0YQNd6h2MwXgahxf2YDwY3TfSAcam9KqVGAfR1iMn+J09PWerx04NoLjyYKewOgP6v/IJiXJWJRNbWRfbyUPxMjiHydbW2L9zeN8H0EfDKoV0oF6zPM9CIvd1u3tkbehRDdCWBh
ETuGAWzK3kwOVn3AXiXu934IGT4lmYz+X6m2iUCUIlRbfxeBqC1/aa11GAmU3HKpi+WjKlCxf1KxvjPczJ977cBb2rvPIsJ1TuGv7Xv4vZZvBzTdU0zW+tRdEMas8WvB2fsnvIT3tpudYca9mQY+54n+6Cavr2ixaSKPZanRj5qeusrN9JRW0IHnw5TudDvN5tE+ST7NBEQgphlwJYrXhX0utv6Qt8lpHaLYVaQlO+ASwfQmQl8bNwC8woHNlNAetSQQufZQc+fyjxMy3yHGkbaYSqVeh8zb242fMAxclBjoU7uPPC1w2wuf2IZrXJLbZeXl8qI0M+4LgS+Jx6O/L2W09e8ygvBCq/rTikBUv49KWli43TXbtWgPVtkLs48+yr2Qx02spEgkEnplE53AzBcA9PGp9zhP3T1VgwIxiVitCTSM0AICjlKzZbmw3nMtPsioqR4CnmamtekQPRDE4Ta7j3NTe10AnQBwB0B7ALDaMQzwy8w0RET3jzWZj7hZ89C9mCIQJX+uNZVABKKm3CXGCgEhIAQqR0AEonKspSYhIASEQE0REIGoKXeJsUJACAiByhEQgagca6lJCAgBIVBTBEQgaspdYqwQEAJCoHIERCAqx1pqEgJCQAjUFAERiJpylxgrBISAEKgcARGIyrGWmoSAEBACNUVABKKm3CXGCgEhIAQqR0AEonKspSYhIASEQE0REIGoKXeJsUJACAiByhEQgagca6lJCAgBIVBTBEQgaspdYqwQEAJCoHIERCAqx1pqEgJCQAjUFAERiJpylxgrBISAEKgcARGIyrGWmoSAEBACNUVABKKm3CXGCgEhIAQqR0AEonKspSYhIASEQE0REIGoKXeJsUJACAiByhEQgagca6lJCAgBIVBTBEQgaspdYqwQEAJCoHIERCAqx1pqEgJCQAjUFAERiJpylxgrBISAEKgcARGIyrGWmoSAEBACNUXg/wNgXgWow6KHCAAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-31"><g><path d="M 371 851 L 371 894.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 371 899.88 L 367.5 892.88 L 371 894.63 L 374.5 892.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-32"><g><path d="M 371 1001 L 371.5 1026.5 L 371.13 1044.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 371.02 1049.88 L 367.67 1042.81 L 371.13 1044.63 L 374.66 1042.96 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-48"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 1023px; margin-left: 382px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="375.5" y="1017" width="13" height="15.75" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-52"><g><path d="M 321 1101 L 51 1101 L 51 407.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 402.12 L 54.5 409.12 L 51 407.37 L 47.5 409.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-53"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 1112px; margin-left: 312px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="303" y="1106" width="18" height="15.75" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-62"><g><path d="M 421 1101 L 531 1101 L 531 407.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 531 402.12 L 534.5 409.12 L 531 407.37 L 527.5 409.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-64"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" 
requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 1112px; margin-left: 432px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="425.5" y="1106" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-23"><g><path d="M 371 1051 L 421 1101 L 371 1151 L 321 1101 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 1101px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Any PM<br />wakeup event<br />pending</div></div></div></foreignObject><image x="322" y="1080" width="98" height="46" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmcJFWV7ncis6oaUZYHsqOD+nPcBh1bu/aeGhp4gEDTDVnVLLIIgqM8GJ1BRuCNG+C4/GRkGlAUkUXoypRmGUaWRqbozMrMal47iPwYR0FGFhsamG5AqO6qzDiPm5VZHRVxI+JGZlZVZtaJ/6ryLud858b94px777kEeQQBQUAQEAQEAQ0CJKgIAoKAICAICAI6BIQgZFwIAoKAICAIaBEQgpCBIQgIAoKAICAEIWNAEBAEBAFBwBwB8SDMsZKSgoAgIAgsKASEIBaUuUVZQUAQEATMERCCMMdKSgoCgoAgsKAQEIJYUOYWZQUBQUAQMEdACMIcq5YouWRp4qCYTb8AcJBDoVfJ5iOy2VSuJZScJyW6+gaPIeCuGrofB/A8wJvAeMAm3PWOffFIKpUqmrTZ1T/4RWJ8U1eWGJdnR5MXm7QTVmbxwIl7thcKvwDoYHdZBo7NZ5L/GtaG/N4cCAhBNIed6iZld9/g5wCs9jRItDqXHj4PANetswXWUB0IQofYcwAuOGBfToYRRRBBADwWQ9uRmcwtW2o1S1ff4KEE/BuAdiGIWtFs7PpCEI1tn7pK192d2Akx3AbQkZqGnypavGzD+tRTde10ATU2SwRRQTA5sYjP3vhA6hU/SIMJAq/bzEeMjaYytZqkq2/wewSojwnPIx5Ereg2Vn0hiMayx6xK09OT6GaL7gWwS7mjCddX4Lm5TPKqWRWihRufZYIAQNdOjO967saN107qYAwhCNQjzBQUXlIyCUG01gAXgmgtewZq09M7eBkTLpomB8KV4NKXYDlUwNmJRTgq6Ct1AcEVWVUdQUSZMBcvPrutvX3LXhS3+pntLwP0PpcQRSKcnk0nb66GIIDa7RsUXhKCiDxkGr6CEETDm6g+AnZ2nri31VZ8AMCHyi/yMzG2TrYt+8dgvKfciyxW1wB3rQTh7FqFA8miK5lwlvP/DPzKsicPz2Zv3+wWVeNBKA9xO4C31cm+1N0/dCWYzy23pzwZC0CsIksUQqwBaqk6RwgIQcwR0PPdTXdf4giA7pz2FhgP2oW2FdQ2+c8EnDEtnyxWV22qehKEEmLxoYldO7ZRioHDHEIVmXllfjTl2S2lIYgnAf4VQCsr9Qn4ajaT/Eo1Sro/MgCoD453O3fECUFUg2zj1hGCaFzb1E2ygYGB+PbC3jcAfNL0RFHe9tjVmziWiNY6vgJlsbpK5OtNEEoMD7FPxfmvzGeS5xt4EE8C9DWAf1iPMKJLliKBzmPwF8okURJHCKLKwdOg1YQgGtQw9RSrr+/4dxUQGyHgwHK7Ewx8Ip9JPtDdndgfcRpxhJlUEVmsrsIAs0EQ2kVhxoMdbbx8ZCT1J6eYOg/CpthKiws3Oc4sVBtGdIeXHoNlnQrbTglBVDFYmqSKEESTGKoWMb1nH/jRiXh82caRW19SW2NccWX1HRh5sXpgIPHW7ZN0JwiHlGV9Mk6F/nR67Sb195IlK/aw2uOnEUh5Me91xMVfAPiXDLpmcny3e/126Kg2NLuwfMMtQXhpQiV12QI6GwShwVXZR3umQUcQygaTHP8H57bUasJMHsyIVqNg/xNi9JAQRC1vZ2PXFYJobPvULJ12gnGtM2jCGNMehqkAfgTxxht7vtS+aOv5IPyjgxT8mn0RoE/lMsPqEJbnwJ5OFwauz2eSZ0Y54OcNq0UnRJ0Cc0cQmEG+FVkCCOKDMw62+XggQbZ2jZEJgJfHqfirAsfTQhCmb0nzlROCaD6bRZJYd/ahEl6qNKQNY0RcrNYRBIrFoxGLfRXAYAShCwy+MJ9JXaGb9D3eEOEJFHggl0upE8cmD3X1DV7nXJhnwoX5dPJbJpWDyswZQUQIMSkP4o1Yx6QrNcYWG3zYWCa10VBnT3jJnowd2t6+3RKCMESwSYsJQTSp4UzF9p561YcnXGckVPORFqs1BPEcwI+6Tm2Pg/AImH6tFsUZ/DECPggg7tLHdwLr6Rn6EFusckntVa5TJKJTsunhNSaYaHJRbSablmWzw4+Z1J9rgvBZI0p1xDefNDIyUnDK4+dBqDCfexxEIUVdeEmlZenvX7mPEESto6ax6wtBNLZ9apJOE2uHX/xZ42movo0Xq/Wx8mnxXyPgsvY4X+VeWP340lUHttn2da6tnICPBzMwcPqi7YXXUwAdvQMcuqUj/sJp7glTB15P/9AqZlYHzcp79/keFHF8LpdSifJqembDg/DKC/hN7iEEMTN/UoQwky68lMuk7u3vX7mvEERNQ6bhKwtBNLyJqhdQs4XV98tc7blv34afA9Szo0fz2HwAQWyGZa3IrV+T9dNEu98/IHSkmTSNvB3ddl9mOiU/OvzT6lHeUbPeBNHTs2Iv22q7n4APO+Tz3YUURBCaMKJpmMkVXtqxwUEIoh6jprHbEIJobPtULZ1uMgSCv5b1J3F5ufpaDBPEhyCKzDg3P5r8flh9zUL5VrLp8Gx2+GF3XU2YyCjMpNnua0QsYbJXfq8nQSjPKm4XbwRoYEb/zHfCxok6jyeIIFQb1YSZNCfwp89gCEGYjozmLScE0by2C5S8a+nQ+8kuxer3dRQMDBlp4vu+oR535/rdUuYLyJrJO+jQlWehGQgPM3X3Dp0DYgdZhdeJMjxqJAhaPHDiHm2F4keIcAq4tLC/k6v/LQzryHxmzZhOLgOCcKXpDg+vubzQGbvbhCCijI7mLCsE0Zx2C5Vac+/DJrZoWX798H/6VdbH980Wq308CO1iqq7/vr6Tdi9i8h6AOiu/B53KdXscDDwTR3Egk7nt97r2NbqVtmqaeEehYJcLzHI21yKIv5RLp77jt6U3jCA0YaawBXoXEc84PwMhCNOR0bzlhCCa13a+kuvXE2A0WXf1Dp1MVFrEdT6hi9XaMwqEi/Pp5OUmEPuccfC9nUy3NTdoPUHjHT2mtmqOjd36gol8JmVmkSAKIL7ogH3w3aBLg8IIQhdmCtqI4N5B5U7xIQRhMiqau4wQRHPbTyu97sSx6VZQ/ZWk4YvVPgTxhXw6qc4zhD5RCUI1qNma60uCbo/KL59RqKABBWaBINRVo3mb+OyxdOrxMNkMCcI4zOQKL3lOmwtBhFmk+X8Xgmh+G3o00Nz4ZbwYq1/cRmg4ppoJ3il4NfXdROgXZtLcpFeX1Bpu4GsmCMLLzHgZjA2WhfuKE7F1UTwcE4KIEGZyh5c86T2EIFpw8nCpJATRYjbWnX2oj4rBC7rVTPC1EoQ2lMb0mdzo8A+cbXf2JRZboHUAdp/6f7hHVA1mNS5SV9PljDomBKENM2kwc4eXdLfRCUHUbLKGb0AIouFNFE1A3cGqaC34lg70QuaDIJSkmkSEd3fEd06MjPxkW0UT98QZ5RRxFOyaiCDcYSYvZjPTwGs9LiGIKKOjOcsKQTSn3bRS+4SH6qmh72L1fBGEZvF5xm4tjZcRtnOnaryahSA0YSb3DrfQ8
JICSQii6qHSNBWFIJrGVOGC6s8xqJg2vRJe212CVX6k/WbmSfIPzcwXQfhszZ0mMu+Cffje/+hYTdVoFoIICzN5wks+t9AJQVQ7UpqnnhBE89gqVFLN2YfQxWW/RjULu6qob3vzRRClyc6zNZenQyY9fYNfYeDLFT3rmVrDjV2TEYRvmMm1e8k3tYcQROgr2fQFhCCa3oRTCvjkUtJeLGOqsvfksaqpX6yeT4LQbM0thUwWWfYzrkuMjHdzmWLkLNdMBOEOMzl2gD01Mx26v9coBFHNKGmuOkIQzWUvX2k7exN9FpHKmbRzpVA1N4c5O9CdifDbSjqfBKFYy3PHA9MpTPZvZu5eqm9qjWb2IHRhJuVdkW2POK+gDRpDQhAtMnkEqCEE0Ro21lwbCtNsnUEI6NpV5T2L1fNMENCk3rhenScgwjVlBasOt5kOkWbyIMoE4QozIUWEYWYMl9OhB95fLQRhOjKat5wQRPPablpy/dmH+izG6jwT3TmC+SYIDQaPAfyM48KiuqfWaHYPQrOb6Ukw/rDjXvHg8yJCEC0weYSoIATRAjbWnn3QHH6qRlWfvE6er/H5Jgilmyb1xrTKs5Fao9kJQhdmcuoUdl5ECKKaN6q56ghBNJe9PNJOnX3Y6xYACcePdV2M1eyO8ixWNwRB9CS62Sqtw+ziAmpWUmu0CEG4w0wVtUJDlEIQTT55GIgvBGEAUiMX0Z59MLgbIYpOursaAMw4cNYIBOHj7cxaao1WIAhdVtySXgZXkgpBRHmLmrOsEERz2m1a6lpugTNVPeCE9vRidSMQRClk0j/4RWJ8M0qoxBSHsHLNtkhd0UeT3NH33msnBkIQYSOi+X8XgmhiG/p8Mc/KYqzmStAZX+aNQhAaj2rWUmu0ggdRItW+QXeYKTS8pOoJQTTx5GEouhCEIVBSrDkQ8BJEfXZzNYf2IqUgUF8EhCDqi6e0Ns8IeC4GYjolPzr803kWS7oXBJoSASGIpjSbCK1DQBNyq+tuLkFdEFhoCAhBLDSLt7C+nnUSotW59PB5pT058ggCgkBkBIQgIkMmFRoRAeU9dGyjFAOHleULTBPRiDqITIJAoyEgBNFoFhF5QhHo7k68p6Nj8qWRkTu2JhKJ2LMvxA5m276agK7pysx3wsaJuVxqPLRBKSAICAJaBIQgZGA0HQJd/YMXEeOyAMFftdg6enR0TbrplBOBBYEGQkAIooGMIaKYIaC/p2K6bhHEX8qlU9+RtQczPKWUIOCHgBCEjI2mQ0B3YrmsRBGEKybe2O2ijRuvnWw6xURgQaDBEBCCaDCDiDjhCHQNrPozKthXAFgG4G0A1DrDQ2ThG9n1SRVWkl1L4TBKCUEgFAEhiFCIpIAgIAgIAgsTASGIhWl30VoQEAQEgVAEhCBCIZICgoAgIAgsTASEIBam3UVrQUAQEARCERCCCIVICggCgoAgsDAREIJYmHYXrQUBQUAQCEVACCIUIikgCAgCgsDCREAIYmHaXbQWBAQBQSAUASGIUIikgCAgCAgCCxMBIYiFaXfRWhAQBASBUASEIEIhkgKCgCAgCCxMBIQgmtzuXf2DXyTGN6fVINyQSydPb3K1RHxBQBBoAASEIBrACLWIIARRC3pSVxCoHYHO/sQHYjZ9vliIXTI2dusLtbfYOC0IQTSOLaqSRAiiKtikkiBQMwL9/Sv3LXLb/2XwpwH8IU6F/nR67aaaG26gBoQgGsgY1YgiBFENalJHEKgNgb6+oQ8XwfcB2Lvc0pNCELVhKrVnAQEhiFkAVZoUBEIQ6OxLLLZA6wDsLgQhw6VhERCCaFjTiGAtjIAQRAsbt5VUE4JoJWuKLs2CgBBEs1hqgcspBLHAB4CoPy8ICEHMC+zSaVQEhCCiIiblBYHaERCCqB3DWW9hYGAgvr2w1y0AEpXOGLgyn0meH9b54oET92wvFH4B0MGOutfnM8kzwy6919TdYoMPG8ukNvr129l54t5WvHA0iD4BYDGA/QDEHeVfAPhJBm4rWrHUw+vXPBOmg/q9GoIYGEi8dXvBuvrN/j7p7IMYP2Kbz8vlUuNBfScSidgzm2IfI7JPB+MwAO906PICAY+CcGN7jO8YGUn9yUQPtWWwwPE0gHc77HFsPpP813rX9/TFeLCjjZeXZaWuvlVLCLYaQ4c4dqm8BvCvieiaKHqZyG5aZsmSFXvE2uLHgfApEP05GHuU6yp7/TdA99tkXzuWTv1n0BhefGhi1/Zt+Pmb5XsqfRPj8uxo8mJTWVS5at+/2Rg/pTE9SXeCSjZTz4xdRQo7qz1+GoFOAvBeAG8rl1Pv3S8ZdM3k+G73btx47aQfBl19g8cQcJchRqFzgmE781qs6be5dvcOnQPi7+9AkcdiaDsyk7llSxCymi8A9U6Z1e1N9FlE9wLYeaoPzk4swlEbH0i94u5THaKxmL4L4FAAMUNrF5lxRzFmfT6MKKISxOLFZ7e17/TKaoDProIcqGdp4q/ZxlUAvc9Al9dA/HUUsDqMdBqBILZvRzvFrauZ+YQQW70Gxtcmtu32vaAJxQAfoyKdnSfvEotPXsqEv3F9VGjrM5Bn4jPH0qnH/Tro6Rv8CgNfjvreONvr6zv+XQXERgg4sPz/V8nmI7LZVM6n31kbP34E8cYbe77Uvmjr+SD8o4MU/GB5EaBP5TLD/6YjWCEIo+HaWIU0E70Rc3uJBSDgJdh8WDabeiRIS/ek7PP1Rd19g599k0S+DWCnKlH7o8XW8tHRNf/P96smQqoNP3IA6KaOuP3ZoK/97u7EThSzLmWw+rI2Jbqy2Dxmx4snjI2sfdZPj/kmCAt8hk10XZnITc2VnFjEZ+s+DEwbCCvX0zP0cbawBuB3hZV1/a68igtymeTVusmup2foQ2zxLwDsVa73us18xNhoKmPaj+bjzPdDabbHj44gUCwejVjsqwAGTXUCUGDwhflM6go3bkIQEVBslKI6d5mZTsmPDv80QEbq6h28kQinuMuE1dUMxAkGPpHPJB9wttXTP3g8M27SkMNrAH4LkCKhYmmyZfujoNIXuZdIZoY/PCqZehCKHDp22noxA5fMnODDycGfWKBY9WViKBf9KdUugz9GwAc1X7qPk104Kptd+wedXeaVIMBZZtpEhOMdsqkJdhSg3zN4F5oKN1UmU0cxunZifNdzZ8OT6O5PLAGTCrG5+y0A/ASIxsC0HeC3g7DUEXKqyFcE8Zdy6dR33JOdmrARw20AHVkpbBqeVeUHBk5ftL3wegqgo6frEy7Mp5Pfctt3LsaP5r18DuBHnfoBGAfhETD9OmSsaj8yu5eu+kvY/Jkp/ew9y7q3l/V9lRl3EdEb5b/HOU7/nB9Z89+NMldWI0fTh5iU0j29g5cx4SLHQA9cS9CtP5i+JF1Lh95PdunLa9+pOvzoRDy+bOPIrS9V2vj40lUHxm1bnbJ8v8MoW4hw3vY3dhvWTSblL6wzGPxPLld4AuDluUxKhbSqJQjq7k/8PZi+4fr6N/kC9qnLv7EZ571jPzyYSqUU0U0/U/Hetq8RcI6rv/s64nyCzlOZX4KY
AWuBGNcUC22XjI399NXKLypu/uwmtd5S8jLU+lHlKTL4i/lMSoUR6/b09Kx8J1vxnwP4gKPRkmyFycmvbthw+8vOzpR8T/8Rh1hkfd/lbYwz86r8aMoTO9d4AJ6x7KeQ9z3AZrJpWTY7/JirzpyMHw1BOMV4jYDL2uN8lXvsqXe1zbavY5TW0nY8RKtz6eHz/NZyZJG6bkN99hvq6hs8lAAVNyyzuXfSdkrR6VlDcP4avA7R1Tt0MhHfPF1DM5C6+wY/B2C1o9VXLbaOHh1doxZhA5+epYlD2KbbAexSKRi0gGjgQdRCDtCv1eABFHkol0v9T4Ay1NWfOI2YVIij4hkViXB6Np3cgV+5gQYhiHEm/mw+nbrBb2Lo7k68BzG60zVxP1W0eNmG9SnlRdX8qMXficm9rmHCWY7G1ER/Rn40lQxagP74QGKfeAG3AjRQqavWJOyJyaPdpLJkaeKgmE3qY+egclmtN6xTyDvG+R4Ucbx7rWmuxk8AQWyGZa3IrV+T9TOMikJ0bKPUDJIgPIECD+Ryqed09YQgah7mc9dAd3dif8RpBIz3lHsNjKd29Q9+nhiVL77NAKvFKRUWUc8mtmhZfv2w2gnifqirb/A6As4o/1Bk5pXOrzOd642QrxFnJ9qdIYyb86PJU7ULZ8FrELpJWnVn4jmoctTdP3QlmM91yBgYKnIBpvBSnoQKa5Uf/YJ+QxAE85dyoymVOp2DRm9X36pOgn2PI82CKn5uLpO8qh6jXrM+4Bsq0k7eUySmJv53VMYpEZ2STQ+v8Y61vW8AWO3sKT0mYSZteEof1p2z8eNDEGqzx7n50aRjE4veQt19iSOAEvFXQkZbyabDs9nhh3U1hCDqMdLnqA3dpAqmz+RGh3/gFsFTlvEggHtApcXkqZeE6fj86PBad93SVtW2olpr+FD5N8+X49TLQ59ioA/ARwnY1WY+Icrin8crCFiHCPIgdOsgTPj5ZAefZLKwqvnC9PUA/Ext+pXaAATxmD0ZO9QkXbP2Cz9krSjKq+DeYeTnAQS1qdmldHdHfOfEyMhPtjnrdfUmjiUiNdbLGw+CvW9VV0NgWg9qLsePliBCvAAnDpodWYosfbdZC0FEGdENUNYT+gFSHfHNJ42MjBSc4rm9DfXFxMwp57ZVv68obyhL30etcNSDIGolB6WDZ/KI8MJVMNCRty5kNt8EQcBXs5nkV0xtpxkLQZ6nabPQTXTss/gb1Kg7jMrAM3EUBzKZ237vrKf56Alc8yqNC7fXCrqlI/7Cae53bS7Hj48HoZ0DdLj19Z20exGT9wDUOf2hKAShdna2xuNZNPOZzFwvTilExG3FX1qF+PrpWKzP16B7MdzPS6kV0VoJghk3E6B2cTl2v3A6hrblYedDnLK79SVg3Z/e0rHi0ftvej2Kjl39gxcR47IdddjzNTvPBBF5i2fnwMoDZowZoMhMx+RHh1XoqepH8yVbVbsa+XzXF7r6Br9HgFqQnXoCQqKaidgTZq00M5fjx4dYL86nk5ebGENbXwiidQjCdPupa/ItffW9saj96be+sf12xyKV52tQ035dvhgrg1ftrGorFLsJOBGA2jpYOempAsPOk74zxrsmxPQEuLTA7d4aabxQrjrQhu3AvwOsfzd54WaW4Y8AWOIgCM+BxHkmiKfseGFp0DkNt871+tJ3t6vZQKG+6O8GrOldcib4M/NbiHCsc7OD3xZuTZ++4TZNaEVbdq7Hj489vpBPJ9V5htBHCEIPUct4EEo9z3ZXl2vuWUB2TLyuup6vIo+HUkXMWfU/MfHq3rYVP9gCHcjES8ClifPPAg/TRSGIwFeBRyYW4TiT9YeQbYOhL1xIAc/lKvNMEA9ZvO2Y0dG71BkVo8c0dGbUmKNQxMNYkZr3C1VpzhL5hpk06yPa1DZzPX6iTvBGhC8eROt4EMrgmu2uM0IZuvWHSt6mrt6hI4lYHUoqLda54+TuNQ7TuLDaZx2z7fMJGAJwQKQ3ulK4eoJQh3Y6ZpxFMNypM9cv+LwSRBVkr0zT3T/4EzBOm7Yp4YZcOnl6VTYuV5oPgii9O57dcN5zABoi8Q3NzfX4EYKoZdT5120pD8J9AM69MOcikBlegjv264y3a74WQ2PWUfPnOEykvmJVHqnKFsVoIabphnisCOvUGNvfAtFyR/tqX/gRufVr/iNoSM31Cy4EMWWN+SIIzc4kT+iopyfRzVYpB1n5jI5/DrK5Hj9CEEIQJggEnlNwLZbOWEM4+PBP7uxch3CSi/ecRfBhuqkFwtjPnDsitMITXgarLJzIA/SQxTQ6OrpmU1f/4AXEUPvxp57IHsSO3Efag0rMd8LGiUEJ9GYrxu5nxGYjiCipJkwGbqWMhiDm5K5jzdkGT5hVs+jsu/NrrsePEESUUWZetqU8iNIXmOukcyVUFLT+UIHLuw4xtSvFHboKOkyk8s607bT1B47DdJXmt4Kwhm26q82afGSffWKb3SkqpicJt7sfjSA22vHCcY4FV81hNbXjJvgAkZswlWwqHXh2NPlp8+FlXnI+CYKB3/JkbKnJGYiKRr29x77NpkUqJPlXO4hcf/bGHIXS+J0Z6jRMIBmlD7+ynjAqMJ2yRrMN1C+1Rqn5uR4/QhD1GAHeNlqPINy5ksqTa6Fg7+VMTayb5P3WIVzEEbhP3OuGl0C/F0U+OSQ1xbR1at3m6o6D9/Ss2Mu22u4n4MOOIfA0irwsl0s94Te0enoHfzgj3UOVsXqToetzP4fxfRA9PYmPwKJ1DOy5w/HSH3TSkFHkHWma7ajGaSqC8NDo4buN1ATXKGU8B9scW8U163va1BrO/uZy/AhBRLG0edmWIwiNq1x6+WHz/o58TdqXzvvS8932ZPwsq62wdvpylZDDYp6zEkDoROwylzfTbBQPwmehtHxoSaVamM4Yy8D1k+O7neOXiVSTEj3wq9Fn2FXSLahLnZ5VWWyZ7XWL2l68zXmwSvuC+5xo1/XjJndVxu8krIYgirpUFEGvUT0Ogena1yaSjJCqpdKmWlOwLf75m2lO1EHRRwj4HTFfNTqaetpPr6m1thmpN6bfE89ZCZ9MBc625338BOxCcmMQlWDkJLU5yTRcSfcXuNr/bYE/4Mj4qv1i1LjFT4HwBTB+vCPvjv7UqALBZ++3NsWBH2iak63R1iB8CMIn9DXBzKfmR1PDOnk0C5dq0r00n0mqy1cC8xVV2isnt3PmBdLm+6n1oJNnAotGECqApj0NrMNFM5FGqh/ywujyF0X/yPDkwArMMTYtkjsnkfK044h/xXnK2O9UtluveR8/QhA1z80t50EoRDRpBtYQ420gqOs+AydcdziJCPcylw4cTVUNuGtCt3AZ8fSxbr2gLgShZNdN1gAes+OFI3WHxEremIVbPbugiI/JpVMbwkZfKWdRYe8rGaxuQqs84wzrqHxmzYizvg+5hoYxAvSK4kGoZsZt5uVjo6l1YXp19iYOs6iU2K3ijYWmpwhr0/m7pn1FQMb3TkzdW2Cr3UbOk/RrO+I7n+zOxeSWy/uBwmPMuJyI1NW+5RsUzch0bse
P58rRwFxKbr3Fg9CP0JYkCI2bvrk8uEsDPGiRWReqcEAXGqv2xF2BLQzryHxmzVjQJFG6b+B55a2QSg3gvKtaVfM9yGWQ7ntGt129g58hKqUin74VjkDXtMdfOM+dS6dMtu7JUOH3W1h0nE/G20p/1NWbGCSi62ccAgzYQeXN8YNxInwym07e5oddd3fifyFGygNSV7rOeCKEmKbI30Cv8qUxP5tx50Kd12Z8JtYiAZduH9/tsqDLiXzwMCY/hYPLG9tKhPWOj6RIayI6sjPBWTFilPETdYKvN0GYelVRPhQaoWxLEkRpcM1My+3EOnCA67I67qjszSHkNmJP/9Aq5tJ9EY5rOen3ZNmfzq5PqTSTd0MUAAAV4ElEQVQVM0IzpQvcn6cuML5DQJfPoPDd6hiVIMqHne5w3hcQ9PXs4wUoMbeA+QLYuMVzB4D/HcqbEeB9+NwdoPo594D9MOzc9bXjghxc6Xc/dlSCKGP/4pu6nX/Avpx09/fsHzGk8hS50nxHSmFi+tLrvYASjd1tMf5+dDT1X662Au57Nvc+VJs+Gy0q3RlnvVUV5mr8zDVBaC5MKoL5kgP2w7f9diea2r6RyrUqQXgzke5APdAL0G3Pq1Q1OT1dvk3tbp/J/gUAWYDUJKSuLfwQQH/huUxdLYRzKTxQuTTIN1dQVIJQvfb2ruq3yb57Rp4en0tlVHnthSo78HRe46h0+jBAizVekLqM5+/y6dQ1fi+Az0U5leKbGXiQQK/6XLH5OEAZgM+etpdPDFq3SK0EB9A2LdvUGZX1U7bit5c9lB35saYKRrqnIeqLH3BtrWrqWQYyCo/AK1EJuTgKx6fTazeZ9q+7xncHLsE3ren6mIvxM9cE4XsrZWnc0CsMLsTYOsvkkjBTu8xHuZYlCF9PwCAcoNmJVPpitsGHjWVSG8MM1du76mM22SpG7byaMqya+r0Apiu4ja5Gwb6PgPeWK/leXlINQUxdBDT4XTD+1ilU0AL01KRB10a8AL7SvLrf+YJcJqlulwtc3DY+ZOgQvBKyAPMxzgOGETyILQS6hMEXR7CZsU4mhvcr09Of+AQzqbvNd4/ezo4Dk1HrasJ9qomqt/LO9viZa4Lwe4dmvE8B65VR7TFf5VuWILQ3uxnemKVfhwg+Pe024JK+ofda4BsCwkbOKupO5wds4i+MpVOPRzmlWyVBoJwOWqWmrlx+VCLBoPWSqXuZafDN9Rx1udL+JoNWXXbDxGcqvUzKqzIR0pSoA393FNv43IdHUs97dq+ZexAl8qd47GUqFG8EqD9I1mp0MtVdV650bzLzt5n5eI1npqvyGoi/jgJWB52WD5JJtwMJ8E+tYaLfbI6feSAI6K53deIQdFWwCV6NUKZlCUKBq/kKMlpg03kf1Ri7sr5g2XQWE6tJ553TL3jJFeX/YuC2ohVLPbx+zTPOAeG+89fvVrFqCaKET29iiIhudFyzqP59X0ecT3Bf7u6UTW2ZjS/a0hmDdSaT3QuQSkJY2dHzGsBPM+huG/TjDZnh34V5DX4vwlSiQz6HwCsdGW/Vvv4/gLDOBv/LWDqlroYteSW1EoTyDpXNnnseRzDjcwCpWwFVWKkIxjOwcK/N/KOxTOqX1epUy0uvdhjF2opDDB4C6N0A9i63pz4wngf4UcC6viNu3xNkPxMZfHbkRbpUya+f2Rg/80EQSj+1oYBi1hnM9lkgep8rK7PxhUUmNpmPMi1NEPMBqPTZ+Aho1iCMw4eNr51IKAjUDwEhiPphKS01CQJCEE1iKBFz3hEQgph3E4gAc42AEMRcIy79NSsCQhDNajmRu2oEhCCqhk4qLjAEhCAWmMFFXUAIQkaBIGCGgBCEGU5SqoUQEIJoIWOKKrOKgBDErMIrjTciAkIQjWgVkakRERCCaESriEyzioAQxKzCK423EAJCEC1kTFHFDAEhCDOcpJQgIAQhY2DBISAEseBMLgpXiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+goAgIAhUiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+goAgIAhUiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+goAgIAhUiYAQRJXASTVBQBAQBFodASGIVrew6CcICAKCQJUICEFUCZxUEwQEAUGg1REQgmh1C4t+s45AZ19isQVaB2D3cmdPxqnQn06v3eTuvKt/8IvE+Ob0/wk35NLJ02ddSOlAEKgCASGIKkCTKoKAEwEhCBkPrYqAEESrWlb0mjMEhCDmDGrpaI4REIKYY8Clu9ZDQAii9WwqGk0hIAQhI0EQqBEBIYgaAZTqDYuAEETDmkYEaxYEohBEs+gkcgoC4kHIGBAE6oCAEEQdQJQmGhIB8SAa0iwiVDMhIATRTNYSWaMgIAQRBS0pKwhoEBCCkGHRqggIQbSAZbv6Bo8h4K6KKky4MJ9Ofkv93d2d2AkWVoDwOYD+AsDbyuVeAPAgE646cB/Op1KpYrVQJBKJ2DObYh8jsk8H4zAA7wQQr/RDwKMg3Nge4ztGRlJ/MulnYCDx1u2TdCcIh5TLzzh8tmTJij2s9vhpBDoJwHtn6sW/ZNA1k+O73btx47WTJv25y5T6L1hHAvYZIFoCxh7lMs+CcLdl8z+Pjqb+S/0vCkGYHpSbb/3VuKE4PsGMTwH0UQB7l/QnvAzmDW583XoxcGw+k/zXarCXOo2DgBBE49iiakn8CKKnZ+jjbNk3AvS+4Mb5NyB8JpdOPRRRCOpZmvhrtnFVeB+lll8D8ddRwOpcLjUe1JffBPnGG3u+1L5o6/kg/KODFPyaehGgT+Uyw/8GgE10U2T37CYafPNU9PcAvD2gTgFMV8C2v2zH8IF6n6SeL/0VBXT1JgaJ6F9C9FeQTo8bIQiT0dV8ZYQgms9mHol1BEFkZWDbtwPYy1BFNWFfkMskrzaZTEtfmDHrUgafDyBm2Ee5GI/Z8eIJYyNrn/Wrp5sgUSwejVjsqwDUBG76FBh8YT6TuiJML9XntgJdScCppjoR40dF8A0WkfLg6pZqYz70X7z47Lb2t2y9HIzPm+oPYJyZz7CI/oyBf6oYRTwI0+HZ2OWEIBrbPkbSeQgC+CGAv6Kp0Evl2czAgwR6FeB3AegFsJOrg3EifDKbTt4W1HFpItnpldUAn+0pR3iZGCrE85SaZBj8MQI+6Ag5Vao8TnbhqGx27R90fWkmyOcAfhSgIx3lx0F4BEy/Dulriw0+bCyT2uinV6BOwDR2BD6IgS6n90KEu5ixFMBu5fZrzsU01/orz6G7P/H3YPqGhxxKYSWsB+hFgN8OwlJHyE2pPA5W3gT9pRCE0SvbNIWEIJrGVP6CugnCVfJFAOcfsC8nnesM5a/lvyPgEtfkvRmWdURu/Zr/8OnRZyLh39iM896xHx50r2dMrRe0fY2Ac1yTz30dcT5Bty6hmSCd4rxGwGXtcb7KXf
fjS1cd2Gbb1zFKayE7HqLVufTweX5eRE//4CnM+IlLvj8CfOYB+2KdUydFJh1v2TrEjCsdXoOzt9kgiFnVv6s3cSwRrXF9NGjHjkkYTjyIFphY5CR1axjRjyAY+C0sOi6/fvg//Sb7crz5+pkTA93SEX/htJGRkYK7nmZBVhV5AEUeyuVS/xOAKHX1J04jJhXCqnguRSKcnk0nb3bXCyAIRWArcuvXZH29gUMTu3Zso9QMkiA8gQIP5HKp59z1enpW7GVbbfcT8GHHb4+jyMtzudQTfv109yeWgEktxLrDeLNJEHXXv0zgd9OUZ1R6DMYOupYOvR823+HyVCv1ZZG6BaYX8SBawIg+BLGFYR2Zz6wZC1GRunsTF4JKoYXKs5lsWpbNDj/mqkvd/UNXgvlc50QaFCpy1+/qG1SehPJaKlNRdmIRjtr4QOoVZ1kfgigy49z8aPL7YWbr7kscAdCdANrLZbeSTYdns8MPewiif2gVMyuSqqylvGqxdfTo6Jp0WD9dvYkhIrrR0Y+qMlsEMVf6j9vMy8dGUyqFeeDT27uq3yb7bgC7OAuKBxGGXHP8LgTRHHYKlFJLECEhFWeDKiwTt+37ALx/etp2bJWt/G/J0sRBMZt+AeCg8v98PQA/gTVtTDDwiXwm+UAoQQR4Ae7++vqOf1cBsRECDpzWSbP1cmBgIL69sNctABI72vD3oNz9lLYRx3Cba21kdgiiAfWfWrvwfDQoD0Q8iBaYW4QgWsCIGoJ4lWw+IptN5UzV6+ob/B4BKkZf+bK/uyO+c2Jk5CfbKv8px6nXTn9pR5iwKm3oJmRiXJ4dTV4cShBAqiO++SRd6MtLECftXsTkPQB1BhFEd3dif8RpBIz3TJdjOiU/OvxTY+x6h04mKnkglWd2CGIW9NcQaZGZV+ZHU9PnasJw6OxN9FlE9wLYOQjrsHbk98ZDQAii8WwSWSIvQfBYDG1HZjK3bDFtrKt3aCURT+9eUjFonowtHRu7VR2oKz09vYOXMeGiyt8ErPvTWzpWPHr/Ta+b9qPKdfUPXkSMy4LISBdiYsLF+XTycpO+tPU1X7WayW0TW7QsYN3G031PT+IjsGgdA3uWf5wVgpgN/bv6Bg8lQJ0TKYXiGHgmjuJAJnPb701wVmU6O0/cm9qK651rEeJBmKLX2OWEIBrbPkbSeba5Mm7OjybVXn6jw2Gll9x7beaMraH6UAz/DrD+3UjIGYX4IwCWOAjCQ2g+BPGFfDqpzjOEPqYEYUKMYZ1pJsjZIoi669/dO3QOiJ1rOg9ZvO2Y0dG7XgvTu/L7wYd/cue3vrH9duemACEIU/Qau5wQRGPbx0g6zUE54y/tSgeaUMPrNvMRY6OpjCoTsu3USM6AQp4J1XSC92vTtL4n9QXjwY42Xm6aEsQHm9khiAhx/Qj6z/TmqtBfYdDdP/gTME6r2EMIotZXojHqC0E0hh1qkiIoF5Npw/39K/ctcFzt2nm37iUXgvBHMixvkrNmDbmYIi38mhKEe2IH4YZcOnm66biplBOCiIpYc5QXgmgOOwVKKQThhcd0glzoHoQQRAtMALOoghDELII7V03XgyDUoSeyWW1h3bcst2sNwpNdFc6ssfXW1XSClxCTHgFT/DwbBiTEVO+h3NTtCUE0tfmmhHcThEoglx1NfjqKalOZX/l+Rz6hGbt5dAuR1fRjKpPpBFczQbhSpQcdcvPrq6/Ps6W2mdYgPk+M7zp0k0Vq00G6AMoJQbSAkTXbXD1nGMLU7PLs5edHJ+LxZRtHbn2pUrend/CHTDhruq0qvzbDZFG/zxVBaIgxNLGfW37NAn/zEETv0JFErNKFlE6RV7PNdfHAiXu2Fwq/AOjgCjaySG0yyhu/jBBE49soVELNQbmnihYv27A+pTKqmjzU1Td4HQFnOF7w6/OZ5JnOrbKaLZF+KTmC+qycvFUnl58F6BFme92ithdvcx6AmyuC0O3hB9NncqPDPzABruTBTSW623GAcJZSbUSZdE3x05DbBMDLc5mUOvhm9MhBOSOYmrKQEERTmm2m0BqCKBLRKdn0sMrOGfroU2h46/f0DH2IrdI6xXRyOgYuzWeS6vIeozMX3d2J9yBWStfxDgcZXZnPJNW9EtOP6QTnp1yE+h5yRATPaOp8yN43AKxutqs8TeNBDAycvmh74fUUQEfvEN881UjpgiFPfq3SYJBUG6FvXuMXEIJofBuFSqjLxcTAryx78vBs9vbNIQ1Qd//gd8H4W0e5x+zJ2KHOU9Tqt/L1pbeCaLmj7GYQH5NLpzaECaom04nC3lcy+G8cZccZ1lH5zJqReSIIaBL7GeeY8klW1zQEoTDvqSFZYffSVX8J21bexoyMtkIQYW9Dc/wuBNEcdgqU0v8+CLqpI25/NuDQF3X3DX72zRw633am+w7andTZmzjMolKW1OnLhkxSQzuuspyZWpz5Ttg40X0FaQQPQItNlPp+xMfAye4kgs7Oyt6QwuIDLiGaiiB06b7fvLUvNN1558DKA6xC7GfOfFcOr1A8iBaYW4QgWsCIQRcGMZBn4jPH0qnHnap2dp68C7VNfsN9iY8qb09MHr1hw+0v66Dx8QJU0S1gvgA2bnFP9qqvWHzyUiYozyFu4n1EmeD1cmq25QaEPXzudtDeoT11YY66kIiuA7Cfpv+mIgglv8+FQX9kpv9z4H72nc4Lk5T+T/8Rh1hkfb98O6EHAvEgWmBikQuDWsOIGoKYBGA57jcogqAWrEfBtB2wPwzQYs01oEbhosW6C3l2QOm8BhQBfY0z8d/l06lr6jHBu9uohmB6+gePZ8ZNmqtY1Q12+alrVLVXbrq7bzqCmErb7Qk1TukVfuWoe7zJGkRrTC0QD6IFDKnZ5jpCoIcYUCm0nV/sAdrS72HRCQFXjc6oq0iifRtd+2YoYrAKCMcBXJDLJNXtctrF7WomeKccVdbX3XoXpp5ar1jDzAcB1FMu3IwEgdJVqju98j2eumu8cnlSmP5bAP4HgL7ol6YlrAH5vXEREIJoXNsYS+YhCMaDdqFthdVWWArwjwG8PaCxAjGuKRbaLhkb++mrxp2qG3ZKoRZSBKHWMPY3qesX8qqHB1AHgig10dmf+AAxqW2/01dw+uj2GhNdPNlh39g+TmtBOKSZCULJbnLftAOLx9iiwTae3OrK41VkpmPyo8P3mIwJKdO4CAhBNK5tjCXTEUQlI+nUWsPEWQCdSsAHyx7FOJh/Awu3xlG8OZ1eu8m4M01B9eUZX7SlMwbrTCa7F6ADHGGa1wB+mkF326Afb8gM/85kS2yVHsC0dLXWVxPlM89TFzE+B2ApgH3KX9Ul7Jhwsz1RuEGt1TRzsj4/uyudJop0HBinMuGjYOxRKlsKN/EGIly1/z64V61NaBI9Rj5sWMv4k7qzh4AQxOxhO2ctBxHEnAkhHS1YBNyH7Qh4CTYfls2mHlmwoLSI4kIQLWBIIYgWMGITq6BJV+K7BtPEai5I0YUgWsDsQhAtYMR5UqEUHitQCsCfA3gEzL+GjWtzudRzpiJ19w2qMNzqSvlqr6I17U/Kz
R0CQhBzh/Ws9SQEMWvQLoiG3XdCEOPy7GhS7YALfXSH7KLUD+1ACswrAkIQ8wp/fToXgqgPjgu1FfelSaWT8XHrf+dH1vx3ECZqc0L7Tq+sxtS22MrzKtl8RDabyi1UPFtJbyGIFrCmEEQLGHEeVejsSyy2QOsA7L5DDP6NzTjvHfvhQecp6srvKsGjVaRvE+E455kJAl3THn/hPGdm3nlUTbquEQEhiBoBbITqQhCNYIWmliHwFDUxfjl1ilw9rM7UqAOBe3s0JuTiKBxf67bppkayxYQXgmgBgwpBtIAR51mFqcVq62qAP1mNKGphejLOpz48knq+mvpSpzEREIJoTLtEkkoIIhJcUtgHgYinqCutbAXjMth8lTtJowDd/AgIQTS/DT13Uke58KYF1BcV6oxAKSdTxyt/DcKJmpPxBQB/IKY0GLdu377rv2/ceK1K1idPCyIgBNGCRhWVBAFBQBCoBwJCEPVAUdoQBAQBQaAFERCCaEGjikqCgCAgCNQDASGIeqAobQgCgoAg0IIICEG0oFFFJUFAEBAE6oGAEEQ9UJQ2BAFBQBBoQQSEIFrQqKKSICAICAL1QEAIoh4oShuCgCAgCLQgAkIQLWhUUUkQEAQEgXogIARRDxSlDUFAEBAEWhABIYgWNKqoJAgIAoJAPRAQgqgHitKGICAICAItiIAQRAsaVVQSBAQBQaAeCAhB1ANFaUMQEAQEgRZE4P8DLfZkEZ5avf0AAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-24"><g><path d="M 31.61 171 L 70.39 171 C 87.29 171 101 184.43 101 201 C 101 217.57 87.29 231 70.39 231 L 31.61 231 C 14.71 231 1 217.57 1 201 C 1 184.43 14.71 171 31.61 171 Z" fill="#0cf232" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 201px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Kernel resumes system</div></div></div></foreignObject><image x="2" y="187" width="98" height="32" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tnX18XFWZx3/PnUkTRGlZVgpYQNQPu6Cia6GZZJIYF6iAQEthJuGlUD+sgMqCb/iCrKLAIotvdAsIyktBoZmhpXUriK01NpNJUiyLCMgigkrlpSxbCkKaZOY+y5nMpDd3zr1z72Rmkpl57n/JnHPuOd/z3PM7r88hyCMEhIAQEAJCQEOAhIoQEAJCQAgIAR0BEQixCyEgBISAENASEIEQwxACQkAICAERCLEBISAEhIAQ8E5ARhDeWUlIISAEhEBdERCBqKvqlsIKASEgBLwTEIHwzkpCCgEhIATqioAIRF1VtxRWCAgBIeCdwLQKRKg9+kViXGPJ7h+DlGrv61vzvPciTA7Z2h49lRl3AtjD+gsT7htr5DO2bozvLDZtiVc6Au3tS/ZPcbAPwLtzqTJw8mAi9l+le4ukJASEwFQI1JRAOIkDgNhoE58n4jAVUyltXBGI0vKU1IRAOQjUjECEwl1LiPh2AG+zgRJxKIflTDFNEYgpApToQqACBGpCIFrbIx9jJjWttLeIQwWspgSvEIEoAURJQgiUmUDVC0SoLXoMAT8BsO9kVnRnY9D8VG9v/G9lZijJF0FABKIIaBJFCFSYQFULREtHdytM814RhwpbTQleJwJRAoiShBAoM4GqFYiW9sgCMKkdLzJyKLORlCN5EYhyUJU0hUBpCVSlQIg4lNYIpiM1EYjpoC7vFAL+CFSdQITD3UeaZK4DcIC1qMT4EZt80cBAfNgfAgk9HQREIKaDurxTCPgjUFUC0dLR/U8w+R6A31UucejsjLx1JGUcD5gfB+hDAOZm35UC8Gdi6jMN/tGB+/FgPB5Pe8EdaoueRMBPc2GZ8KXBvth/tLRE9oCBcwH6PAjvBJAC+CmGsTpt0E0Pbl71rIrjFF/9FolEAs++QCFifBpAB4D9AAQAKKH8E4PWWNPykl97mMw7ng8cSWQuA+NYAAcDCGbDvUjAIyDcMSvAa71uCii3QGTqcYzWgfDP2XxOHMJsbo8cbmQOaNLRmQOVhJfBvIVBN44Nz/n51q03jzlxKod92N/V3Hz63EBDuovBXSD6BzD2sdjgcwCGAIo3Bs37vfBubovMN0AbLLv8dpjgY4cS8a1e7MFPfPvhV+vhx6y9nwJStkrvt2xJf/FN293EhOudvqsFC07Zx5gVPIcYZ4HoH7MHYTPfJANr04ZxXe578VIma5hy2Lc9D/OPicxuHKGTwDibgSMs7YpqQ14g4HFm7kmPpdZu2XLvy37LUK7wVSMQoY6uw2DyWgIOLYc4ZIw3iAvB9G+asxQa/vwEGfh0cnP8VwDYrYJ0DTwHUncZqcA9ADXr4/Ijo8Hg0Vt77/5fJ4FY0NZ1qAFeSUCogIGkiegeTpmfGhiI/58PY6LWjshH2MT1QOajLPS8BuIrkMKKQiO56RAIpPnDCNBiANfaT9pPFIzpgoH+npvsBS2nfeTedVRnZL+GtPF9Zj7VIsBuzD3x9tPA617mJ76DQKxvaev6GMC3Ani7W4EI2DAW5LMf7I2/sLsDZHyKmK8q8F2mCPjGyPCca9wE3vbustl37j2ZDkWargDjfEebm5ypFDFuTKcaLhsa+smrhT64cv9eFQLR0hJ5DwKkppUOnwyEbh4dnn2hD4PQ8gx1dr+TUulVzo21YzWkQfje6BtzLnXLg72BB+hbBJ7PyPTGtY8y9mQidrn6UScQBvgxh7Mfjpll4LeGmVqUTK75cyHDUg0iBYwrGXxxdkRSKIrldx4yg+nThnrXbHOKNB0CQcByBr7l8qE+kzb46C2b489Y811u+1DvcrZxL9i5NxXE6blG1R7DTwNfcoFgXgTCewikXOrkRp2uhVIiMdLEEezY+43GPXZex+DzPNpgmoHvDyZilxTqtJXbvrN1+ncIUA+AY7zUojUMA0/CoMWDm3t+7zduKcPPeIFobV1yMBvB+8olDs7pIw3Gs0wYJNCrDN6LgDYA8/IrwF2o8gQiM6VhnTbgrYDxW4BV70oZ0zCZdHQy2fOoTiCI8FPmzKjBuoNrGwMJlddMOoQOyzsmsqzWamY1bP9kb2+vGp5rn/nzz2uYtcfOFRj/MCc/hJeJ8RCDVCMaYPCRBLxX8/E/TmbqBCcxmgaBGAUwYumFbmdgE4FeB8xQZoREdONAX89F1salEvaRmX7YRXFNh+E1gH8HGBk7cLNBBm4bG55zvq6jMp0CAeBuAGrUlvONpqZRx+2duBFAGIxD7AJAwJeZ2ATT1ZbfXgOwEaCXLN+K3XPCq2TycclkfGA67dvlGxoG8xMg4yEAaTcGAPeONmHxdLoImtEC4fBxpgl088jw7IunOnJw+DDTzFhrBvgSe08SaojRfvr7DU7dAtCRFgNMM/iLg4n4d3VGmT+CyIWip8lEdzLZ82DuP8qwmppePrSh4eX/yTXizvEzc1uDTIHzh/ru/p21YVPzqtuep3MAfN82NN9uFR9NfqmlPfIF24epkn7CZFx00AHYZF97GZ8fbvgmITOMVusfueeBxiCfppsnnwaByOVJ2c91nDYvs06DHdXRfSCM9Ji1F14p+2gJR84FkZrWyrEbJsLFI2/MuV1j49TS0f1BmOYdAN5nYe3YME6zQExwZ+Amc3Tsa7Y5dmppjy4EQ5XH2uFR60BGlskOMF847wD0WG0vu6bxZRB9dZLdEa2wC72FU0XsO9TW3UkwVcc2J4xqdHN1U5Cv0X0PCzoihwRNusnWSVBTw2cl+3pWlXJU4CetGSsQShxMI7iOgA/YCvTHANILE4nVT/spqLbhzvcmmwbzZfMOwLVuC9DZhcobAF5qSfcvSPPRAwPxp+zvcmjgdzCM4wcTq4YKlcNJILzs3NI5MMwtkuveq2lMVLCNSHNXgfULCrVHziGmG6wfBRGWJftiP7a/a7oEwq2nnVdvFbCPzs5lTSOp1+MAnWh5/4UDidj1bnahpr2QMh+wrskxsHwwEVNTgpOeGSAQqqf8lYG++Ledpn5a2qJqk8UKTZm3g/ikgb74Fh0P1aFq2OOVmwj4+O7fd6/feWBRFvsOtUWvI0CNRscfd9HKBFEdklm7sBagTktZ1jcG94z09t6+q1A7UY7fZ6RAjAYQcF3AZV4HE6cXWgh1A6Z2ihgN6Y2Te2He1zSyveb11gVi67qB9d36Bp7uagy+eI7bVE8uDV388fWEsYXJ5L3b3cqZ6WUFsBqg43PhmPHjwf7Y2ZqPlVrau5ar3polTdepItu7KdQWVSOJyywGnhxtwgn2YfI0CUTB6YdcvitlH+o91JDebGnoPe8wym9UeSiAhuMTibt2WOtl+gWC70cap7p9r21tp74rhUAvAQda8q5G8xcO9sd+4CqW4+52fgZgVjbc82zQ0Zr5+4rY9xELl+751jdG7rWOBry6sm9pixwHZNZbM2Vh4Nkg0p2l6BAXIyAzTiCYKEpsfmeyiuYVrWCPpBCM1vaub
mZWPdvcsN5xBOCUVn4a+p6LtoFnOmuwv0f5kCr4aONnt8oWjAygNRy9igmXToRlbGps4EX2oa4a5gZM+iWQmRNWT9ppBOD0Xk0aowx8bDARU2I88UyPQOgbUF1ZKmUfGg6vm8zHDfXHE4XqtrW16yg2WDVErwN4yAAe5jQvtzfE0ywQnqZJNNuSAcJTSHHnwED8r64C0dF1GJms7Hb/bDityFbKvnVlYY/fe3PnknlGKrABILU+8zCDHm8g87q+vvhLheyhHL/PNIHYAfCLti2Vap/wagBq2GWdo/Q8RaMBp3q6t1iHpWrqYTARO7fQ7gdrWppej/bj1jTwr5BJC61rDwV6SJPOUQDw3IiodEPhrjOJMmI4/jgIRCgcOZmI1kyIpscP1Jr3zs7O4Ehq37vUEY3c/4nx78n+mJonnlaBcBk52fFXzD7C4ZPfZlKTchnz4d2ZcN+V5LchmGaBcOrNTyqGzm4AxBuD288oNMrWiKxWICpo3xQKR+8gwlm7P7mZsSvJr+3MNIGw5z8zUpi3H7677QV8Ln/hFI+awdTxbtspdUDa2s7YO42x+63bWpnwucG+2Pf8ANR+3Jp99BqB8HVznib+M2Yw1eG13HnxHQTCPtJQ2w3/9pbGUx75xZ2qh+r5CbVHLyWG2reeU6S8edTpGEG4rb1MFv7K2kdrW/RyBr5uA/wKCKsMk1bu2jV761Q2ZEyvQHgftbW0R28HQ22syPZj9GsqdkP0KhCVtG/NCFRle5iBdQRe2Rjcs3e61hU8f8hq6cRP4FKH1Vw5an3FpAXj8YVhugfARyfnw/u6QS6ew3znAwAVPB9gfTeDG2g8PxNuP3S95fwG3vtHo96Xdw4CeJLHAh1DQ3erE6gFHy8Coe/B8R8AQx0E9PnwBwEssAhE3tz4tAiEx2F+pe0jewZCTZEc5AB6GISHYdJKbqAHBntXKTt1PZxpTWdaBcKhM6IrZ55AeJxG9SIQlbZv3RqlrcwpBh4jRg8HaO2Bc80nvXpm8PkxTin4TBUI7W6icPjU95oU+IXND5PveXKHnTpTAjkRmbByoC+2zJqYlwba7eWViK+dAy4NEZVK3ohpWgTC453XlbaPTCfA+UZEXS1sB2ENs3H7gfunf1OoYZlWgfA4TaQKWV6ByHO9Ujrr1ti3StzJb5zDi9UZj/Vk0u0jI7N/NZURYykLNhMFwnWraWt79CxmqKtFrfvtt8MwjhvYvOq/vcCpdANQiQZ+qgIjArGbYKXtI/dm5SOKmNTaWCHXKdbqfoWA76THGpY7uWaYVoHQdJicbLXWBEKVU52vCaTN7xFlDgta2yy3T1b5Ubs1SKmr+vrWPO+lTStXmJkmEGkCrhwZnnOVk4Kqfc+Nb9l5JzN3WaHkjud7OXVY6QZABEJGEG4jTNvHTc3tkcOI6QsEnObNL1gmhUfZoKjONYMIBFDpDpC9wc4IhWleTIA6O2W7w8axeX+ODF6a3BzfVC4BKJTuTBMIT4u3ukNCqqAMXDmYiH2t0PzsVD+YQlDtv1erQHhd1PXLQ4Wvsikmz2cTimHhFCfjZfRF41BK82ImdDm4NLFG127amKq9+4mft644g0cQ5bRvFzugUGf3wTTGH4XB54Ch1utyp6110XzNjpTS/lRaVSkQKuO6E8IZH0aEpcm+mNoW6/i0tkY+CIM2MPD32UBpZjppsL/n/lIDVulVg0DoDveok9rJ/tgnysFkJgtEpe3DK9/MqWtz14fA5lIwljj0RPNOYftp4HV58RN/pgpEpe3ba53mXOukKbCUGF1Zt/+To3s4he31fX7DVa1AuDjDKnjyV3N6FeXsTVSDQGRENxz9IRP+ZcKIfOxA8Wt4M1kgKm0fftmp8BlfW89hGYj+09oD1W1NVq7yvRwkc8pHKNx1PBGrsxq5OXTHEdVMFYhK23cxdao67Dq/VMqzq59di0W+WxutagVClWb81GFQ9fqtTsuAAq449P5vCrsDsBPMNiTKH87eBPyPCfzeMPm2ZDL+sDVstQhES7jrfBBb3RoUcuynM6qcOwN1UG4bQA8zmxuaGl5abT3wNJMFopL2EWrrbiZOn0BELQwcnj2wqe4k8fKoA31XE/AlS2AvO8b8HdRsj36WGFZHlFUpEJWy74wgs3kimFqUiyUGkk3B7UsLHfjL1WGoPbKMmG6z1Om0THGq91e1QGREIhw51qCM7xLrPF5BVxwa52CvGmyc2N+/qs/Ll5npkeTvqNK6lagWgWht7XofGxmXBROLaF7XdXLMdHv6dU7kZrJAqLJUyj7yetw+R21eDmFqWTOdOtjfo07Nuz4OzgSrUiAqZd9TPbHtZ0qvUP1N9feqFwglcvkO4jJYXL1A6s5U+NoJpR+9PJQeHVtovzKwWgQi6z75bhAtshiWK0erAarDSKOpucsZ/EnL/4cZxgmDiVW91rAzXSAqZR/N4UibQfTzN2+52zPLx1dHRXPY9NcG7zqpv/+nal995tF5DvDiXVTFDYe7200y1wPYy0uPdiZPMVXKvjUHLX2d1coTGMCX54SpioI1fi0IhIOb3MyuJjePp2q+77tgfGYyULqzMWh+yu2u38xtVAYtnzRfDzh6nqwWgXAakXm83YpC4UiUKDM03j2ac5jum+kCkZ0PLrt9ZF083wdQq8UOC66jqbC63Xy6k/zZTtQk32OFOlCZUZTjTY6oyhFEpex7/NT23JUAn+G3o6V3+e3NJ1UphSGXVk0IRMaY2yMLwKQW0mx7jJ1dcTiuYYB/wwhcOJhYpXzQT3Jp4HSYyW30UU0C4TAKUIjVpS2XwMRded5Cm8/cKxAcu5IJauRgvVbScfRRBQLhvMZVYvvQH/50vqBJNfjjd4UbPwT4XZaG4TmD0wv7+1c/Zm8sNL1SlczTZJifsN+rnrljoWlnlIiVbzLdHdJVKxCVsm+HkddLzPTZsV2zY9qb/9ojhxuMlbbLyIZN5kVD/fEN5RCAQmnWjECM9/h0N6FhlJnPHuyPq7th8x5nYcms0KirQTdnrjhU1yOa+LB2GxroaRjm4oHNcXWrW95TTQKhMu9yBab6edwvEFO2rOYHAJqvuXJ0mIk/P9gXv1HHpBoEwr3jUTr7cJj6GMc2+YpXvOno7RAmfEhznazrupvrQTHGn2Dg12AayaQ/fpJ74ipP1fkBYZgZJ2frsmoFolL27TwCzRCcdJVs9vpUNYKcm/+t+Pc1V6jR9/N7LQmEW8PmetikpaP7n2DyPbbemEeOShzoNDc3H9UmELmPaNYuuhlA1CMIazDlKuCSgURM3S6ndSpXLQKREYky24d6x1Gdkf2CKXV/s/U2Mc/kU+qyqpHhOde4+fAprhzjrseDafqWxdNqVQtEJexbvcPh5kmvlaquKL2jKcgXuU13e02s2HA1JRC7P2ZTLfpNmmoqtADd3HzmXtQwdgUB6mCY28nGHOsUMW5Mpxouc/KBkwtYjQKh8p6911oJxLUA3uHFyMbvyOZzh/rij7uFryaBUOUop33kOKmRBAfo69mrKr3YoIr6KBl8sX2ayIm9D99AytvoDU1B/qpqoGx+
kqpeIMpt3zn+2W/ogjdH2Fe+eT/8HC/fEAB1QdIl8/bnWCFHjB7TKzpYzQmEIhEKRy8gytxvO9k5FvNXBvrj17i54shMr4zQSWCcrfalA9jPks6Lb95D/Ucwbk2Ppdbadys51UK1CkSuPGpOOti0ozkA41wmMwzQPIuIquHyXxi03gTduiXR84dCrk5UutUmEBMsymAfdrvJ9jyPB1idJWnOei/Ore1keAPGRhjmLfPm4vEiGpGMvycD9K9gHAvg4OwUoRr5/YlBa9IG3fTg5lXPTojX5LsaakIgymnf9jrN+JBr3PkREE7XfEPDAG8jNvrTMG9J7dp7SLy5Fq1pElEICAEhIAQqQWBaRxCVKKC8QwgIASEgBIojIAJRHDeJJQSEgBCoeQIiEDVfxVJAISAEhEBxBEQgiuMmsYSAEBACNU9ABKLmq1gKKASEgBAojoAIRHHcJJYQEAJCoOYJiEDUfBVLAYWAEBACxREQgSiOm8QSAkJACNQ8ARGImq9iKaAQEAJCoDgCIhDFcZNYQkAICIGaJyACUfNVLAUUAkJACBRHQASiOG4SSwgIASFQ8wREIGq+iqWAQkAICIHiCIhAFMdNYgkBISAEap6ACETNV7EUUAgIASFQHAERiOK4SSwhIASEQM0TEIGo+SqWAgoBISAEiiMgAlEcN4klBISAEKh5AiIQNV/FUkAhIASEQHEERCCK4yaxhIAQEAI1T0AEouarWAooBISAECiOgAhEcdwkVo0TOKqj+8Agm5+jNK9MJuMP13hxpXhCQEtABEIMQwhYCDQ3n7mXMWv0M2D6AoCUCT52KBHfKpCEQD0SEIGox1qXMmsJtLaesi8bDb8CcHg2wA4RCDGWeiYgAlHPtS9ln0SgvX3J/ikO9gF4twiEGIcQAEQgxAqEQJaACISYghCYTEAEQixCCIhAiA0IAVmkFhsQAm4EZAQh9iEEZAQhNiAEtAREIMQwhIAIhNgAgPnHRGY3jtBJYJzNwBEA5mbBpAG8QMDjzNyTHkut3bLl3pedoC3oiBwSMOmXAA7JhWHClwb7Yv/hB3RLS2QPBLAaoOMn0mE6a7C/5ye6dEqV/+a2yHwDtAHA3l7yy8DJg4nYf7mFjUQigWefDxxJZC4D41gABwMIZuO8SMAjINwxK8Bre3vjf/Py3s7OyFtHxmgdCP+cDf/HIKXa+/rWPK/+VuI2xg2fJpinAvSe7PtSAD/FMFanDbrpwc2rntW8i5rbI4cZoH+FieNAOBBAAMBrAJ5k4hVNAdzjNZ9eyiJhqoeArEFUT12VJKeZhiZNV4BxPoA9PCSaIsaN6VTDZUNDP3lV18CE2qK3EPDxid8YmxobeJGfRkXTUD+TNvjoLZvjz1jfWer8l1ggqLUj8hE2cT1A/+iB7WsgvgIprBgYiA+7hXcSiFQq8AoM4xsg/qxFhHRJpQj4xsjwnGu2br15TAVoaYn8HQWNG5j5tKwoOGXhOTJ4aXJzfJOHMkmQGiIgAlFDlVmoKKpBQIB6ABxTKKz9dwaehEGLBzf3/N7+W0tb5DiA1gGYlf3N9/mBUHv0i8S4ZmL0ANw2mIidC4Bz/ytH/kslEGoERAHjSgZfXKCx1aDnITOYPm2od802p3rRCQTS6RM5GPgOMU7wWJ9pEH9loC/+7dbWJQeZRnAdAR/wGPc1BpYMJmIbPYaXYDVAQASiBirRSxHmzz+vYdYeO1cAfJ4t/DCYnwAZDwFQDUgjgDA4M2WkphosD/eONmHx1o3xndb/NjefPtdoSKuG4325/6veajIRu9xT3o6JzJ61C/cB1JoNPwrwooFE/Oe5+OXKf6iz+52U4s+o0RQzv4UIJwPYy5KP9YDxvxPlMOgHA5tX/be1XC55UxvJXybGQwxSI6EAg48k4L2a3v7jZKZOSCbX/FnHTCMQfwX4EeuUXHZaaCNALwH8dhA6wNjHlt4OkBEhNr/EyEx/jT+MP8HAr8E0ApgfAGh+fh45OdqEE+z176WOJUx1EhCBqM56853rUFt3J8G8zzKtlGbg6qYgX6ObClJrC0GTbprUiABpIjor2dezyp6BUFv0OgIu2v1/741Ja2ukhQ1SYpBrmB81xwLHDA3d/WIuvXLnX72nyEVqammPfAFMV08WVH7CZFx00AHYFI/H1brOxLNgwSn7GLMavknITPNZRfiBxiCfpqsPjUBYk9xBhItG3pjTk5s+Uj9m1nUMfBlEX7W9Z3T3aI9/YxLOGeqLP25NMOOLykzfAVCn5f95wu3bECVCVREQgaiq6io+s3kNONGKgb4e1aBPTOHYU1cLwbN2Ye3kRoLXNwb3jPT23r7LGl7TyL9KJh+XTMYHCuW6tS16OQNfz4Ujxr8n+2OqUZt4yp3/YgXCYYpqI9LcNTAQ/z+XslOoPXIOMd1gFW0iLEv2xX5sj+ciENtBfNJAX3yL7l26xf8JzsCGkSaOOI0I1OgKKfMBAg7NxWFg+WAipqbR5KkDAiIQdVDJRyxcuudb3xi51zoa8LIbJ9MLta0vMPBsEOnORGL101Z0WTGxThOpY/oFp5na2s7YO42x+wFqzqaXJyyVyH+RAkEt7V3LwXyhhYXrVJHN3CjUFlUjict2/18/8nIRiAsHErHr3cw4FO46k4jtovOcwemF/f2rH3ONmz8y1HYQ6uAzqssiikDUQbXrGhd22UJqRdLcuWSekQpsAEitTTzMoMcbyLyury/+kh2dfaEZKDzNFGqLHkPAzyxTHnnz3JXKv98pJs0W37TTCMDJzDRpjDLwMftisINAaHd62d+lG+WwZhOAdgQS7jofxD+Y+K2IHWp18InVbBFFIGq2aicVjELh6B1EOMsyVeC4K6lYJK2tXe9jg9WZiH2zabxuMh831B9POKWZv3YBXY+4Ivn3KxChcORkIlozMb9PeAop7hwYiP/VK8POzs7gSGrfuwBEcnF0U2x6gdBP99nf3dZ26rtSCPQSMmccMo/XDkKoLXoSAT8VgfBao7UVTgSiturTsTSt7V3dzJlpBuui6DAD6wi8sjG4Z699XcEvms7OZU0jqdfjAJ3o1tjlftPsftpOJh2dTPY8an93JfLvVyBaw9GrmHDpRFmBDX97S+Mpj/ziztf9sAu1Ry8lxlW74+Q3/DqB0AmJ7r2acmlHKbq4IhB+arL2wopA1F6dakuU3TmznoCQQ5FTDDxGjB4O0NoD55pP2nffeEGVP9/NQwE0HJ9I3LXDHj///IRzj7gS+fcjELqeP8B/AAx1n4TPhz8IYIFFIPKYaafZPJ5Y91Mue8ZFIHxWZY0FF4GosQp1K0443H2kSaY60HaAh2IrVwvryaTbR0Zm/8q6fdItrmZO3WmaKW+Bt9C0R7nz76chLbDt1ANe1yCT3GiokFNZh/FTLhGIqVZdbcUXgait+ixYGrW/PZA2v0eExT5O/Co3ELcGKXVVzveP04vGe9ZzVwJ8Ri6MbmtkS0vkHQhSLxjKb5B6PC24ljP/fhrSGSEQHvxCKbB+yiUCUfATqqsAIhB1Vd27C5tpaE3zYgKWWhaVC9Hw5JNHM3X0yGgwePT
W3rsnTiTbF3i97qrJZbAc+ffTkIpAeHMyWMig5PeZTUAEYmbXTyVyR6HO7oNpjD8Kg88BQ82Huznx2w7DOM7ubsKaUc3i86RFUc0oYyondEuW/6kKRDFebL1WsHaKSUYQXvFJuCIJiEAUCa5Woym/Qk1NLx+apsBSYnSB8M68sno4he128lmzTpHnWqNYvlPJvx+B0B3eI8aPkv2xTxSbd7d4IhDloCppFiIgAlGIUH3/Ti3t0YVg3GGdhlKeXXks0GH1lWTH1ByOtBmU8a+05/hvPDHNZN+y6nW7ZhFV4Sv/fgRC5aU1HP0hE/5lIl9lPEQmAlFE7UuUKRMQgZgywpmfQKij6zBi80Qwtai7ghi5rY7OAAAFx0lEQVRINgW3L+3t7U15yX2oPbKMmG6zhC3ozlvjeiM3zfRL2/0RBX02VSr/fgWixX7KGHA8x+HCObebSx2U2wbQw8zmhqaGl1Zb60cEwoulSphSExCBKDXRGZjeVE/8alw1FBQIhcHuekONFNjkGybvXvLgjmOKJ5a95t+vQGhOjivPh1cOJmJfc3OCaDWRlpbIexDI3Mh3UO7/ul1fIhAz8MOqgyyJQNRBJWtcLfjyGZQnMMAzZjDV4XbBTWYKxu56g7EJxCvevG1NuQvPXS5U0NlcpfLvVyCy7rTvBtEiixm5ele1mptarB9NzV3O4E9a/j/MME4YTKzqnRw278pRJUYFrz9Vafgtl/W9clCuDhoIt+FtfRe/PkqvO5sAwFNDpnf5jXhjcPsZhaaoNK6mnyfCJmacmSXvaUqmUvnXNKQFfUk1hyPHGpS5TW9i55fb7XsWi6NQOBIlykzd7d41xrwOJk63X0EqI4j6+FZnWillBDHTaqRM+QmHu9tNMtdbLuVRb3qJmT47tmt2THdSurk9crjBWAnQkdYersm8aKg/vsFLVh1cTWejenM2pwJXIv/zO0//+1mp1C8BOiJXNrUziU2+yOnOaIdRgIq+A8yXwMRd9rjNzWfuFQiOXckENXIIehl9iEB4sTYJU2oCIhClJjpz01M7er4Lhrpe0/68BvDvACPrJI/frmaIAMzND0o3jw7PvnAKrjcmkizkWsP27rLnX+dsMJuH18av8VQ3iJqXJxPxO615U6Osxl0Ut92+lwsyDMLDYPrd+D+crvPEMBN/frAvfqPOhEQgZu6HVcs5E4Go5dq1lS3TyKSMGwBWp6f9PuqK0juagnyR7kpMp8QcpodUcE+uNazpViL/oXD0AiKscHJD4rQld3wqjm4GEPULFoByZXLJQCKmbpfT3vAnAlEEVYkyZQIiEFNGWF0JRCKRwLbn6YI3pzauBDDHY+7V/QaXzNufY8V5eLXdmzDeCt42mIid63W3Ty6f5c6/BxFyXH/J5k0JxLUA3uGFLQODTHyu/U5oe1wRCC80JUypCYhAlJpolaSnThw3Nu78CAinM5lhgOZZFkuHAd5GbPSnYd6S2rX3kNcpJV3xda43AF40kIirg3RFPeXMv2ro//oCLWbmzwH0fgBv251JZ/fluTAqb8GmHc0BGOdq2KrpvL8waL0JunVLoucPXkRSBKIoM5FIUyQgAjFFgBK9MAGNQJTMtUbht0sIISAEiiUgAlEsOYnnmYDdu2sZXWt4zpMEFAJCoDABEYjCjCTEFAhoFqkLutaYwuskqhAQAiUkIAJRQpiSVD6BfHcUfD/SONXpXIEwFAJCYOYQEIGYOXVRczlRi7Wz9ti5AuDzsoVLE9FZyb4e5WpDHiEgBGY4ARGIGV5B1ZK95s4l8wKjPJpM3ps5UNbSEnk3G/Qt69WmDPzWMMcWJpP3bq+Wckk+hUA9ExCBqOfaL2HZQ+GuJUS82iVJXw4CS5g1SUoICIEiCYhAFAlOok0mEGqLHkPAzyxeWm2I/LnoEL5CQAhMPwERiOmvg5rIgebOBWu5YqNNfN7WjfGdNVFYKYQQqBMCIhB1UtHlLmZn5+I5I6nGb4N4MRj7AEgBvJWIrn3Hfry2GBcd5c6zpC8EhIA7AREIsRAhIASEgBDQEhCBEMMQAkJACAgBEQixASEgBISAEPBOQEYQ3llJSCEgBIRAXREQgair6pbCCgEhIAS8ExCB8M5KQgoBISAE6oqACERdVbcUVggIASHgnYAIhHdWElIICAEhUFcERCDqqrqlsEJACAgB7wREILyzkpBCQAgIgboiIAJRV9UthRUCQkAIeCcgAuGdlYQUAkJACNQVARGIuqpuKawQEAJCwDsBEQjvrCSkEBACQqCuCIhA1FV1S2GFgBAQAt4JiEB4ZyUhhYAQEAJ1RUAEoq6qWworBISAEPBOQATCOysJKQSEgBCoKwL/D5yktjRWSi00AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-26"><g><path d="M 51 301 L 51 237.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 232.12 L 54.5 239.12 L 51 237.37 L 47.5 239.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-25"><g><rect x="1" y="301" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 351px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: 
normal; overflow-wrap: normal;">uPEP driver removes OS_HINT</div></div></div></foreignObject><image x="2" y="330" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tnXucW2Wd/z/fk2SmLAp0UcpVRPfnT/GGW+hkkkkdLWCLlEshmRYolJ+7XLSLq7uIIquooKvu6tIfF0WUq9BJpLSIIBRxnMltytZFRURXQAUpFFkExGlncs53fTLJzJnkOck5uUwymW/+6auT5/J93t8n53Oe2/chyEcICAEhIASEgIYACRUhIASEgBAQAjoCIhDSL4SAEBACQkBLQARCOoYQEAJCQAiIQEgfEAJCQAgIAfcEZAThnpWkFAJCQAjMKwIiEPPK3dJYISAEhIB7AiIQ7llJSiEgBITAvCIgAjGv3C2NFQJCQAi4JyAC4Z5VU1MG+2IrCbizjkrGADwD8A4w7rcId77uADyUSCRMN2X29EUXG6CtABa6SV9TGsKNmZH4Ol3eVre/pvbUkSkYiX2MGF+cKqICmzqqyWedzbrqtVXytxcBEYg28UcDHpC6lvwewIUHH8DxakLRAQJRV/tnuxvM5kN7NuuabY5SX3MJiEA0l6/r0pskEMX64+ML+Jzt9ydedDKoQwXCdftdO6pBCWfzoT2bdTUIjxTTJgREINrEEU0WCAB07fjY3uu3b792QtfkDheIqu2f7W4wmw/t2axrtjlKfc0lIALRXL6uS9cJBAMnZJPx77opZPHicwJdXS/sR34jwmx9GqA3l+QzibAuPRK/xa1AeKnfjY2V0rS6/fXa7zX/bD60Z7MurxwkfXsTEIFoE//U+4C0N6O3N7oHGbSBCX9n/zsDPzGsiWPT6Tt2ljZbN4KYSwJRb/tnuxvIQ3u2iUt9tRAQgaiFWhPyNFIglHmLj47u3b2LEgwcYzPXZOZV2VSibLdUJwlELe1vgksrFikCMdvEpb5aCIhA1EKtCXkaLRDKxN6+6HKAtgDoKprMwIZsMv7hTh5BFNvmpf1NcKkIxGxDlfoaTkAEouFIayuwGQKxuH/Na7pyuR8A9I4pqxgPdAf4xKGhxJ/slnbaCCI/ivDQ/tq8VnsuGUHUzk5yzh4BEYgKrMt+xA4PV6civORvhkD090dftXuCtoDw3mkbedSHwIpk8tYXOl0gvLS/ET+5o5auPsTP5mpYWAPKbxLYA0COgZ8DfBNPdF03Ovrtl1RdXgSiNG1xbain5/S9KDBxMQFnA9gPwBiYH2XCLdZ47sZt2+543k1doVC0lw36PoC9ChxesZiXj6YSSS9c+vpOeUMOviECDinkG2fg/dlk/P5K5eT9lDNWANbZAP0tgEWF9DkAvyWmEcvg6w7Zn7PVzvMU64lEVh2QY/8IgDfm/zb9230ltDQWYQufB9ADwA/gWQAPMOEqL3V4YTNX04pAzDuBwGN+ykVGRjbtmJ8CoW9/PT9g9aD2+ScuY8L5hQeOU3HPAfT/MsnB7wUjsQvdnqTWCQQZxlOw+Dt/EZ436Cvju7r9e0aHhm7YVU2M1HpV1y7cDVCoWBYxPp9OxT/phUswPHA6Edt2yXF6fAGOczp/ozZTwI/1YPoXAK+uXhc/SgY+lB5O/DD/yK/w0QmEOTER83UHPg7GRwD4NNldCVp1OzsnhQjEfBOIeTTFpB1BeBwFVvupB5cOvIUsjgN4W7W0he/HmPlsg+j1DPzrVJ5KYUhKwnIQ8GEGzgVwuEOdJhGdkR4Z3OhmBKHS9PbFPgTgyuny9CNNpzb2969bsDv3SgKg46dEBvhMOhm/VJcn2L/69ZQzNwKk3uK9fEwQvjr+530udjrToworEwjwEMNIEfjjDuKgNKeioHkxslPSikB0sED09kYPgp+GwPgbWzMT3f6dpw0NDanh+9SnE9cgvLS/lh90KLTqUDb8d2se1CbAjwJGVpXL4CMJeKttdPECGGkQ3l+LQIDwPBj7FgUHQAqgxwujiTCAx6wJ39Gjo7epqRNX01mh0MDb2OAfFKaqVDZPb9MFoVT5DyjY9YIFPmY0mdheyrYiN8aTTMgS6CUG70VAH4CDy/1T+eBnuUDgZQDdUxs2GL+BgR/ly7XwbhBeD2B9Jhm/qpa+0Kl5RCA6WCBCkYHVzPkh/9RwmgkXZUfiXyptdicKhJf2e/2Bq4OJ3X/14s3MPGDPS8DWnMHnbhtOPGH/+5IlJ+9rdAU+S5Nv/uXTGx5GENPl8ij7fauzQxt/U/ybGjWNmXjd6EjikeLfqk0xqXS6EYDTjjcdq97wwLkg/prNtntg4pRMJqGCSE59nLdfY7Pl4wtLuakj8D2RNW83OPdNgI60FWUy+GPZZOIrOns0AlFMNkbAxw86gK+yrWdQX98ph5mm8cdMJvE/XvtCJ6cXgehQgQiFTt7PMgL3EfBOWxNfIouXp9OJTKcLhNf2e/2R94SjxxiU30KsFqLzH2JcxxZfUPpQtJVNvZHoP4PpC2Ui4VEgGPgV/Mb77OLg1AY3AqHylgsq/3Tc71+2fei2P1Tik19L8OF2gFbY0mnfxstsAUwwX3LwgfhypQXowkL21QCvtdXxO5i8LJNJ/LrUPkeBYP5EJpVQUXQrrmF47Q+dml4EogMFIr+bxjJvAqh/RvOYt6gdNroHWLNjMVU7ld3IXVy1tN/LD7y/v9+/O7foRoBPK+ZjIGuNTxxf3DnkVJ4aeQT2+OPXCzuPppN5FAiqML9fWrdbgViyNHqYzyI1TXRYoQxX00yl01MMPOmH2Z9M3v643ZaenjWLjICpdjTZ1msqTxXZ8xdGYXcREJwSZQcODgLxhGnwMs0oxYv751VaEYjOEAha3L9m30DOPIIIZ4ARs7/ZFpr4AsNYkU1uHNU1eY4LRN3t9/Kr12znnLEoXK0szXw/4E0gPG1DdSsQahAU7It90y5ebqaZyhe46dZu/7Nnla5zaab8HEcATgzdjnIcBEK7/lbNX/P5exGINhaIBnZME8SfyIwk/s1paN2OAjGb7fdSVzAcPYGINhWniZzemJ3KnByB7HcrgOhUGm8C8YTlzy0dHdr0lBu7PQiE7vT9w/YF79L6NDvFnMRSJz7XZ5PxD3iZ7tGIs1YsdQLBhE9mR+Lq/IN8XBIQgeh8gciB+OKD98dXKs3xdrBAuGq/y99LPlkoHLucCRdP55k+c+C2nGAk9hFiTC+wehEIj1t1vQiE5vT5OMAnZpIJdZCu7FPWbwi/Ro77M5mEuqxq6tPXd9pCExP32Le1MuGj2ZH4V90yU+nC4RNebdECFeH43VP5mM7LpAa/bi9HKxAeoiN7samT04pAdK5AqKtGsxbxOfYdLU7NbcddTHX+8Dy130NdFAzHbspP5RU+anE6nYr/vYcyEAwPrCBi9aCb3NHkRSAAT1MlXgRCK4BEV2ZGBi/QvemXl61Pq3nzV1Xd
C9BvvXBjcICA9wE40Ma/7FCfRiA8Tct5samT04pAdIpAEJ5nzu+P32YYuNcc920t7oN304HnvEDU2X43jFQa3eE7p63DlcrUvHk739dd5/3VngWiPPSGdppJcwLbcVG7qSNUjbhqBMLxXIZb38/HdCIQbSwQ1Xb+NLLDtqNAzGb73bKcDwKhaaN2mqk8hpPz6WsRCLc9rL3SiUCIQOQJiEC4+2HOB4FQJMp2JmmmmUJ9sUsZ+PTUVE+FrbciEO76V7ulEoEQgRCB8PCrnC8CUbYVt2TxWbPo7HgI0+EFpKlTPjLF5KFTV0gqAiECIQLh8bfUG4ndAMZZU9kqLDA7Fd0TjvYZlA+xvWc+jZdFao/1eV2DmFxrKduKO+M2wjL7wdrQGsX2h0LRI2DQVgZeU/ibyUwrs6nBezzid5VcBMIVpqqJRCAqICqLL+Nxe2EoHPvGjHuhK+Rv5Eniql7XJJApJvfUgpHYxcS4fCqHx36h8pWFxm4zgVA2lh5KY2Dq3EKwL3YFAWpnU/FTMdCdOkVNAXOYgDcVM9SyuO/WSyIQbklVTicCUWkE0RdbSYD9/uYfGbxrZSp1p4oMWfHzjmPX7vmqP+++Y8ad0CIQjsxaLZDV/Gn/vmyLKrCDDVqWHR78hctyyg6NtdsIQrWjLPRGYZppvNu/u+Smwqrt1wUDRJVRh45lQWjuJWAhAb+0gF8YFl+fTicesqcXgXDZE6skE4HwJhCuT7Bqwh/bb7Wacd1n/o2yXIxUNLETssm42ivf9I+MINwj1oUR9/I2rIl51HZTTAUa1BsZ2ADm9YX/57exMvOuGdNjLs9llIfkwEsGG8enUhvVzW+uPqFI7Axm3GALdqjdWisC4Qpn1UQiEBUQlc+zouKpUltR6g1RhXa+ZEbxMoLoiBGEWjEoeXCqdrmNK6TvG204xaQa1dsXXQ7ko9Z2qf+rm+bUv7aT5K7jUIXDp7zVIt99Mw65AVt3L+Co061z9g7T07/qYCPnV2sW9suZfmyOTxxbGiRRBKLqs99VAhGICph0b4oq3n+VDk3BcDRGRNeXBcwTgegUgYDuYQfwUM6PNQ8OJZ5xaKhz32hTgSgNvcHAT4jZAtG78m10CK3h1P7eSOwrYPzjzO/p5m6/9cGhoUTZyLqYToUUJ4M2zFjTA0xmrM+m4rZ7KCZziEC4ev5XTSQCURlR+VzxZPrNOcO44MHhjU/as6twxP5A4NOOdxOLQHSMQEyOInR3O+jvTVbbY3fl6J8Ko0p/GYg2FQhlZ+l5hxm2VwjDoXO2wyhAjUn+k+Fbn01u3FYa0qMnEj2cmFSU2akw35Pa5PyyJgJR9dnvKoEIRBVMFQ745ADeDhg/UfOhmmslTWJczQR1R+9kfP05JhCuepDHRE5z9a1eg/HYjHxydbdD1x4vXgnwOZr8TzGQVFdngvjtYByhCcE+na2dBaI89EbRbrdTrjPw9EaiS8Ck1tb20wilChczDNBzIO62XQdakpQeh2GdlBlO/EznOxGIWnp0eR4RiOocqbcv9sG/7Ff/csUf+MxyTAIug5W7ng2/uoDljSIQk4A6SSBUexxuOqvWq14m4BoGVIC/hfnEbSwQusOBBW+6unFOB6N36ep3weLvFO7RrsZLIw50amZ44385ZRSB8IjUIbkIhDuOFFoafQ9buAqgN1fJ8hwz/0M2lYhHIqv2z7Ff7dAQgShA6zSBUM2KRqO+p3aQuqTpCgCvrdw/+FGyjDNNw7IM0Na5IBCqPZodSGqK5zPpZPxSdz+h8lQ9PafvRYGJz9GkUE5d3VqhvBwxrjFzgUtGR7/9UqV6RSBq9crMfCIQHjgWrotcTuDzAfpbAIsKb3/Pg/mXRHRNl483FxfbyjqpTDF13AjC3n3Um/a4SScx8/kg+r9g7GvrH9sYdM3E2D7f37792ol2juaq+0lotm1XDK3h4WcFFRW2ezetBONMBg4HsL9tG+uzf7mH+jEwvmVO5DZXu9K1WK8IhBcPOKcVgWgMRylFCAgBIdBxBEQgOs6l0iAhIASEQGMIiEA0hqOUIgSEgBDoOAIiEB3nUmmQEBACQqAxBEQgGsNRShECQkAIdBwBEYiOc6k0SAgIASHQGAIiEI3hKKUIASEgBDqOgAhEx7lUGiQEhIAQaAwBEYjGcJRShIAQEAIdR0AEouNcKg0SAkJACDSGgAhEYzhKKUJACAiBjiMgAtFxLpUGCQEhIAQaQ0AEojEcpRQhIASEQMcREIHoOJdKg4SAEBACjSEgAtEYjlKKEBACQqDjCIhAdJxLpUFCQAgIgcYQEIFoDEcpRQgIASHQcQREIDrOpdIgISAEhEBjCIhANIajlCIEhIAQ6DgCIhAd51JpkBAQAkKgMQREIBrDUUoRAkJACHQcARGIjnOpNEgICAEh0BgCIhCN4SilCAEhIAQ6joAIRMe5VBokBISAEGgMARGIxnCUUoSAEBACHUdABKLjXCoNEgJCQAg0hoAIRGM4SilCQAgIgY4jIALRcS6VBgkBISAEGkNABKIxHKUUISAEhEDHERCB6DiXSoOEgBAQAo0hIALRGI5NL6W/P/qq3RO0BYT3Fip7zE+5yMjIph09kejhBuOLAC0DsAcIz4N5G4OumRjb5/vbt1874WRgvtycsQKwzgbobwEsKqTNAfgtMY1YBl93yP6cTSQSppuGBvtiKwm4s5iWCRdlR+JfUv+PRqO+3z+D5cz4EIiWgLFvPp0Lm5Wt4yadxMznA/R2AK8GoGx6hoH7mPjfRkcSvwDAbuy0p1F2PbnDdySRtQ6MYwAcCsBvs+2XBBo0J3yDo6O3PVup/MVHR/fu2oW7AQoV0xHj8+lU/JNe7Orv7/fvzu13q8I2xRLYkE3GP1ypnIptAZ4l4Kcg3NTl481DQ4k/ebGpmFa1sXs3rQTjTAbeYes3eX8Q8AgzD5oTuc3btt3xfC11SJ7WExCBaL0PXFmgEwiY/G746CQAX84Lg+7DdF4mNfj10q96e6N7wI/1YPqXwoO2ih38KBn4UHo48cNqD2AngQiFBo5iw7oJoDdXqexhNiiWHR5UD/u8fPT2Dbwf4G8BeG2FvCYDNy3w8wVuH3z5h+nTdAoRPg/gjS6ckSOi2yeILnxweOOTTulDfbFLGfj09Pc86kNgRTJ56wsu6sgn6es75Q05+IYIOKSQ5yWyeHk6ncg4lEGhpdH3sIWrXDBWRbwM4s8hhyszmcSYG7vy/dCkz4FxrmOfm1lQjhjXmLnAJaOj337JTR2Spn0IiEC0jy8qWqITCAI2MPCvFX6oT5gGL9s2nHjCXniwf/XrKWduBKjHY/NNEL46/ud9Lq40KtEJBCz6NRHf4E6M8lY9QlbuuHR60++CfdGPEOiLU2/01Y3+Dkw+s9pDTzHdlaMNBJwJwFe92BkpniaD16aHEw/o8oVCA29jg38AYL/C969YzMtHU4mk23p6wwPngvhrNpFJjy/AcdvvT7xYWoYSfPIZlzFYjS48toVHLb956ujQpqcq2dbbG/1r+GgQwNFu21BMx8CvYNBJNtH3WoSkbwEBEYgWQK+
lSo1AjAPYbXvg7mTgAQK9AljB/Bsk0TWZkcEL7G/8odCqQ9nw3w3g8BI7TDCeZEKWQC8xeC8C+gAcXG4vXTs+tvd6J5EoFQgi3MmMoO1haQL8KGBkVdkMPpKAt5YKADGus8D3E9H1NhFUb7opgB4v2Kim3IoP4aKpJhGdkR4Z3OjEOj9FsosSjPx0UunnZQKyDHoCxN0AwmAcpnnwvsxM67KpwU26BzZ8uB2gFbaHZNXpoWLa/v51C3bnXkkAdPxUfttUnb2+xYvPCXTt8eKVAJ9T1hLC88T4cb4tgM+JtU2Qf6tjVqGOMTA/CjJ+nJ/uq8iLh8YX4CSdwNXym5A8zScgAtF8xg2pQSMQ0w9D0BVsWpfY35iPWrr6EBjmxINDiWeKCR0eiiYzNls+vrB0pKGmdnoia95ucO6bAB1pa4jJ4I9lk4mv6BpXKhAlaTabBn+0tK6eyJp3EJsJAt5krweABSAAYIyAz7DJG+ztVFNETz2NdSD6/zNHUnwPTJyiG0Wouf3x3KINDLWWMePzNDP9wyEHWltK11sUT59pfZUIakrP/oa+E4axPDO88b/KRKJ8BPDTcb9/2fah2/5QrVMElw68haz8COSAQtqdZNGydHrw4ZK81BuJ/jOYvjDTLn7UYlzwugPxQGlbliw5eV+jK/BZQn6ayN6We7v9fKpuei7Yt7qfYKkXi+JUpprO+8ICP39Rl37J0uhhfou+XiLAVYW7Ghf5fnYJiEDMLu+aa3MSCAaunxjb59xKUz7FSoOR2Mcov5g99THBfMnBB+LLlRagCwvZVwO81pb3dzB5WSaT+HVpo5wFovLIo7cvuhygLQC6SsocI8La9Ej8dgeA1BuOXgTKPySLnx1s0DLdlEZPOHqMQfl6bOs2rqZZqLcv9sGyNR/mLbCwplSM1EPSZ5F6yKvRh/qMM/D+bDJ+f7WO0NsX+xCAK6fT6QWvpy+62ABtBbDQVub9MHkgk0n8T4V6KBiJnkVMV9sf+kRYlx6J36Lx6RUEqNHo5IfoytLRaWmewmL9ZoD6be24q9u/Z3Ro6IZd1RjI960nIALReh+4ssBBIKotWk6V3dOzZpERMNWD6W3TFVZ+YNsNK7x13kXITxVNPiOAz6ST8Us1D5MZu5gK3z9sTfiOrrQDqK/vtIUmJu4pWxsh/EdmJP7RSovjmjdu7cN4cmfQohsBPs1mt6PYaZxDwb6Yevu+xPbdmMV84mgqoR7UUx9dXexiF1J+A0Hp9BTTGdnU4LdL7KHeyMAGMK+3/b24dqOdKirNX94WLlvneMexa/d81Z9332EfDTBwQjYZ/261zlsq+gw86YfZn0ze/ni1vPJ96wmIQLTeB64s0AuE+50xocjAamZWb4bFKQUvD8W8jeVlsHbKRDeCcBKT0sb3RmI3gHGW7e8vWOBjRpOJ7ZVALe5f85quXO4HAKktl/mP7iGmERKVdH0mGb/KlSMA6MRWjeSyyfgHSkUsGI6eQERqjaLAXc/MXrdmgVu72UAzQjGdRgBObXMzytH1PdYLVlk1Pf2rDjZyvq0AqbWchxj0SICsK0ZGEs+55S3pWkdABKJ17D3VrP+R4pZsKq524FTb96/eer9JwNm2h6f2gVbJKM22S+3OHI1AuJ5aCYVjlzPh4mk73Imglo/mLTcYHjidKC+URRGp6Y1WY6dWLDViMg7wiZlk4vtOrMunAunWbv+zZw0NDamzKVOfMvEh/Bo57s9kEr9327l0Zy00ZzYoGI7dRIQzbNxkV5JbyHM4nQjEHHGe9gHosKultEm6qRsmfDQ7Ev+ql+aHwye82qIFalrh3VP5NOcsyra5ephWKH84sqs5a7cCEQrHvsGEv7MJkKvySzkF+2JHE/A923qJ40gn2BdzPX+vaYfJzKuyqcTUwcOiLaUiRcDWP/1V98k/ve/mV7z4NRiJXUyMyysx0YxAVfIxBrYQ+MZu/55Dsq7ghfrcSCsCMTf8hHqG+Zo3f9XqewFyM089rQXgAAHvA3Bg8Y+6E8KaEcTUqe9quMsEgnBjZiS+rlo+NwKh3TrqYk1AV7eOqdO8fE842mcQqRHDnoWyHNdjNIvO2rS6N3+A/xsw1EFGjx8+AsASm0CUHerTrUGVVJJj4OfEGGQfbT5kkfUrtyfvPRoryWeRgAjELMKupyo3D0Cn8h12utRjznRezQO8XCDcTROpQpsrEGXhSmAPA+IFSCSy6oAc+0fsp6+dBEITesNxmqn0BLbTonaFbc9emuGUVivo4fDqIy2y1O6vqReECpW9DOAusuiG3bv3/qGbXXaNMFzKaCwBEYjG8mxaaXNaIBgPdAf4RDfhL+a0QFSY8itvV/k2UY2QOJ6+boVAqM5d4TxIpb6vDjd+y0+5y1XssKb9SKTghhMQgWg40uYUKAJRmasbPvWs45TWrh1BVNjZo9mZVDZ1FApFe9nIT0XtNVlf+ZbToh2tEohi/XmhsKwPE6DOxpSeZHdyVsXwJM355Uip9RAQgaiH3izmdfMAdDJHM8Xkautorc0rm2KaLyOICmcDNGcbyhafNYvO2nMmyi+NFLta/VzIR8H+1YfSBL8PBp8FhlrP0AeOnMzgePK8TjskexMIiEA0AWoziqxHIEKh6BEwaCsDrynYZjLTymxq8J5m2Nq+AqGJb1TjIrXmPEVVppottlNbjTU7zZxCa+Rdpju8pmJXpVPxv2+GT92WqWI2LVjw/JtM8q0lxgAIry/L6+IUttv6JF1zCYhANJdvw0qvRyDUXnwKmMP2OEe1Ls66aVC7CoSyvXTLac1bQ8MDK4hYbfnNH4Aj4A+w+Jh0OvGQE6OyQ2m2cwvl22adY0kVyy/bsuthpObGjw1IQ72R2LFg3GSfhlKRXXnCt7TavRoNqF+KqJOACESdAGcrez0CodveCVR/AJW2rSA09xKwkIBfWsAvDIuvL30otrVAzPJBOTtDTeiNqWmmsrMSDvd42MsrDweOiqMOh75aDNehLiV6CqCHmK2tCwLP3V48mJcfLbF1PJh6ASxmIL3Av3Nt6cE9p99CMBJdR5yPyFv8NHWKc7Z+k/OhHhGIOeLlegRCNbE8+BteMtg4PpXaqLZquvqEIrEzmKHudCiG69CekG5rgSiPkup5q6taoPVb1r0A3lIE5xRqoxSsJjbRBj/8l9pjULmNV6RZ+FZH6i/LJuOfcnG6Pm9ab2/0b+DLBxR8na0tM8KS13tie7bXwFx1ZknkioAIhCtMrU9Ur0CEw6e81SLffTMOuQFbdy/gqJv4/JMxdfxqzcIW7A8/Nscnji29UrKdBaJJwfqqhs8o9qDy0Bs8yozPE5G6WrRwkE4fWqNMbFRQPwO3gehE23c7QbwyM5LYVq3XOoQ9H2MYx2WTG4eK+TWHAj3FfCqPR4UnLH9uabULiqrZL983n4AIRPMZN6SGegUif21nJPYVMP5xpkF0c7ff+mClMwr528oM2jAzRAXUPRLrs6m47cazyZLbWSCUfbpw3wz8xLByJ6bTmyqdLteH+wYc71HQOb9kOumPRBhmxgmFtI6hNXRlObTFTZwkCoajsZ
LLmABN6HIHUXUlRPqQ30h0+3ee5naKqiE/ICmkJgIiEDVhm/1MDRAIOIwC1GzEfzJ867PJjeqtc0bgv55I9HBiUoH+psJ8q9arxV2n0Ue7C0SFC4NUhNEPH3wAx0vDRKhzDyb8X2bG6rILg1y+sRd7Tfl5hxn9qWpYdHvqCm15AcwXwsKtpfdU9PScvpfPP3EZE9SFSX43o49weHXEIuuu6TMa+VzPMdNHJnbtHdedlFZ9x2DcWHLZlDY0+uz/oqRGNwREINxQaoM0jRAI1YzeSHQJmNTum/LDTYTnwRgG6Ln81ZEW3q3dpgh6HIZ1UmY48TOHN+SZ90F42F3TzJPUdlsbdOVotYuMtD1Hc2J6Ol0NW0CrtGUMhIfAVPCV9U6AFmvu9x5j4n/KjiSucejuDiPQfOqXAf4ZYBRuu+PXqujwABaVl+X+DpI2+NnNexNEIOZIF2iUQORFYunqd8Hi7wD8Bu/NV+JAp+qu2CyW1e4jiKKdkw+w1OXCAAARLElEQVRqUg/EmOa+6Wpo1NvzWbWeJdHc7qfqcx0WvdS4QluuLbSlmu2l36tQGBdmknF1u5xj6HiHmwXd1qWuKL1pgZ8vcBNyxW2hkq65BEQgmsu3YaU3UiCUUWqagQITnyNAHayqdPK12IYcMa4xc4FLRke//VKlhs0VgVBtyN9pvYOUQHwZwEEuHJYj0Dd2+41Publb2qk83Q6kSqE1XNhVS1uUGmSZ+AOjI4lHPNRx3l9GIJcB2MdNHgDqfooLdVN3LvNLshYREIFoEXiv1TZaIOxv0d27aSUYZzJwOID9bW/Tz/7lHurHwPiWOZHbXLpbyakNc0kgpjgsPifQ3f3ie0BYw8QRAIfapmGeJeCnzDzohUMlH+vOpri9da9a31Gnmf0LXujxwfgAkxUG6GDbS4CaDvodg+6yQN/alhz8b7dbYu31qjqmeZXVMQbwU8RGyoT1zdyuhaMSzbWa19rzexGI9vSLWCUEhIAQaDkBEYiWu0AMEAJCQAi0JwERiPb0i1glBISAEGg5ARGIlrtADBACQkAItCcBEYj29ItYJQSEgBBoOQERiJa7QAwQAkJACLQnARGI9vSLWCUEhIAQaDkBEYiWu0AMEAJCQAi0JwERiPb0i1glBISAEGg5ARGIlrtADBACQkAItCcBEYj29ItYJQSEgBBoOQERiJa7QAwQAkJACLQnARGI9vSLWCUEhIAQaDkBEYiWu0AMEAJCQAi0JwERiPb0i1glBISAEGg5ARGIlrtADBACQkAItCcBEYj29ItYJQSEgBBoOQERiJa7QAwQAkJACLQnARGI9vSLWCUEhIAQaDkBEYiWu0AMEAJCQAi0JwERiPb0i1glBISAEGg5ARGIlrtADBACQkAItCcBEYj29ItYJQSEgBBoOQERiJa7QAwQAkJACLQnARGI9vSLWCUEhIAQaDkBEYiWu6CyAdFo1Pfks8abyLRWg3A0QG8EsMiW62UGdhhMaYt408QCHt5+f+LFZjTrqKWrD/GbfBwRn8LA4QD2B+Ar1JUD8DQBv2TirUzGXYcssn6VSCTMZtjSiDJ7+qKLDdBWAAuL5TFwQjYZ/2495ff3R1+1e4K2gPDeqXIYD3QH+MShocSfnMrujcRuAOMst3mCfbGVBNw5szy6dnxs7/Xbt187UU8bymwh3JgZia+zlxmJrDogx/4RAKpPNvvzggU+ZjSZ2N7siqT8aQIiEG3aG3p6Tt/L6Bo/F0wfBnCQBzNzTLiP4fvE6MhtPwPAHvLqklJoaSzCFv87QEd6LOv3AD4Fk2/LZBJjHvM2PXlnCgTGmHl1NpUoEQ5vOEUgvPHq1NQiEG3mWTVieGoHxf7yVnsFgNfWYZ7JjM1mgNc/OJR4ppZyenujf01+42pmPtU2UqilqIct8p0+OnLbT2vJ3Kw8HSoQCtfDlj+3YnRo01O1shOBqJVcZ+UTgWgjf6qpiV052kDAmRUeyGrK5hmAdk+azkpEXl2hGU+TwWvTw4kHvDT1qP7o/v4cbgOo30u+Cml3wjBOzgxvTDeovLqL6WCBAIGu6fI/e8HQ0JCa+vP8EYHwjKwjM4hAtIlbFx8d3Tuwm24lxnEak/7IwDcNi6456CDrN6Xz+kpYxk1jmcX8cQKO0ojLy8y0Lpsa3OSmuYsXnxPo2uPFKwE+pyR9fvoKjK+RyT8eH1+4szjXrUY+zzxj7jfB/iNBOI8YxwLw2/Mz8Cv4jfdlhzb+xo0dzU7TyQIBYMxiPnE0lVBrLJ4/bgTCa6Fe11i8li/pG09ABKLxTD2XWOGBPAbmz3YHcGWlxU1bhdS7dPURsKybALytxJCdIF6ZGUlsq2ZgTzh6jEG0BcAexbTq4c7ki7qdJgr2r3495cyNAPXY6yPGdV2BnefX+mZbzXYv33e4QKjFp58Y1sSx6fQdO71wUWlFILwS68z0IhCt9yv1RqL/DKYvzHzzp8fJwup0evBBrybmd9HkjKsBXluS9xGycsel05t+61Rmf3+/f3du0Y0An2ZL8zuYvCyTSfzaiy2RSPS1OcZ3S0RiJ1m0LJ0efNhLWc1I2+kCkWdG+I/MSPyjXjcriEA0o8fNvTJFIFrss96lq98Fy/o+gP3sb+sw6KTs8OAvajXPaVRSbW66p2fNIgqYwwS8yVb3+kwyflUttmhHI4SLsiPxL9VSXiPzdKBAqJ1iu+zbdmudahKBaGRPm7tliUC00HcOb+sN2aaomhUKnbyfZQTuI+Cdtma+RBYvT6cTGV3TNQ/NVyzm5aOpRLIWVGptpWsX7gYoNCWAjFuyqbhaiK93C24tJk3l6UCBeIyADQz8a8n0YNYanzh+27Y7nncLTATCLanOTicC0UL/hkIDb2ODf2AfPTR6jj4YHlhFxLcB6JpqKtGVmZHBC3QPaM1D849k0bG1THUV6wv2xa4g4O8Lu6+YGMNdgT3OHxq6Qb3ttuzTkQJh5ZaxEbi4dIMBA5dlk/FPuRVlEYiWdcu2qlgEooXuCPXFLmXg027f7msxVXuqF3jCNHjZtuHEE6VlhkLRI2DQVgZe04gpplpsnq08nSgQfspFJnxd3chZ95ZME75ksHF8KrVRnXyu+hGBqIpoXiQQgWiRm3VTLwCnxxfguEaHyugND5wL4q/ZmmoS0RnpkcGNpc3v6zttoYmJe0oWlqsubrcIY13VdqpAjIxs2hEMRweISO1mmx45gofGF+AkN/1LBKKurtUxmUUgWuTK4NKBt5CVn146oGgCAxuyybgKrdHQj9e6NCMbNTPxqMW44HUH4oF2jq/kBVwnC4TapBDY449fJ+DsGUyYP5FJJb5YbapJBMJLT+rctCIQLfJtMDywgohVULhisDsw0xnZ1OC3G22SdlRQIXicOsOgmaIomvV7Bm6BQTe2ezC+ahw7WSBU2x38+ALDWJFNbhytxEcEolrvmR/fi0C0yM/BSOwjxPiKrfq6dgtVaQYFw7GbiHCGLd1jar5aTUfo8hYWt2+oEsZjDISHAL7DsKy7d+3a91f1RhGdTXfoBKJp9c9ONNcyn4YisTOYofw49SICF
1NNIhBN6wlzqmARiBa5KxSOXc6Ei23VNzWccWEnkdq5VPzsYIOWVTprEVoafS9bdDOAA11iyjHwc4PxPctHt7T7CGM+CISaaur+qxdvZuYBmw9NEH8iM5L4N6epJhEIlz2+w5OJQLTIwWU/QKDiG329ZgYjsY8RQ809Fz+uBEmFHafAxOcK21SnQm+4tCcfQypAuX93Gqm4LKcpyeaDQChwvb3Rv4GP1HrX62wgK4ZeEYFoSpebc4WKQLTIZXNFIIp4liw5eV9foOtUkPUPAL3ZY/hvNbK4eoGfP+kyptSseGW+CISCGQzHziPClSV+u7fbz6fqfCICMStdsO0rEYFokYvmmkDYMU3GekIfsTHAxEcDONgNRhU8jkw+1WtMJzdl15Km0xep7Ux6e6N7wMBtIDrRPtXE4I9lkwn7Wlj+axGIWnpU5+URgWiRT0Ph2DeY8He26p+w/Lml9VzyUqkppWseBPwBFh+TTiceqhdB4R6LIBFOAeP4KoLRNmcq5pNAKB+Hw6e81SLffSVrSuqejuWZ4Y3/NUNQSq8/1Vw56rXfSLhvr8Ran14EokU+qHVNoEZzPe9iqrGefDYV8M/oyp3pdF0qA9dPjO1zbqt3PM03gchPNfVFP0ogFSjRtqsJZVNNMoKo5xfQOXlFIFrkS82F8yYzrcymBu9ptEnh8AmvtmiBOnPxblvZPzJ418pU6s6XG11fsbz8tIYfF4LpX0ouD6oYMLBZ9pSWOx8FYnJ6kL4D4H0zppoY67Op+NRpexGI2eqF7V2PCESL/KM73UyMz6dT8U822iSHuq5Lp+IqgF6zP9r7LprVVi+NmY8CofjoQswDeNpg89hU6vaf59PIFJOXrtSxaUUgWuRafRC95sRiCoYHTifiW+xN1Z3anjzdbV0CGPsDvCeARxsxynCIO3VXt3/PaCsjus5XgXCcamLeAgtrMpnEmAhEix4MbVatCEQLHaKL5uol4qYb0/PTPD7cDtAKW3ptNNfSoH4MPOmH2Z9M3v64m7oqpSk7qOfiZHG9dVbLP58FQol29y5KMHCMfaqJCOvSI/FbRCCq9Z758b0IRAv9rL0Pgmhw95/3XtuoBVzdjW5Od04E+2JHE/A9WwRQk5lXZVOJO+vFFIzELibG5VPliECUIfW6y0ezjuXpsGVvJLoETGptauo2QwD562Xhp0vAOGvKSNnFVO9PYE7mF4ForduoNzKwAczrbWaMM/OZ2VRisF7T1OE2oytwFwFBW1mOC8RLlkYP81n5E7eHTT/Ip6cdarVn8ua8/W4FEC2WwW1wq9x8HkEU/EC94ehFoPx96NNaQDTIzBaANSIQtfb6zsgnAtFiPzruTSdemRlJbKvVPMc7qRnXdQV2nj80NJQrLdvhCtS6BUvbRqbzMqnBr9favkbkE4EACutDmwHqt081AVACERCBaERPm7tliEC0ge8c9qbvZOD0bDJ+v1cTJ7cyGlcDvLYkb9VDauHw6ohF1l0A9rLlfYGZ12VTCTUd4ekeaYe5bscb7by2tZ70IhCT9IJ9q3sIltpevdCRp0wx1dPV5mxeEYg2cJ3T2z6AMQI+wyZvUDtL3JjaE4kebjBuBOjIkvRjRFibHonfXqkcNYoYzy3awODzS9LlCPQNH018zm3gPSdbvN6P7KbdtaQRgZgeH+immmYwFYGopYvN+TwiEG3iwsmhPl0LIKYx6TkwvkpM8YMOsn5TeqNbMTYSQP8IQMVGsp+SVcWNMfPZ2VQi7mYEEAqdvB/7AneBcZTGlhyIHyDQzWTx8K5dC3cUF9T7+9ctGB9/aRGT72gQzgPoXaW2ELB19wKOurn2stmuEYGYJuywXjWdQASi2d2xLcsXgWgjt6gtqeQzLmOwuna09CFftNQE8AxAu/N/IN4bjH0rNONpMnhtejjxgJemhkKrDrUM/xYC3uklX6W0KlifYeVOTKc3/bZRZdZTjgjETHoO04uTiUQg6ulqczavCET7uY5CS6PvYQtXFcJq12qhyYzNps/4yIPDG5+spRB1F4TRNbEBnL+Jzkmw3BQ9aUuA1z84lHjGTYbZSCMCUUaZgn2xzxJwSfk3uDEzEl9Xj1+8buOtpy7J2xgCIhCN4djwUiYPuJHaZvhZAAd5qCDHjC0+8CdTqcQvPeRzSko9kTVvJ5hfIMaxJTGVqhVvMvCgYeCi9HB8xM30VrUCG/m9CEQ5TTW9aBmB+8pGjjKCaGTXmzNliUC0v6toSd/A//GRdSIYJwF0KID9bW/0LwP8O4CSAG/u9u851KzwFWqdJLCLltJkoDd1tkLdA7HIhjA//UXAI8x0e85Hd9c6epkNt4hA6CnrDlfKFNNs9Mj2q0MEov18IhYJASEgBNqCgAhEW7hBjBACQkAItB8BEYj284lYJASEgBBoCwIiEG3hBjFCCAgBIdB+BEQg2s8nYpEQEAJCoC0IiEC0hRvECCEgBIRA+xEQgWg/n8wrizR3GjS1/Uy4KDsS/1JTK5HChUCHEBCB6BBHztVmiEDMVc+J3fOBgAjEfPByG7dRBKKNnSOmzXsCIhDzvgsIACEgBISAnoAIhPQMISAEhIAQ0BIQgZCOIQSEgBAQAiIQ0geEgBAQAkLAPQEZQbhnJSmFgBAQAvOKgAjEvHK3NFYICAEh4J6ACIR7VpJSCAgBITCvCIhAzCt3S2OFgBAQAu4JiEC4ZyUphYAQEALzioAIxLxytzRWCAgBIeCegAiEe1aSUggIASEwrwiIQMwrd0tjhYAQEALuCYhAuGclKYWAEBAC84qACMS8crc0VggIASHgnoAIhHtWklIICAEhMK8I/C+wpca3TmA5zAAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-37"><g><path d="M 151 651 L 51 651 L 51 407.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 402.12 L 54.5 409.12 L 51 407.37 L 47.5 409.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-40"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: 
unsafe center; width: 1px; height: 1px; padding-top: 662px; margin-left: 142px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="133" y="656" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g 
data-cell-id="jeVlbFHk8Qahp5zcIn_D-60"><g><path d="M 201 701 L 201 793.2 L 313.83 793.2" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 319.08 793.2 L 312.08 796.7 L 313.83 793.2 L 312.08 789.7 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-61"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 712px; margin-left: 211px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="204.5" y="706" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-35"><g><path d="M 201 601 L 251 651 L 201 701 L 151 651 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 651px; margin-left: 152px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: 
inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Any GPIO<br />w/ WAKESTS<br />active</div></div></div></foreignObject><image x="152" y="630" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXl8ZFWV//e8qiQNCN38YIBmUXBhRkR+agtJKkmboZt9aeimkm4WgWEUkBYEF2QZxRFwmx8IP1REEJSlOylpFpEdjJ2qVAXsGX4OgwwfBBF6sBuUTUgnqXrnx61UJS/v3VfvvVpS23n/KF13Ofd7bu733nPOPZcgnyAgCAgCgoAgoEGABBVBQBAQBAQBQUCHgBCEzAtBQBAQBAQBLQJCEDIxBAFBQBAQBIQgZA4IAoKAICAI+EdAThD+sZKSgoAgIAg0FQJCEE2lbhmsICAICAL+ERCC8I+VlBQEBAFBoKkQEIJoKnXLYAUBQUAQ8I+AEIR/rOak5AGLo3uFTHoEwF6WDt8kkw8dGYkl50SIJu9k0dLo/JYttJiAIwDuBtEuYOxggWUMwJ8BPAGmB9Ihuvfx9WtfLAa2np7lC9McHgbwgWLqA8hMycKvAxRn4FfzwvzroaHY3/y2194dXWSAHgKwfb4OA0en4oO/9NuGrpzCsXWLcTAzH03E7QDtDmCrXNm83C+AcCeTcc8eO5vPxGIx9e/y1QgCQhA1ooi8GJ3dfWcBuMYhFtE1yeGBswFwjYncEOJEo9HQxk34FJv0rwDaAYSDDYyfZsYlk1u2X7dhw3WTfuuWgSB0XY0x8BOebPmX0dFb3/SSpcwEQR3dKw8AmV8jxsEBcdxIwHdbw/zTIATnNT75vXgEhCCKx67sNTs7o1shhNsBOkzT+PMZg5c8tj72fNk7bvIG23ui+xDTDQR0lA4FP02m8emRkYHH/bRVIYLIdU3PwaDjkuvX/kchWcpFEPsvXrlHKGNeSYRjAIT8jN+lzCvM/PlUIjYoG6ISUCxDVSGIMoBYriYikWgnG3Q/gO1ybU4AaLW0vzoZH/xBufpr9nayp4aX6SwGvm0xfWh35FNmHJo6vRHPt5mc7HXeAui0ZHzgF14LXGUJIivWU2SmDx8ZWfeCm77LQRCRxdED2aSbAexaYF4pXF6Z+p3bAOxSgEgyDPx8XpjPltNE9f5ShSCqh72j50hX32VMuHCaHAhXg6HMSjmS4JGJeTh8w8OxN2pI7LoUZdGiz7a0bv365WCcq1mk0kx4kJj/b1sYcd0C1dt7yrzJyS37m2SuBrBcY0oZI8JJI8ODtxcCSEsQhJ8lhwdP8QksLepdtUOryXvDNL8M4Cj7eIhoYPyd+Se5mb5KJYiOrv7lRHwTgG1tMqtF/nGD6NutIfMRO45Zgt5o7GkafCYBpwFYYB8zE+6dbOPjZc77nA1lLiYEUWZAi22uvX3VzkZL5mEA+2b3V8CLITZOMA3zp2B8MNeuOKuLBXh2PersiX4JTN+yLaYZZtxphvjLQUx5yrQSNs2r33VaK9OK9XvSDKcPGx1a95Kb2GUgCGvT1NEV7SOiG20nojGTedloIqYc0Y6vFILo7IkeACblzN5pdsP8W5PCp40Or/lPr1OUqtfbG33PljR9kYCLnWRL102MzV8dxLdTnmkirQhB1Mgc6OyOHgrQXdOnBcajZrrlWGqZ/D4Bp06LKc7qkjXW1bWyxyTzHospT7U5RsBXd1vIPygmkiZ7ItnqjWsA/qxVQAYuTcUHv+a2SJaZIFTXWvJj4MZUfFDt0h1BDsUSRCSy/H1shO8FsI9lzBkCXcUZ8+JkMqaivQJ9Hd19Swm41UY4GRBfkByO/ZsfsgnUoRQuiIAQRA1MkN7e3vB4euefAXx8XhxiXD6SGLyooyt6NBGts+x0xVldgs7UTnV8ku4C4UDrolaOBUiFdbZtoRgDB1naLqivChAE7KfRKVn4dxPh8JINQ2tetcNXDEGoOTuR3vlqBp85mxxw6fjYgstK2e27nEo2wzAO9XK4lzA1pKoGASGIGpgW3d0r3p9GaIiAPXLiTLxruz0iFR98uLMzuhvCNGQxM6ki4qwuUm+Rnv6VzHyL1bSkdteTYwtOL2VRy4vT3hU9yKDsSTAf7w9mOjGVGFC7YsdXCYJQnXR0911FyPqv8t9rJvig0XhsQzkIQhNQoZp9oC3Mx5XDqdzR1XcGUTbc2xINRbe1hTedPDQ0lC5S/VItIAJCEAEBq0Rx592HWbs96uzpvxrMyhma+4I7qzU75z+EKd0zPLzuZdXoAQccu4PRGj6ZQOoUs7fF4bgJ4H9n0I8mxxbcX2gRjXT3XcLA16fFJDyLNPcmk7GNQXBzmNvAoyG0HBaP3/ZakHbsZV3CiP/H4MzBicTt/1VK2/m6uj4YWLvHQj5RZ7qqGEH09H2FGN+xjOltk/nQ0UQsXgaC0MxJuBJQMbi6nPQ2k0lLRkYGniymTakTHAEhiOCYlbWG9g/B5mdwLpiYPmH4FcaNIN55Z8dXW+e9fg4Iyk5uj0KxN/8KQP+UjA/8yqctO8PMy1OJ2N1+5VQ29I7uPnUnYdrvQsA3RuKDlwRoQ1tUu+utgE8n0tP3GWY+B0y3cojuLHRDeA4JQjkftLejg5qY9Lf9y7+7z0VHrbGGepdrLpQ6l5qlvhBElTWtu/uQNy/lRVvUu2rH1nT6EYD2m9mdB7tZrSMIZDJHIhT6xrux8n0BYEgz+PxUPHalnSRcds+uzlFdnxqTWtl2prYwYtW96646AB4lFZ1DgiibiUljpssQ0YkjwwNrSwLDVlk77xH89FxOmZqtLSGIKmvcaSvWm1M0i1sgZ7WGIDYqx6Xt1vYYKJtfSIUmhhj8SQI+oonxd11sNKlCAsnpXHz4PmSwopiIGKtq9SaL8piuSplCFSIIxylMhU2HkemNx29/rlQTk8a/8TIbtCS1fuD3pWChqzuXfZVb9kZoTwiiilrURZu4HaFdnIK+ndUuNt386N8i4LLWMP/A7mBUMf4tpnmDLTIHcDHNdCzu/zCZrJINLsw17nt3qYvmKpdDvr13+e5GOrzemgSxUOjnXE2LShCE1gTEeLSthZfpHMhBTEz7HXzSNu95Z/yO2fOhckTb0dV/AlE2qGD6K0ciwbnSb733IwRRRQ1qQlhdd+ZTmTFxL0ARy5+K75vVBQhChQ8em1y/dsQNCm34posDWr/I+7NP2xe2QrveoGqLRPr3Z4MftN7WZcL5qeHB7wZtq5zlK0AQ1NnTdwUYX7DKWch2H4Qg1KaGWjLraSqQIf/F2sKbj69EdFGt6q2cc6CW2xKCqJJ29AtpYXNKhzMyZQLgZcl4TO
VvKvi5xf8zY3UqMXitV32No/x1MulgXVI6jY3al5nJuVv0RyxesqvfO7r7jiJglrO8Fnai5SSIrA/IwFdBdJHthnjBSK0gBKEJyQaCpQbxo67pMpoTqXK2X52KD54TqCEpXBQCQhBFwVZ6Jd3E9zKnRCL9+7KRNd/MpDXwGYWjj5byH4aqWxjcFliNo9nTzKRyG42n344BdGQOXc86QbRQq6aKUglC6XViwtgTBh/DwBkAdnPgwnxBMhFTIa/aVPFBCEJbtoInsVLxCTJHpKwTASGIKs0KjTPX09GnWUSV9L525y4nCN+mge7u47fPYPI+gNRbCdmvwA5cFydfsC8NYT5pToaWjo6u2VQOFWlOX4Xk13apWxz9yuaGVaWzuRLw0Pg8jhZKdicE4VeLzVdOCKIKOtf7E+BrsdbthL1OHmqIOoJgwkWp4cHL/UCgrV/g1bFcTh11XyKbidbLn+AgTJ8nIz+yqzKdXf2ng3iWKS2oiakOCeJ+ZPiEZDL210I4CUH4nUXNV04Iogo610Qk+Tan6C8peceGuxDEeanhQXWfwfMLShDaE4dLygnN/YnAFwG9BlAOH0QdEYS6uf7l3RfyoJ/Eg0EIIhKJfgwGPcTAjtMnyQAbDS892X/XmTaJcf1IYvAzQduS8sEREIIIjlnJNTSx3b7MRFMnAWdiPwCezuqgC7x9kMXUd6TegP6U5PStlD9s0n6iyZ5qCL4JUpWvUYLIvo9NwLNM/FAxbzsHIYi59glIFFPJy01JDQhBlARf8Mr6TJvB23HWKBzxU8wCb+2jmPqak5LWz2L3D1QincJcRsPU8oKrm2lB5NXNA+Xn+NvWbcf+7sGb3y7HTLa2kUu3MevRJWZakUoMqAzH8lUYASGICgNsb16XTbRMIhQ8hRSzwJdKENrkeExnJBMDP863rZGrbKk1rPLrTF4ocHmsFJ0EWXDnekdeKkGo+vYTsJd/qRQs7X0R8CpMPmhkJPZEKe1KXX8ICEH4w6kspVzMQ2VpO9eI683qahCEkkmTqfaetvA20aGhm7ao352njPKk1tCBqjHtVSQ7aMMThPONEt8+tCCTXUvqZcrsG0SOZi4rBDGH2tffY8BfwFTEG9Mczj0Qr/4397k7q6tFEBrTzqxFWeOn8J0+JKjqNJf9UAlzVqMThC5Qwuvd66C6mtpc2F5ZVKl+y5TZtxh5mrGOEMQcal1z98HTuewmnsvbBq7tVYsgpk5NO90GIDrDY1NmJnu4byVNFapvXXgxA88gbBySGlr7x3JNhUYniKlnTe1vlOBNg40jE4m1w+XAMXcrfA2Illnaq4j5sRzyNmobQhBzpFmXXEolPYSji+0H9M7qahFE1ozkeMWNs2amsfTbHzFADwHYfkoN5Uut4aZWl5fKrpsYm7+6HC/KqX6bgCCgPQ2Dhybm4ZhCl/L8/rlFevpOZMZN8qKcX8QqU04IojK4Olpt74p2G0QqZ9I2+R9LPS7rjvpuu/BqEoQ99UZexjSFjrO8elYRO7ZdETmivhOgXstvmXK8SZ1vrxkIYuoU4UwKCFDJZOvyJrWcHuZorbJ2IwQxN6BX6olGXbtqRA47fjUJQmOSUC/NnUCEEy25l8qaWqOQWl0WoLQi7PGxBd8p5SSh0qOHMuaVRDjG9u619jW3eoximibCqRTq9wHYdzbedHNb2PxcMW9T5+6rqPe7Z/KNAWUl8Ln5k2+MXoQg5kCP+rsP5YnW0Z1MoHl1q8oEoXM43gHCR8H44JR1KdgLeaWqLdIdPYlBKtx2K2tbDKQA4wup+NrH3JLb6fpWxBA2zfMB/JO9TVU+UC6mCmZH1cke5MRjr+9CtmrEvzUJJ48Ox57yoyvlc6AQnZ1703yWTvzkk/LTh5QJjoAQRHDMAtfQ3n2w3QcI3GiugkteJ4ezutoEoQ9ZnB512VNr+MCTOrv7Pveuye97ugUd4KfBxvcN0C8TibUv28kiGo2G/rgptGs4kzkYpLKo0sdtKbbzIqQZ+OG8MF+k21HX8wkiP8BIT/QIZrp5xpc0jX4GwMMAf78tjLh9/ArDjRuNPZm4D4RzAfydRm8PI8P9XvmkfOhbihSBgBBEEaAFqaKN4vGZgdVvP5roKIfDt9oEocaiCWnNDbH8qTV8Yked3f1HAPxTl8VpepEH8D8Apaf+gZUfaWfvPvhpMnDWyPrYr91OI41AEFndZh9kwlqA318Al7cAeiWHoSZMe1bNDAM/nxfms4sxVXnrRkr4QUAIwg9KJZTRR3uUN1pH+4gLMOu+QU0QRCTayUbWUb+dFdJSnfUlqCdbtb39hO1C4clLmXCm5v3tYpr/AzMu3GNXvt0rWV6jEESZcVQkco7fZIPFKEjq+ENACMIfTkWXKuUVOL+dFrihPe2srgWCcHmTomaiU7IOZtM8h4DTrE+T+tRDmgnqSdPL99iFU17EkG+zkQgiP6aurujfZ0CXEUHdYbBc5PREUmWh/RoyvCaZjKkkhPJVGQEhiAoqwMU/UJFoHd2tU6uzuhYIQkGtSb1xHzJYUUsLgrKNv7jJ2Jsy5koi6mTg72231pVt/c8Av0CguAk8NC+8dTyfPiTIlGpEgsiPX825LWn6R4NpuUkcIWAhgG1zv2cxJOApE3w/wqF1qaG1LwQJDAiCs5QtDgEhiOJwk1pFIqDxl1QstUaRIko1QUAQyCEgBCFTYc4QsKcHqXRqjTkbmHQkCDQoAkIQDarYWhyWM3NreZ31tThmkUkQqGcEhCDqWXv1Jbv91nfRiQrra9girSBQvwgIQdSv7upKcueNW+93tOtqgCKsINCACAhBNKBSqzmkbMoEMnZtbd1qo4rq6e09Zd5E5p3jmHGF5TJahhmrU4nBa6spq/QtCAgChREQgpAZUlYEurqO3takeb8E8Cm3hlW+I3Ni8sjHHrvjL2XtXBoTBASBsiIgBFFWOKUxdWIYT78ds2RptYOyGcRHJYdjKhmefIKAIFDDCAhB1LBy6lW0zp6+m8A4WSP/a0R80shw7Ff1OjaRWxBoJgSEIJpJ23M01s7u/iMZfCkBH8mlWtgMIJY2jO88vn7ti3MkhnQjCAgCJSIgBFEigFJdEBAEBIFGRUAIolE1K+MSBAQBQaBEBIQgSgRQqgsCgoAg0KgICEE0qmZlXIKAICAIlIiAEESJAEp1QUAQEAQaFQEhiEbVrIxLEBAEBIESERCCKBFAqS4ICAKCQKMiIATRqJqVcQkCgoAgUCICQhAlAijVBQFBQBBoVASEIBpVszIuQUAQEARKREAIokQApbogIAgIAo2KgBDE3GuWOrr7biDg1FzXsbbw5uOHhobSQURZ1Ltqx9Z0+hGA9svXY8YtqcTgpwFwkLbcM7Dy7ybC4SUbhta8GqQ9VTbS1XcZEy6cqResLefzpNmWns8YvOSx9bHng8pjLe9IJsh4tK2Flw0Nxf5WZLvU2RP9Epi+BSBkaSNDoOvGx+afs2HDdZP5f+/o7juKgLuL7MtXNSacnxoe/K6vwrlCixZ9tqVtm9d62DSOBsylAL0XwLbTbRD+AuY/AxRn4FeT83j9h
odjb/jpo6dn+cI0h4cBfMBP+RLLvGaCDxqNxzZ4tRONRkN/ehkfCzEtZ8IhIOwJxg6Wem8x8DIYjxkGHshMhB4aHV2zyavdRvldCGKONelY2JnOSCYGflyEGHaiUbxQ1ILe3b3i/WmEhgjYwybH2ybzoaOJWDyIfL290feMT9JdIBxYJHnZnye1dr86GR/8QRB57GXLTBCu5ADClRPvLLjQSg5KllojiPb2E7YzwumLQXzGLELwBjnNjLtC4IsSidh/FypeawShHraCgdNA9DXLQ1beIwYyAFIM44up+FqVsj7QZsxPB7VURghijrXR3hXtNojuB7ANgJfZoCWp9QO/L0aMzq7+00E8/SobAa/C5INGRmJPBGmvoyt6NBGts+1+s00UsxPVEk4AImxvX7Wz0ZJ5GMC+znGU/lRpGQmCOrqj5xLoO7mstXlxM27kUGMEQZ3d/UcA/NOAi6RdLWkQfxNpfC+ZjI3p5l4tEUQk0r8/G+bPAfqHIH8ntrJKx7eYEy1nj47e+mYJ7dR0VSGIOVZPpLvvEga+PrX6lmbaaO+OLjJADwHYfmanTiemEgO3BhlWR3ffVQScra/D97SFt4mq50P9ttnR1X8YEatX5fLmFt9HftVHpKd/JTPfkquvdmwmgJZc/xMAL0vGY4pki/rKRBDU2d33uXeJ/nsAtrIIUpAcaoggqKMnejIx/dAmf1GYqkrEuJ5NPltHErVCEB3dfUsJUH8fOxU90NkVH0aG+5PJ2F/L1F5NNSMEMYfqsJteiHH5SGLwomJF6O4+fvsMJu8DqH2aIICrU/HBc/y2qWnjpdxivPMUh+HFMDK98fjtz/ltU+N/GA2h5bB4/LbXvNro7e0Nj6d3ug1ANFf2ZQDqdHPWTF26rS286eSgfpt8/TIQhNvimiHg0vGxBZfZzUrWcWtMTIEI1AtDP7939kQPAJMicftCuZGAa2HSna2t4y8NDd35er49NX8nJow9YfAxDChz1G6OvpgvSCZi6kRVkumlDDpyiBaJLH8fG+F7Aexj+/H1d30PNwG8ZiIUfm7D0Br1FG5WfuWfeyc99t4wsJRhfl536mDgxsmxBacX0rkfndRiGSGIOdRKx+L+D5PJjwBYCGCCgSNS8UFlSin6i3T1/YQJ/zzdQMBTieMUQvhZtq2ZF+EyzLw8lYj5cqrqHN4cgLQikf592chilFu4eNRk/ItBNGA5KZXkrC5x8aGOrmgfEd1oPzn4IQcFbbUJImd/XwOiZZaJlwHzZTDxbTczkXWSKufuxpfpLAa+bcOhLGRXoo50f0/U2dN3BRhfsP6oAjs43XKWTzOR0v1RRHST9dSe/VtmWpVKDKiNTEN9QhBzqM6Orv4TiLKmE4DwLNLcm0zGNpYiwqw2i9jx2/0YYFI7Q2UvmPZtBFng23uX726kw+sB7JUbVyCC6ezuUyeFa/KYqN0ZT7Z8wQhP3mF1egMo2lldyuIT6elbwYybiyWHWiAIXYQYA5em4oPKYRtk558/SV0/y39FdE1yeECZLIO0NevPoBQd6f6eDlgc3Stkktp45OcliGhg/J35JwXd+UcWRw9kk+4AsN1MX3wfMljhh1xL+Xuf67pCEHOHeFnCW+3i2k4l6mffJxONOSe7+1ONzPZtsG8TUc7Gq96cbp06iPg3US1aGp3fugX3AhSZJgie8ql09PR9hRjKdJH7indWF7v4lIMcaoEgOnr6ziXGFZa59KQ5GVpaTPim9jRShs1PsTpy+3PW+MU2k0lLRkYGnixiCdCdRspycipClopWEYKoKLwzjZcxvHWWxF1dR29r0jxlS/7U9NJJuCg1PHi519A6O6O7IUxDYHxwquwUEaj/Z/Nt+J78joU8gMnLFuGlxJiO8tI45It2Vhez+HR09S8nYmVamLkXoGBivnj3XfG9WCymnOm+vqqbmHr6brKYEJXMRd3FyQ/WFlSg/vl1MungkZGBx30BoilUjI4K9aXZYPje9Oja1Z7CmFY0mpmp7glC56hVZhI/dws0u2/fYZ2Ouh67Jtvi53vB9fMHpolC8vUHr9ntTzu4HW36wFRzIlGRLX4d8Zq7DzMRVNmdagi3A5QlsKmvOGd10MUn0hM9gpmUWWk6WizLoUWQg5K61gii2AuWeS1Y/hYMgN5mcDrExj8nEmvVxbiivqA68urESRD4jcFbjkok7n7Lq67u9+kNH9FuYMpeFiSYl4zEY2qeNMxX9wSR+4Ozh2n6WyD18f8+61r8CT4Wq9kTtHjziG7m2e8xMPAMT4YWe5kM7NFGnDPnZDF1YuOJi7q/QC2Z9QTsnZPTt7nLeZrJtjDLz2D3TxR7jyTI4uMSFlk0OdQCQTgCG4CiTUyVWgmD6MiPDBqzWikmJj9dNkSZxiAI+2Lm0waqi//3ubg6bjFbF1f7zLDvfgPsqn1NMs3FNM8jvua286xLe442fWDqMBH5qJMfoMZM4bhEqDvxFeOs9rv4dC5eGYFpKmekNRQ0A+ILksOxfyvWCVvtE4Q9sCF7GipxTL4maoBCfnXkt0mN+VLt+a+bGJu/OqiT2m+fjVCuIQhCE6HgmSJCZ7vPKdSzrma3W/BGdCXCW62TT5fawsvM5lhsbb4CTbiq52nAERHl07btkgvKcWLRmbCA4KcxP4uPyz2Bsiyk1SYIXUQPgDQxfpSenPzGY4/doe4BVPXzo6MgAuoCIBQxMuNOM8RfLjW/VxBZ6qlsQxCEbuHwShHhshud0p2Hvd1uuwcK3zauRHirfZLZzUXqVutIYvAzbpPRvovUnWo6evouJMZl+TY8Tj7U0dX3cyKcON2nD7+FKuu8+4AMEZ04Mjyw1jHO2bes1c+BndVei48LOaQZfH4qHruy2JNDfizVJgi1de7sip4PyiYXtH8qbcajYOP6iXnmg36T8ZV70fPSUTH9dXRF+4no5/kIO0sbmXdv5/8HGNea6fA9XqbZYvqu1zoNQRAKfOfuNcCi7dReQXu786ZwwZj8ioS32kXWkFahKA27TNrTgaPNAhFJmuyynmau6QXTHsJawDSl3/0Gc1YXWny6ulZ+0iTzLgC7zsaYh0NoWebnNrjXYlADBAG1o24Zp9uIcbiHvC8R08Mm8brJcChZTGZfLzx0v1eCILLZard64yoGf1aXd2xajmzWWjxKhLvJNB5NJNaq2/xF3+koZvy1UqdhCMIeBukVf29z1L0E8BhAH8odIVyzomrMOQUjkioV3mqfQBo/hKvZyyGTy4IcxJTmDEP1l1lWe/QvfNFKk8UWgRyObovPuBn6EEz+BcDv1/6BEr6fHB48r9TFosLZXH1HyKm5vCVNVxOgUsRb05S7r0+5xROgWCVPGJUgCDWoKZJ4/fxcPrSwz4X4LVIZXJkHmu2E0TAEoQl3dbWZ28uq27oG429M+HxuwrjufjWpIEYm5uFwt6N4JcNbrZNbY8d3vcGscdhpT0xa050l0snav+ZG942p+OBpXoup8+TjbTLq7I4eCpDa5Wcv4+U+3zerdYsPDL5AhbJaIrB0a8cYEU4aGR683efCoi1WKwSRE44ii6P/yCZd
pc+eW3CkmXfTxTxuEF+62y64P8hdEC/8KkUQ+X7be6L7GEzXqYA93+Q4LTQ/DRjfQsaMNdrNabteGoYg1MA0dnhtDL5jt8t0BgN/m06DoRpzsZ87Umx7xPnbwltLupzj9Udlj/V28xk4Qv4K+Ars41Vkqlv47aGThaK6LOPQ3X3wfNNC91hSEGe1Y/EB/pS9PA7sbpFNkcE6Ziy3ptVQUW4IG4ekhtb+0Usfbr/XGEFkxVS5lV58OfRJwDydgOMCvguh/mCeBuGM5HDsN8XiYq1XaYLIk+MB3f0fChGfCcbxRWR4fYWZP59KxAa9NkLlwKQabTQUQfi1mdt2u9kjecg0DDb4QQALsvygWQg1O+qCkT32XX2QnEbFTAY/znO39Bpur2/5MR1pTm++3rnQ5sfxebFO4wfy7azWEIQd7uxJYfydBXe3bvXGNZiyWVu/B9rCfFyxL9DVIkFYB5dNxLfR2NM0zGVEdBwYH/OZElw5uC/cfRdcUeppYo4Iwjps6upaudBE5jAi6uepk4X11rzbn6RK7/4TpPm8RjxNNBRB+LSZ22zYU7by8BhCsy95OW3omtQUBXe7tkipQEnriiEIzfifN8PpxaND61QK7+znll7DzfmqWfwdYcBeIbNuY9GLrtqPAAAZ10lEQVTcfXiTTD50ZCSW9Bq//klSf85qD4IYY+ZT87vCXPLB+2zml5LCXWvBSe2Fr44wmLAUBp/sQRglYTM9T+3pQAKkbAkytgJls4TBhnkgMx8PUHdhwmjMOxUNRRCa3bFjUbabJ/Inhd7e3pDtHQKHs0+TmqKgnX0uwlutE1wzfudiPvVgijWZnuf7EfYLhfYQ4lyeomm7vJ+LgPq7D/7vNLjEtftyVhcgiFnkkMe2vSt6kEFZn4f1YaA3DTaOLCadRL0RhH0RVY7e8LzX2smgC4hxsO01PVX8NYZxWCq+drTYxboKJ4iCoqpT1UubsA+YvghGn+ZENcHMn04lYiotfcN8DUUQ2R2y7RlOu1lH53/I522y2+btdnSbWcPrRDAn4a32megYA+G81PCgit3Pfpr0Gp4JxuwEYE/uZiMQL1ym5HC8+1CevykCvjESH7ykUGsuBKElh1w7Spf/SsDFtnafNMPpw6wnND+jqHeCsI6xvWfVfgZn1Atttudh/Z3m3PCqNYKwyrn/4pV7tJjmDQxkMx/PfP43OH7mSS2UaTiCcF6Am52q2kYgs04J9uge62UzTeRTwZfW5iq81T6J7GOwJmLzSq/hNiHtmFrTkdjb9AovniYq69OrZf1L8P4j1RDEGBN/LjUcU48laePdDzjg2B2M1pZ7aMo2bfmCmxYaiSCmyN75UpvfeVCPBKFkVifYti0Us5GEZxaGsk71OWis4QjC456C7bbvbD+DM9ncDLlo0k0XvEw3V+Gt9jnijPCZGWOxvgINptN/CBpC9nzD2sU8VK7p7umsLnZ32tG9sp1gKn/ErKyuRDhlZHhw6iEoH1+1CKK395gFW9Kt6ma8MjOqN753yZ2SS34JTeNP8kzNUgiqYnVkb1OZMrek3/4iAasAYyuA/44J3/aTDt9LlVo/mM/sAV5t18rvDUcQ2R1NV99lTLgwD3LeVOTmf5h5f3b2e8gEvAqTDxoZiT2he3mtUErxuQxvtU2mWSRoHYOf9BpuE9MRNZT7Q3BkkiWcnxoe/G7BP379PYZNKlV08D8MblML3exY9sLmjRIWH+rsiX4JnE1RYb1YthnERyWHY4/5kb9aBKELD/bjL/I1ptnP6WarMHB0Kj6o3ioJ/JWgo1l9+c3zFVhAdYroXbVjazr9CED7Ta81PuZ/MX1Vq05DEoSbM7m9K9plEN0PYJupGex8N0Lnh5jXsmnA5sAu6Ayd6/BW++Rx+GFyD5nYfAWBdnh2TPPmNxtx+Dlia+4+BLsJbR2vOvUZLRn1rrfVBl5QP6UsPi6mBRDw0Pg8jvrJXVQtgtAulmWKDtI8NVsTBKHmiiNrc4Asw4UWZpfHujw3SNVa7IvptyEJwrloTJlZWifNFZa3lrUpCRw2fOBq0+Dvz3rP1uOPypb2wpfTthjludWxm8PULpGw5duzXp4L+EdiD49VC2J6YnJVqDV8E0BHTsninV7DJZOo51sThfDRpW0v5KwuhSCUHJ2LV34cpqk2GtY04OrFGF+pOKpFEEr2iNP3M2YyLxtNxB4qZQ5qbuf7zsWl67dUHVnb1Ji/VBbX1anE4PS768WMXZPeBtxgr8o1JEGobJUd3X03EHBqTvFvG4yjTMKZ6tJoocVM8+jNbwh8A4NuzJsVvCJlrBOyVGddcRP3+O1nPxnK98AIfQ2mqcJbF+baDLQoa0Jo/wDi48GkMq5mH4L38zKZ7i0Ct8ytfseuvxPh/o52ORafjq6+M4hwjc3U5CsVR1UJIhLtZCN7it5uBl8empiHY/ycflx0onmj2XuzUEi/5dBRvn3dpqQsN+Kdc8DXBVG/87oWyjUqQcBhbwddwTCX5u2FbikjNAuhyuT4jOXNZy8ziv0inqfTtgITYZYM2T8GxpWzFrQinGn2CDAmfJMY3yxksrOOTftsaMCTjHa3qX2O1D2nUzkWn2zSt63fuJmZ+20yPUVm+vCRkXUvuOm1mgRRQO5fIMOnJ5Oxvwadj5GevhXMUE9tTt8T8dpEefVRDh1Z+nAJU+ZRDodWFpM2RZ8SvrTQXi9MqvF74xKE02m2ObeQufof8grQPE9o0U3hnVG1wlvtk8dKkMpRzcATKnolV853xk9ru5q0G8pBt8Rvm9q7D4Uzt/r+m9CcTFzfrC7X4tPRu3JPpM0HHAn+mO+CiVVuqReqSRAKUFcTGfhpIuOru+1i3uMnVYYifArR2bnMqNZLhH9Chpckk7FnfSvQVrBcOso363IjXv38CoCvIsNr/KTKyF6Y+x/0g0idHq3RbEVfnCwWo7mo17AEoX1lbQbRgguk/nnCqcpe+ZSqFd7qIAhNVMlMGXfzS6FJp4namJjJqOrdpsb+HchRXkg2F9+G1lldzsVHt3v2esKz2gShcIz09H2GGT/U3IJWP29k4BYiXtcWmnxmaOjO13PY06LeVTu0mrw32DwJjJX53GUW3fgys3ktbuXU0fTGbyqLgArp1eVYeh2Etcy4fTIcemLD0Br1ql72TowKD56YaNvdNPhEQvZBrN1s8pclvYgXJtX4vWEJQoFpz25qWSAL5lDS+CHyVT0dzlUMb501fzQ5lKZ/9yK5AhNR9xaDL+LUy+N9qS3AH4UuOkpFFzluVpdz8VEmG5eEfq7pJiqcrM+q50JhptTRHT2XQN9xIYkA0E8XLdure+XUkXUguawAN/lMxOcHgwyBrhsfm39OI75t3dAE4XYScPM/5GeD/u1jZbHAs0hzbzIZ26ibOdUOb3WcIrr7riLgbPu/lxJpoTflqLd9eXkqEbvb7S9K94ZDqXZqe18u+n7SnAwttT4jWe7FR3eTOCebNhVHjRCEEpHau6JLDSK1YNpe0POzNs4qU9bU1+XWkVVS9/Qggcf8Fhj/OrFlwVWNSA7ZCRIYkjqqoH83wPv
N6dzp41xiXGEbbsHIn2qHtzoIoit6NBGpI7X1UldJkRa6t7x9RGqV/Aqcn2nnckPbcbO6EotP7sKgiuiy2uLVn9h1E2PzV1sXkBoiiCysyhw7kaZ/YuArGvOJF/SvM/Bjnmy5fHT01je9Cvv9vRI6svatTn4t815bToRLAPoHv3LlyqnXJ29LG/SNx9evfTFg3boq3tAEoQl3Vcrx5aDV7Ua9HsGx3ir2sWhWfKLoFnP11m5bCy8r9i0DrW/Ho029f4DvQwYr/DgGgwDV2d13FpANP7V8s6NLKrT4aEI9syI4snzWGkHkgco+GrTJ2JvYPBJMnUD2HQh1S91Kem8B/CfAeJgM8+7xt7cfrsTuuUI60k0l6uhd+T6a5EOYuBfAJ2gqFNzqpxgD+CViI6He5p4X5l8X+/cTZC7XQtlGJ4hawFhkEAQEAUGgLhEQgqhLtYnQgoAgIAhUHgEhiMpjLD0IAoKAIFCXCAhB1KXaRGhBQBAQBCqPgBBE5TGWHgQBQUAQqEsEhCDqUm0itCAgCAgClUdACKLyGEsPgoAgIAjUJQJCEHWpNhFaEBAEBIHKIyAEUXmMpQdBQBAQBOoSASGIulSbCC0ICAKCQOUREIKoPMbSgyAgCAgCdYmAEERdqk2EFgQEAUGg8ggIQVQeY+lBEBAEBIG6REAIoi7VJkILAoKAIFB5BIQgKo+x9CAICAKCQF0iIARRl2oToQUBQUAQqDwCQhCVx1h6EAQEAUGgLhEQgqhLtYnQgoAgIAhUHgEhiMpjLD0IAoKAIFCXCAhB1KXaRGhBQBAQBCqPgBBE5TGWHgQBQUAQqEsEhCDqUm0itCAgCAgClUdACKLyGEsPgoAgIAjUJQJCEHWpNhFaEBAEBIHKIyAEUXmMpQdBQBAQBOoSASGIulSbCD1XCHT09H2FGN+Z7o/ws+Tw4Clz1b/0IwhUEwEhiGqiL31XDYH9F6/cI8zmeZThn42MxJ5wE0QIomoqko5rAAEhiBpQgogwdwi0t5+wndE68QUwfQlA2gQfNBqPbRCCmDsdSE/1g4AQRP3oSiQtEYFI5Nid2Gj5NYB9ck29JgRRIqhSvaEREIJoaPXK4KwI9PQsX5jm8DCADwhByNwQBLwREILwxkhKNAgCxRBEgwxdhiEIFIWAEERRsEmlekRACKIetSYyVxMBIYhqoi99zykCQhBzCrd01gAICEE0gBJlCP4QEILwh5OUEgTyCAhByFwIigB19K58H9KZ5QboUJ6KCNoFQCjXUAbAnwl4ipkHMpPpOx977I6/BO0kX/6AA47dIdQSPoaI+hnYD8DOud/GAPwRoAdNMq8bHY79HgDb+2nvji4yQA8B2N6PDAwcnYoP/jJf1useRCQS7WSD7gewXa7O2ybzoaOJWNxPf/ky3d0r3p9GaIiAPXL/NsHAEan44MOF2untjb5nPG0cBpinAvQJCz5pAC8Q07Bp8PV77MKpWCymdCOfIOAbASEI31A1fUGKLO7rYZP/D0CfDIBGmhg/yqRbLh4dvfVNv/XUfQVqmfwmAZ8BsJVHvQzAwxwOnZoaWvtHa9lKE8SipdH5rVtw77tEFZnedTEuH0kMXuR3rKpcR1f/CUR8y0wdHpmYh8M3PBx7Q9dOZ2d0K4SxGkz/AmBb7774aTJw1sj6mArzdRCpd30p0YwICEE0o9YDjnnRos+2tG79+uVgnGs5KQRqhYH/Z5jpZSMj617wqpjblf8CwK5eZW2/b2bgBOuuu9IEofrv7O47C8A1lsV9NISWw+Lx217zI39v7ynzxtNvxwA6cppkgG+MxAcv0dXv6F25J6UzawFq99O+pUwGhCsn3llw4YYN100GrCvFmxABIYgmVHrAIVNnT/RLYPqWgxwIfwHzfwPGk9k2idvA3A7QBwGE7f0Q4/rWls1nDg0NKfOH9uvsiR4AJmXi2clWQC1uzwNIgGmcwHsx0KHZPW+GYRyaXL/2P1T9qcWUv6BOIcy8NRGOtpiDJgC+BzBene7LoGvzdbP1feRiikT692WDH7HI7Ms8lO+zY3H/h8nM1l+Y+zfXC3yRyPL3sRG+13LZL99MBowXmZAi0JsM3o6AbgC7O4Gm6ybG5q8Wkgj4l9CExYUgmlDpQYas34HTcyabZ7x3Vzyqs2tn01m0pC8AWKWzsBLFZjJpycjIwBSh2L723uW7G+nwfQD2te56mXGnGeIvP7Y+pghi+sva3zP0dTAUAcz0w3wXTKxKJmPKTzH9FeOk9kMQuhMAA1en4oPn+MG6s6v/dBBfO1OW70MGK+zyK3NW2xaKMXCQH3wUZbf3rPqowekbbGbBDIO/korHrvAjn5RpXgSEIJpX935GTp09/VeDebWl8J+Q4SXJZOxZjwaooyd6MjFdbz15MNOJqcTArbq6ke6+Sxj4unXxA/PFu++K7xVwsOpOOOpksCwZjynnccUJQnUQ6elfyZz1IeSc9fy7iXB4yYahNTOnE82gs76EEG4H6DDLz6uT8cEf2Is7yArI+MAHOUf2DwE+qQg9+pknUqZBERCCaFDFlmNYi3pX7diaTj8CkIoeyn/axUvXX3f38dtnMHmf1VbOhItSw4OX28u3t6/a2WjJqIid6dMDAzdOji043csUkl0AJ+kuEA6caZduawtvOtlqzqrUCUL1ecDi6F4hk5SZaK+cDL7MTHbzFAMvhpHpjcdvf86KkQ4fwL+pSEWDGa0t99CUWS77UQE/Rznmj7RR/wgIQdS/Dis2gs7O6P+iEH2OARU++TEQMiHOHGJfvAoJ0NnTdxMYJ0+XcXlPoaMrejQRrbOcNjwT6Vn7te/gGXiGJ0OLR0fXbMqXqyRBqPW2o7vvBgJOzffnx8zkdHA7iU215zyhwO9JbhqmYk85FZtg0nDNIyAEUfMqqm8BfRNEd99VBJw9M1q9Hd4NDXWPIAPjV1BmF6LfEpDgNN+eTMb+OkcEgc7u6KEA3QWgNdfnk+ZkaKmVpKzya04+GSI6cWR4YK1tnDryuTEVHzwtSMiq5q5FUXc26ntGivRBEBCCCIKWlA2MgB+C2O/gk7Z5zzvjd1idr26mqMACWCpU+AQBjUlO6wvJi+QIACA8izT3JpOxjdZxupjqzksND14ZBI+urqO3NWmeihD71HQ9pjOSiYEfB2lHyjYPAkIQzaPruRopdXWtXMiGeSAzTsgtRjMX3TQmJs3CDWZakUoMKJNT2b5KE4QSNNLVdxkTLpwWmuia5PCAOhk5Lqc5I6T0ZTU7f9X8AwB53imxgsfgFgIOsd4voSIu9ZVNIdJQzSMgBFHzKqpNAZV55J20sWuIeB+AP8RMnyBgfwDv092BmFkwnW8658Jb11scvBlmOiqVGFAhr2X75oQgnKk3tGYmzQ1sV6d20Mt+gQCTN7YDwdVshYUgmk3jxY+X2nuiHyamL2UvmzF2KKopzYKkWQADOaj9yjEXBKHxK2jNTM4cTux6+1oIwq+GpVy5ERCCKDeiDdje/otX7hHKmFcS4ZiAqTY2T0VT4u8KniCcCfXqliDUOB2RSRozk/3OR6GQUyGIBvyjqpMhCU
HUiaKqJWbn4pUfh8m/APj9HjJsevci1h8IFDeZhiZbjMfVJTE/TmqNjb1uTUwKI0fqDZvzWeN0fpNMPnRkJJbUYTxXJ6xqzTHpt3YREIKoXd1UXbKcnfxOgHptwmxk4BbAuH8yTE9uGFqj0nlrM4T6IYhGclIrnHp7e8Pj6Z1uAxDN4ZZh5uWpROxu9d/tXdFug7IpwreZ+r1wSG8kEv0YDHqIgR1n2iu/j6bqE04EqDkEhCBqTiW1I5DmcpZKmPcTpPk8e54gN6n9EIQu/LKYMNeO7r6fEnAwwC8w6AnA+HkqvnY0L9tc+CDyfWku7k3fW+hw3PlAwdvp6hY1tWTWE7B3vn0mnJ8aHvxu7cwWkaQRERCCaEStlmFMml2wOiKkzInJI/0+AKSNu3eJmol09f2ECf88Izrf0xbeJjo0dNMWP8PR3hWw5X2aS4JwpN7ImZkm2sLjtvQlL7NBS1LrB9SDR9pPlwzQ69ShayhHNA8QsD0B/20CvzdMvnFkJPaEH4ylTPMhIATRfDr3NWLtghsgQ6nqRJMGW7msf5YcHjzFLoTmtFIw86ujvjO81LHwziVBKOe8LdFhNoyVmbfMNi8h1hbefHyhFOhqrM6UHHjTYOPIRGLtsC+FZtN19J3IjJssgQa+8kX5bV/KNR4CQhCNp9OyjEiXqE+95zCSGFQvvHl+6pGhtq3fuJmZ+2cVdiEITbI7dWLxmayvNzwxudOPbCcQR8rsOSYIR+oNdSlNYWG5SOeWWsOBb1fXio+YFHpw1iU34KHxeRx1e3XO2ohLKvV/z0xMHuz3ROipdCnQcAgIQTScSsszIG36C+AZhI1D7M962nuceg6TrgBnnwvNv1WdLcaMW1KJwU/rnNradN/EFySHY/9WIOcQdXRF+4joRsvTpBPMtMp+E1tDEJ65iPy8B+GGuJ1k1at6xGyC6OPZOi6pNVzao86ePoWpevvC8tHNbWHzc0NDsb+5yaH0QQZdPZtAkWHG6lRi0PIORXnmjrTSOAgIQTSOLss+Es2Crdbp35qEk0eHY0/ZO8w9TXoEmL8F0D9oBWI82tbCy3QLWiRy7E6m0fIgAf/bUjfDwI/Nicmv2Xe66lLaljR9kYCL/TwY5HYqYpPPdnO6l0IQagx6DHOjK5CGQ4edyykgqxNGaHUqvvYxO5G290T3ISaVZXY6zfcUN/k/fZR9YkmDdYOAEETdqGruBe3sjH4QoewbB++19T7recvsKYHNT4CypDCTd0llVlWnDuADlgynvzF4y1GJxN1v6UZU4MnRNAP/RaDfTmVs5Y+C8TFbf6rJp8hMH657+1rv7M1K8RZAr0wtnOYlI/HYzXnZSiYIp28k33TBRH6upwH3J1mV8H8BY312LOr5VxOfAmFPZ1v0HAzzmOT62H/O/aySHusJASGIetJWFWTt6OpfTsTKsbltwO7fYqKL1BvSBvMDlhj+581wevHo0LqXXBdB/5fzZjWh3oCAQccUigjq6Oo7gwjXuN0ItyevK5Ug9I8ZKbH9vTinJdEi8ZlqS5EDHWd9dzugXqV4EyEgBNFEyi52qJ090U+Bca2r2Wh2w2qnf3PGML7++Pq1L2rMOrMujbnJpN61DoUnL2XCmQWT/001kCbGjzLplotHR299s9A4XZ7ftFaZFVVUKkGohjURSCW/5qbwoZbJbxKyfh7rqc1t+L4xKnaeSL3GQ0AIovF0WpERKf9Cy1avH0qAetc4AmAXyy58EwG/Y+aBzGT6TpuvwPGuNRENjL8z/ySvp0TVQNRTmeHWlhMY3A+iv7ckCRwD89MAfpkOha5XZOR34NFoNLTxz3QMM58H0Ednn45mJ80rB0F0LO7/MJmsTHULczIWTK3hdxyqnLrt3jZOR4HxaQb2setFpT8B46cavQTpRso2KQJCEE2qeBm2ICAICAJeCAhBeCEkvwsCgoAg0KQICEE0qeJl2IKAICAIeCEgBOGFkPwuCAgCgkCTIiAE0aSKl2ELAoKAIOCFgBCEF0LyuyAgCAgCTYqAEESTKl6GLQgIAoKAFwJCEF4Iye+CgCAgCDQpAkIQTap4GbYgIAgIAl4ICEF4ISS/CwKCgCDQpAgIQTSp4mXYgoAgIAh4ISAE4YWQ/C4ICAKCQJMiIATRpIqXYQsCgoAg4IWAEIQXQvK7ICAICAJNioAQRJMqXoYtCAgCgoAXAkIQXgjJ74KAICAINCkCQhBNqngZtiAgCAgCXggIQXghJL8LAoKAINCkCAhBNKniZdiCgCAgCHghIAThhZD8LggIAoJAkyIgBNGkipdhCwKCgCDghYAQhBdC8rsgIAgIAk2KgBBEkypehi0ICAKCgBcCQhBeCMnvgoAgIAg0KQL/H52JHfOwXYv9AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-38"><g><rect x="321" y="751" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 801px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Check for ACPI Notify() events</div></div></div></foreignObject><image x="322" y="787" width="98" height="32" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfX2cHEWd/vPtmc0moBIOJYABRT09UQENZF9mN67yjkBCwuxugEA4FERy4BvyEVFRwZdDUfOLcqIoAYTsDOFNBAkvrtl522A8DhH5CQoCAglwvAn7NtPfS/XObHp6qrurZ3p3ZyfVfyU71dVVT708Vd9Xgn40AhoBjYBGQCMgQYA0KhoBjYBGQCOgEZAhoAlCzwuNgEZAI6ARkCKgCUJPDI2ARkAjoBHQBKHngEZAI6AR0AioI6BvEOpY6ZIaAY2ARmCHQkATxA413LqzGgGNgEZAHQFNEOpY6ZIaAY2ARmCHQkATxA413LqzGgGNgEZAHYEZRxDxeDzyxDM4MMK0lAlHAJgPYN5ElwkvgPE4gX5HjOTw8C6bN2++YkwdEqC1o/tYAm6deIdxb3MTL+7vT/4zSD07UtmurvgbRsboFhA+Wuo3A8flUolf1REO1NrRu5BgngtY7SzNmwKAZwH+O4zIquzGdf9dR22e0qYsXBTfN2LSPQD2tX34FTL5yEwmmZ3sxkzF+hZ9aO3s/gIxvlNDf14F6DkC/5WJ7yqwccumVN8jAFi1zrbO7qvAOLWe95kZQxALFx6/mzGr6TwCzgQwV3UQADwH5q/DxJXZbHJI5T1NECoolZepd4IQ7RvO02oCTgEQkfWQgOdh8mGZTPL+4Ag0xhttHd1nA1hT0RuiNdmBvnOCbIBBEJnK9R0SQci69yAZfG5mY/K3KjhpgggyQ1zKtrXF5yCKVWD6MoA3Vl8lP0ymcUom03efXx2aIPwQqvy9zgmC2jq7LwPj0149Y+AvPBZZNDh4/ZbgCMz8N6y1FsF6gI6S9OaxgsGHbNqYfCzMnk7H+p5EghBVF0D4/ujrcy/wk1xogqhxJh3cFd+jKU9XM3CYR1XiVvAsQAywuBHtAWCOS3lxLTw9m+q7wYvhNUEEH7h6JojWRT3vJZOF2GRPR8/sc6cZwIOv7dS87IEN17wWHIGZ/0Z7e7yNDfoNgDcVezMKYJatZ6uyqcSPwurpdK3vSSaIIknwF7MDye967TOaIGqYSe3tS9/GRvR2APs5qikwcB+BLhudbW7YfHfyZcfvFIv17slk/jsDn5OIo4aIsCIzkFjv1jxNEMEHrq4JwqlTEqc85ktg4tuqYsfgiMy8N9pj3Zcw4YIJciCsBkOIlYokwZnR2ThasuYCd3Y617cLQbxogg8bTCU3q3Smq2vl7NHRV+axET1mGwl8FcBbHO+9YrBxTDq9bsCtPk0QKkhLyiw4NL5L8zAlK28O9DcyzE8oy/ja4v+CCP1k28nwBMdnnja4cHg6vf5PsiZqggg+cHVNEBUKSR6MoOmoVOq6F4P3tDHfaGlZPs9oKtwN4P2ihww8GWHjJNMwfw7Gu4q9DkVZPd3rOwyCsM+CtvF9pg/AoWWzg/kWmFjudgjRBFHFWurq6oqO5uetZvBZ5a9zfz6K5ff1J58NUu2CBWc0zZrz8hqAz7C/R0R9I6/vskImJ9QEEQTh8bIziSCYcW0unRDKamWLk+CIzKw32jriRwJ0y8RtgXGvmW86nprGfkDAaRO9qVFZXQ/rO2yCEPW53Ig8byWaIKpYIy2x+GEGWRPVrkfYbEbzSwb7b3yqiipRtJC4jYBW2/tDJvPiwXTyLmedmiCCozyTCAKEtdmBxMrgvWzMN8SmPZKftxbgE0s9JMY3M+nEl1pj8eOI6Eab5VdNyup6WN+TQRCiTqkFGNMns+k+IcWoeDRBBFxPLlYULzKMo3KpdYMBqysr3hrrWUrE15cr3ei65uiWU/v7+/P2wpoggiOtCSI4ZvXyRkfHsnfkEeknYO9im0a36fk+lksl7m5ri78VUeq3iZlEkaqU1fWyvieLIKTGEB6HEU0QAVeAxIoCqPFKW2qCkHvOGsbtALXbmiU9DfkQBLV0xD9kEH0cJo4EWYtK2NXnAX6UQbdQgX+WzSYfDdj9ieLCWejJZyIHEZkrwZYF19sARIsFthDwAAhXz4rwzbU674nbVaQpuoSIehjY3+Y8Jix8Ht+G1waTzCsGB5J/9hLJVEsQYlHB5JsJeLf9dsfMp+XSyUQ1YqCWjvgCAyRuhruqjgETzs8NJP7Tq/zBi3r3jnKhF0w9GG9vyex6AisY5pXz5+GhZDIpnO98H6fDVsm5sKXlpDdR09gFRfHO7gCGwPwwE641R/NrN2266QXfyhULVJ58+YHRaPSQzf3XPw+A2jp7VoN51fbqqlNW18v6niyC6Oxcumeeo0Ip/U4b9Mnm6NYTnYdQ8bsmCMUJWirW2tH9Q4JlNVF6XjOZjxxMJ1MBq5IWb+/o+RQTfwZMv2bwbbOjnJNtsG4E8Xre2MsAr3WIqmTfKhDRDZw3P5XNJv83QNupfVH8I2ziRwD9m8J7r4L4G8hjTVBrnOIG9A0CPuFhFlxqQgHgAY5GTsv1r3tc1q5qCMJFbjtUCzmItoVNEC2d8f0MpsuKSkipk50DE2WHKRlBkGE8BZNvAPgd8jnAtzVHd4739181rDBHPIvIxs15KKvQTwATN4wg36+X9T2lBOERhUETRIDZ09Fx4q4FjN0BUIvtpDIt1iYygiCDfsrMPw50KgX+xzDzizOZG//uB4W4flPEuJjBIgyEyiZkq5IHzWjhBFUdTfEkJ3xB9vJrl+P3rQycJEQPzveCEoQrORB/KjeQXFvNzaHUphAJgto6elYALDyLgzpp5gn42sjQ3O94OUw5CYKAc3k8WoDTvHuCrIno5MxA37qAYyc/NEl8H0ripdILC7qWv3lWPn8PQOKGOf4EvNnX0/qeUoLQIqYwpqmwAogfCIPuYuDNE3OQ8bNMOiFOuFP6SAjiZZAl4tl5+wKxYj5tFPFYAH4LCIvA2M3ZUAZ+MTY090yvTcLN0mp8IeIFYvyBQcKDNbLt5nMQAe+ziZxKn3yIzPzRfmTU1hlfCCYRH0mILeyP8AAV30iDaYTA+/K4Ut+5MW6FYRzpjFcUhCCEg1Q0j+sB6rI1YIhDIAdr8Xf1vp3yLLymi4YOfCCAhdu/xY8AhgiHMPEw4+Zcuu8O25+orTP+eTB9S0LYeQb+RKDfj3vO8gfAEN9wOmgWCHTFyNAu57qNf0VMoPFYYqV5JERXaYD+VrxNxAD81RyLHBqWt3flqV5uAuzwkRAwBVJW19P6niyCkB1MSsp+2QambxABtvWKTVm862EBEKDqwEWlbdm+lTxMBs52+mIIvcFTT6NHnKwctww/23GXjYgfNhnn7LMX7nXKs4tWWV8vxqWy3zbubI7yCW56iZaupfONfFRsgpate/EpiM3RjPB5zjAK1qZfoK8WQ1SUdCCAxL5blSBcbODFRnheNpUQN7TQTU8lm7CvFVPRekec0u2bfh6g75pj0W8NDv7yFfvE8ggZUWDwF3KppBBRVTzuQeN4kKORXrtIT2A8VMA+gwPJhwJPaskLTt8HUUTcejKpx
EXO4lL9QQBldT2t78kiiPaO7ou2hXMQTnO2tcVLc+nk9sCfth81QQSYxZKFUpWcM8AnXYu6EQQBd43M5riXJ2l7Z/cyZlxj31i8FKAu4pC7UeAeH/0FtXbGTyUmsamWNrECEVZmBhLXyjonm8BgvnD+XrjUQ6kqI7BRgBdnU0kRlsF6VAhiOshBuhn4mLm2tx+/u2k0bSDgABuOLxLxisxA8tdec6xtUe8HJfoDV8dMGUGImFCIGke46XvCmOMWLpUmrK52+3IjD3VldT2t78kgiFhs2ftMimxwiG09b1maIALMZMkECuT6HuBTvkVlBKG6aGWmfB6OWRILESiJioqdoNaObnGTuHB7p+SLVnZaVBF/uW3+QLmJsB9BWL/njR8DvMI2AJN6cyh9J+gNor2zp5eZBcmWbmeexOucULFYb6dJ5m22mEbiaP6D7EDis84bkowg3E7xvhM3QAGZ7wPAd6CAZW4GD7JN3nlQcGtCPa3vsAmipXP5/sSFpMMSz3XMSxhpgggwYZ1gTWfoZRlBBFm0rZ3dFxDjkonuu1gySGLvB9qIRP2SOqQ3ryCnRentw7FpOiOfehGEi44lz+Dzc6nk9ydDrGTvQxCCEDF2RvKvJQESMXaKj/fGKcGrkvgJjyLPXdls8h+ebQNCtdxz3bDlAQw9/Rva23vez4YV9HC7/kpRWV1P67tWghDi5GefLew+xtGDQPgkMQ536gRVDpSaIGogCKGMi1K+c2DgxmcCVBNKUQlBBFq0kvd/Z/Dwsen0ra+WbQ7OK77LJuLVqfGT4O7XAYiXyskUYxJlpOdp0flN4UxVgCHEKwUQ/Z6ANOd5fUkM5kYQY0NzfyMJdTJl5CDdDDxETBKnMTDTybl03y+DTK6WWLzDICsyasmwQU7clYlrHjOj+UWqFmlB2mQvK/H6fYYNOiS3sU/4u0gfOXmqKasrNsNpXN8uBFEtlLL3fAOCipc0QQSAXGIjXU8ipkCLVkIQUrJzWoYIHcc/d2o+Pmi46YobC8rt5Pc/fMXOb3h95CZ78EMmfCk3kPhmgCHyLCojCCKcAMYHeFwEtl2ZzvhKNp24eLJvDqUGB7lBtMZ6jiJiYeVVaq/vxikDRmYaKtNFVbRtCrIXujiNujp0lR9qek4issRv9sfXs7qe1vckE4RSSgFNEAF3nnqSUapu8G5dVHlfdvIHKs0v1WCsMOMs8x+ReXgy07Jcuk/E2AnlkTpcAZsALHCaiTKQM0fHjgnTG9irE0EIoi3WcyaI/2t7fWVexcpYScdXcnORzHuljVq5IZKCEosk4dip5FshT0nqr6yup/U9SQSRJ6L1Y0Tn3bdx3ZMq46NvECooFcu0xipOJgVmOtZhmx6gxuqL1hqLSY0gKnM4V9/iijfLbixF89aNtjzDoWPrQhDuXWL+YjadFDmBQzdrdX40CEGEeaKv2ABUCGIKAglKTvPKPg1y5TYqrNoqxqCO1ndIBLEF4OcBSjHw67HZvDFongxNEAF2PInMtn78IAJe++uOICpjE4UuvvMhCBGXaATATrYpEUoQRpUpVjcEIZlHQdqm0le/MjJrNr931H6XB74svVtP69uFIEJfE364aYLwQ8j2u0w5SPXjSX1vcxMvVg2MpwmibOCtHL0g4zqYlumnPbyHp2NfgOnjWTTIJjypNwigQnwUpG1h4CEx4Q2jWlGH5y2knta3Jgj1IRc5nOvicTEvDDUWk4gHk0d+vTChJcKtZBr3ptPrhJVUmZhjukRMKhFFqxksyeKcKhFTWQJ3iRxaeHGvyqUTNpl/NT30fifIJjypBDHNIiYX8VCYgLsqq+tpfWuCUB/yuiEIl0ELZF7q121ZuADZLWUqCEJmWTRZN6ZpU1Izvj86PPf8Uhwil8RNnulf/cZU5fcgBBGekrrSn0I615xmrpOog5D7MYjYT+TM664AK4vQK+I2uD0EC7yV1ZIDwrSsb00QCsNbLFJXBCGbwGJRzWraepYsnrp6N62SMq9lqfXGVBCEaFB7rPunTPj4RD8C6jpU+x+LHfdGk2YL080Pl96pxsy1taP75wThFMR/Z9D9gHF1KZGTnyd16buyjGJe6V9V++hVLghBtHZ0H0qA8PeYZU0a4HmYfFgmk7w/SFuErJ+aChvt3rVM+GxuICEcAyeeIG0L8n1ZWYnvg69y2e2bLsl/POurl/WtCUJ9JtUVQbhcgV8x2DgmnV4nEnFU/bjESnlQFhlzqgii8rSKrWTSIZlM34MBOloiPuEo9xRA9zObd81uem69nVQryMjhK+H3PVm4ZrsDmSpBuOQkHmXmU3LppEj8HvoTZBOedke5SbpBuMRSqkmEK5m/glKlWRrFoNbL+tYEob7E6oogRLNlcWxqtZt3CRIHN5n/VBGE7ETFwMW5VOIrquafbW3xdyFCIvzBPhO3A2B1LpUQeSUmHolyMhAZScRzZQ5kqgRhLc6u3rcjb95ZdrqexAB1QQhCKisPeLOzSHBs98vLboeqoTYmiSBkVkRBwsfIthSZTwQDT0ZR6Eql1v9N9k49rG9NEDOYIFxOmKJHKhFOK3rulmuBrWQ+Y4dnMjdtdb40VQRhXdMNXA+ixbY2bAXxsdmBpHAy83xcsBpiGEfnUuv67S+7LGbfXBWlk1/FhucI7BaEIESd7Z3dJzPjKrsTHYEunxXdck4I4sQy3IIQxHjbJiNYH63JDvSJbInlBhFTo4OQiVfDMOuU1Ssg9FBWd0VH8/NWM/gsx+SesvWtCcJvZ9n+e93dIKwF2r70bWxEb6/MqsXSXAxu3RU5hJtM80p7iIliWZHWstctTvtUEYRoi0wmbwX6MmiJV1wccZdvjcW7iegXZTkLJLkaSvhIw30TfzE7kPyux41F9p1RZlpu98QOShAu5Og5LurTurxkYIKQh/t+lZlW+nmfu4T7liZZkm5Uk3CDkPs+BA5AKIVf6t/go6ye7vWtCUJ9JdUlQYjme2U+25YS8T4CXTY629zg9F4UN4Y5c17er2Dlnka3LMsXfDbFqSQIjxvTi2A+Dyauc4ZfFvmkI9Gxi5kgTmE2KxJ43j5c8hwUGPiJOTr2FWfoC7HpD+fpc8Vw4qEkDLJPzdaO3haCKRIY7Vr6u9fNTn1a10YQ1gYyHkixImEQMS4v5JsulCUMoohxGoO/7cjCJzLOuZJwUPKqBgOp70NIybhc4jr5Kr+nc31rglCfRXVLEOM3iXgbG+SdO9lK0Vgy0+NmAHt45HTOg/iC+XvgMo8EOZhKghD9dNORlG47INwPpj+O/988ACAR38hODOIHkbLzc7mB5OVew++xMFXTaErzVQS9QRTbKMlnYclgAulh/KZ7lZtwGClHxUXvitGhXVYFSDnqm+3Or7/23+Uxv9QisKp+R2Id5amsnrjRTtP61gShOrKWFV99Px5ioqANf9pkXjmYTt7tpwCeaoIokcSsYboCsG49QZ9AiXdcxCC+3/QSfVVJEHC51YRiuVbqUJUEIV6n9o6esyS3Al+sAAjC/d7Y0Nwve+Ujr6FtKm2A1PfBw9JIqVJHIZnlF6BmkTcd61sT
hPoo1z1BiK6IBB3/2IIPmyZdss1G/WCPG4Ks5y8R8L3CWNNqp1jADabpIIhSP596hgRBXArgrSrDKCy8mPj0oHmKPcRUss/m3UQrpcLVEoS1WKXiHO4fnY0lQQOgyRpf6ybc0hnfz2CLvFtV5p4Ykwgb/5FOr/u93xjW2rbA9cM/sJ5fnc7fPTy0fcOAT8f61gShPsIzgiDs3bEUbtH8MSD6GEAHAPyWMpmvJXLC4wT6HTGSw8O7bPY6wUk3lI7uYwnYnmg8oJmjSiwmryESepTo7BdbIjBOZzJjAM236VJeBfgJBt1mgn6+KdX3iN+NyOtbwrs5OqvpJAb3gOg9YOxWLD8E5oe3ScB+lY9EfuYXwrgWghD9bZrz0k8IOM3W1gKDv5BLJS9Tn87ykiFtwrSwo+dfo8ynMuEIAO+2zTuRCOovxLgzT7Q2yJiE1DZpx130A1Lfn1oxbuuIHwnQLSUHw/H6/MOAO787FetbE4T6aM84glDvmi6pEdAIaAQ0ArUgoAmiFvT0uxoBjYBGoIER0ATRwIOru6YR0AhoBGpBQBNELejpdzUCGgGNQAMjoAmigQdXd00joBHQCNSCgCaIWtDT72oENAIagQZGQBNEAw+u7ppGQCOgEagFAU0QtaCn39UIaAQ0Ag2MgCaIBh5c3TWNgEZAI1ALApogakFPv6sR0AhoBBoYAU0QDTy4umsaAY2ARqAWBDRB1IKeflcjoBHQCDQwApogGnhwddc0AhoBjUAtCGiCqAU9/a5GQCOgEWhgBDRBNPDg6q5pBDQCGoFaENAEUQt6+l2NgEZAI9DACGiCaODB1V3TCGgENAK1IKAJohb09LsaAY2ARqCBEdAE0cCDq7umEdAIaARqQUATRC3o6Xc1AhoBjUADI6AJooEHV3dNI6AR0AjUgsCMI4jOzqV75jk6AOCdto4/RGb+6Ezmxr/XAkZrR/exBNxqq+OvUcp3Dgzc+Ewt9U7Hu11d8TeMjNEtIHy09H0GjsulEr/ya08sFn+PadCnYeJIEPYGECm+swXAUwBdlE313eZXz2T+3tYZXwgm0ZfdGfgfwxw7PJO5aWvpm1b/83QDgCMADBFhRWYgsX4y26Tr1gg0GgKNQhAg0OWzolvO6e/vz1c7SDOBIOLxeOTJp2kZCB/IpRJfdutrNQSxYMEZTc1zXjqfga8CiLrUXWCmY3PpvjuqxbnW9xYcGt+leZiSDBwGoECElZmBxLXOemOx3k6TTEFkbwIQyiGi1rbr9+sHgba2+BwYOB2E0WwqeUX9tKx+WtIwBCFOiczcm0sn7TeAQEjXOUFQa0fvQsD8AQGtIKzNDiRWhkkQrbHuTxJhje3GUFE9Ac/D5MMymeT9gcANrzC1xeLng+hbVpWMe5ubeHF/f/Kfzk90dXVFR8d2v5wJHx8vil+MDc09c/PmK8bCa46uaaYhIA5Z/3jWOIbZ/DZA/8aE83MDif+caf2YivY2EkEIvB40o/mjBvtvfKoa8OqZIFo7e/6DmL8/sXmHTBALupa/eVY+fw9A+zuwEzeypwHKAyxuFf80xyIfHRy8Xoibpvxp7ehtIZji9rKrOBSYzIsH08m73BoSiy17n0mRDQD2AjDKzKfk0sm+KW+4/mDdINDW2X05GJ8sNUgThPvQNBpBbBOP4wfZgcRnxw+MwZ76JojuLxDjOxM9CpkgWjriCwyQ2GjFxjt+OGdcy/mmswcHf/lKMCQnp7RDrwAi6ht5fZcVPjcCau3o/joBFxZbVdMhYnJ6pmudSgTaOruvAuNUTRD+qDceQSicKt1gaSSC8B/68hLOvjPwZBSFrlRq/d+C1jVZ5ds7u09mxlXFW9QrZPKRmUwy6/e9trb4uxChewDsYxEfcHEulfhKNYcIv2/p3+sfAU0Q6mPUiAQhNoAKqxYVSDRBlFlw/c7g4WPT6VtfVcFussu0tx+/u2k0bSDggPFv0XXN0S2nqholtHd0X1RUvouXnza4cHg6vf5Pk91uXX/9IaAJQn1MGoUghBnqnmXdrkLUpAnCRhAeyl/16RVeSYcCfRTgxdlU8jeqX2hv73k/GyxuEbuP80v1okjVb+py9YmAJgj1cWkIgiDgXAbOBLCfreuvGGwck06vEz4TSo8miPokiMrbA2dGZ+PozXcnX1YaWADComkkP28twCcW39G3CFXwGqycJgj1AW0IghAOYAZhFjOuATCn1H0Gcubo2DGbNt30ggokYRDEwoXH7xZpii4B4d9B9B4wdit+W1gD/Z2YBgowr8wP7zroZ27pnMiefXCc+P38IFwcDr1hIqwtEH8tYlry/H0ncK7CTNCyQY9gPUBHTdTDdHIu3fdLZyPaO3t6mVn4OZQc9lZlU4kfqYypvUxrLH4cEd1YqoeAr2VSiYuC1hO0vOW78kzkICJzJdjy3Xibzc9kCwEPgHD1rAjfLDPXtX+vvT3exgaJm5Pw7RDPaybzkYPpZCpIuzo6lr0jj0g/wXKEFM8oAx/LpRJ3e9UzbihgHAWYpwH0IQDznPPbNPhne+/BuWQyWVBpU8VcdMzlgxf17h0x+UwCLwXw9uIaF3U/C2AjE37k9T3ZWvBsl48BSEvL8nmRpkIPg3ska/xpAIMAJZuj5h1+46mCz3SWaRiCGBua+5umOS/9hIDT7IAGUUjWQhDjk9j8GgErPJzM7E37x7bFfd78PTnhtpDqkSCyA4nTWju6ryzDuQpxlMRq6rGCwYds2ph8zA6ShEi2kkmHZDJ9DwZdOG1t8bciSv1gvKv47oPmWOTQSTTZpfZF8Y+wiR8Je3uF9r4K4m8gjzXZbHJIVl44Cc4axu0AtZd+J8Y3M+nElxTqnyjSGus5icgi3eLjfSuzxiGKVWASzplv9P8WP0wGzs5sTP7WzxjAjSDEN4bzdAkBn/JfU/wwmcYpmUzffc62hUUQB3fF92gqGD9g5mX+7bFa4Tue/jhOb4mGIQgRQqK1q/ftyJt3EvBuG6wvMoyjcql1g35QV0sQ7YviH2WTxO1F2NoHeQoMXD07yufIThp1ShAr2zriRwJ0C4BZxc6+aIIPG0wlN6t2vrWz3GxXOLHlUonTnZtJBZFUQUa2NgmTVzu5FYjo5MxA3zrVdquWExsqRYyLGXyul+OhvD4eNKOFE9z8edo6us8GLIfG0uY+GEHTUanUdS+qtK+ra+XskfxrSYCOmSAZj9uUWFeUL6wDqEWlfluZAgjfH3197gVet2UZQcAsnI2IcVXAb77KTCtz6T5xS5x4wiCIoiWcmPN2MbYiHNyfj2L5ff1JceOZUU9DEYRAvjUW7yGiq22bl9hz+kdnY4mfzLoagmjv7F7mFG0VZ4BYHOI0nAbTCIH3ZeEBLTl9MeH2sWY+0dm+tlj8dJCxcLw+PhBA8d/W/x8BDHE6K/7MjzQ37bSmv/+qYfEHPxFTV9eSuSP55vMB/EuxAiH2EHGLSs/TDNxJoO1ex2xuyqaTV4orttFUEKKI96tsMM4VITkFuyqdHdZHqOa0bP9+5ck5mDWUyuoWIUtmzXl5DcBnVJQnvECMPzBIzI0Igw8i4H2
SE6lraJAKhbuieKjUltZFPe8l01LYlww7XAm+vX3p29iI3i7ZGAtgPMmEHIFeYfCbCOgAML8SI7pidGiXVW4k4SQIYYVIwEj5fMerBOQs3IibAcTAlpizJHYsffbPecM44r6N654s/cEixLHXV4HoX8f/Zn4EKP3b+sMmgLZHBijO89L7jtAu9u69CvAfAcO6zXphMFO9+BuOIMTilImawPzFbDopHM1cHeiCEkTbot4PwjSFPHjcMmb8EbGB1kWQP88Z5E/Iop96hrq3OaP9EMBb7DPNL5aU88QddqiNir77nNRbO7p/SMA52/ugrjiWyNGloh4JydUcB6riRkJ4FHnuymaTQuQXxkNtnfHPg62kqPbMAAAQ9klEQVRQILbNix82GefssxfudYoUhd7KmNUknPmEoYV9w7uzOconOG+XshsAA6tzqYS4rfg+bbGeM0H8X7axuwMFLHOKtVw2xgIzbjYjfJ5THChsw1o6l3/A4PyVAB1kXxMM/kIulbxM1jgffdhzzPSZseFdEk6Caelcvr/BBaGzmjioFOv31FEFVVKPH9ToJ7axEcEfzx15fe5VEtKjtkW9B8I0xSHV3i5lvx3fAZzCAg1HEAI7p2NUEU9fUVMQgigG+roeRItt4yXkxudlU4kfexFRS9fS+UY+coPj+uwZNqLeCEKyySsvANVbgfOkG4bzniSkiJJyVnVNyjzSAdyNAvdks8n/9aiHWjvjpxKTmDslQwvXQISVint+YDQaPWRz//XPe7VVZhwAQLqhVsw5oADmC+fvhUu9FNBFRfaPARb6uNLzBAp8SDabfNTZPg+CeAgFXix7p1SH9Ibjc7gJQhAyMnbDy94vmbg7CImrzrfJLteQBCFAc3jdWjgScNfIbI67iZqCEIRkgwzkoSslMeZbYGK5TEFZbwQhVZYqWAV1dJy4awFjd9jI0ZVYWmM9S4nYFqKbA8naZYtn3Nx19+sAxEu/M+FLuYHEN0NYbNTW2bMazKtsdQWJIusMCyLOGVKT3oWL4vs6rMmUiM4pnnIjXZkYEfAWFdnxK96KbrMCSxYfN6sxF4LwjbNVqrdSJ4PHzGh+kasOJ0CoDYEDNRU22vSayvq2WnVFIczHmqtoWIKwQlfv9PI1zNxjQ6kA4i9mB5LflZ3wgxBEpYgFgS1iJBPbdfLVG0EITCtPmP5iptaO7kMJ+PV2HZH7O+2x7kuYcMHERs64NpdOnOJnFeO3KiarXsmm7XoDcGtjgI3fqXAXBxRfMVPlnJPrYCSmxa43ALe+qN5ypAQRwBihJRbvMMgy/d252BbPTTzIDULSNmWz4vb2noPZ4JsYeA3AHwzgfi7wajcLNb95Ox2/NyxBCDAdkTxL+G6FYRyZ3bjuv52AqxKE5BRclfJUYosOMH0ym+4T8s6ypx4JQqIs9V08EmJ1kxdTa6z7aiKcPHECrcKcU7aoKhXVCCWsiNPPAlXoN2Q3HDfFvMSazPOQItPpuFhxychHamXmtWlJ5rd0fsgIIogxguQ7oRFELHbcG02aLRJTfXh7X2euVVJQkmlogrBOufIcB1LlnypBtLfHD4RBdzHw5iLgStd75+BI5ZsuTjr1SBBSc0mPTVwitnD1aZBaYRE+mxtIiJDnNT2tsZ6jiFgs+pJCOJTMgc6biRBp/nOn5uMf2HCNOEEqP62d3RcQ4xLbhnRbc3TneMlCrfR3mT7FKwSJqoJedgDiKrCXbq6SA5CMIJhpmdNc1Q1AyfuhEYT4plNnVmzHSyCsM0xaOzy8y2Y/p1flwa+zgg1PEC7K5ILMqkKVICrFJHiGDTokt7Hvz0HH17mpuCXAqUeCGCfgCocrVz1B5YmXpRufqFe6aSimTPUbA+dGGUYSJNnJv8IU2a9hE79XmDS7Yloxf4jWZAf6hHVZhbVe5RySl5XebIE7AQqU0pfBTTRuOj3hHyS7GUjGOpC12mQThIvRi300h0C4Hyat5Sa6M9e/TuAUON2A8vSYwoINTxACSxdz1ApRUwCCCC13dcWidZG91itBSGTmbmKmCgUuu4TWmGqCAKCseHRbm4GdsYItctcbjqrJsMz3xC20hoslVrAWu5WW3JCDbvDOqoO+H0QHUfpW0WBChJpX8CLHVhBuZDau2nvPwu9VQ46EA3C4tewQBGGddDvinyWQSCu43c7cYTVUFwQBSDeDeiUISRA8qbJUEuZCGlqjNL1lp1gRc0t4zNe6BII4iql+a7oIQvJdqdNhJZG4W4RpgpCPektnfD9iEp74E5ZZCvPjJQK+VxhrWl0vibcU2jxRZIchCGc2siICwulnVS6dsJyG6oMg5Au3XgnCuqFVhN6otMl3KnDdQmuUZqY0w11IBBH0xKmyoKaLIMbxd4TekIiZKnxPPEySNUF4jji1dMbfS0yfJ+AExRuFqPBBNqi7GjG0yvybrDI7DEF4iJomwj7XBUHMMBGTwFWifC5T2ktuGb75HCbzBiExMpgUEdNU5TqW5Loo8w4P4ntijWdl+tma8fHawGol7KDvVyNikrXfitK7xXg3FXgJE3pcQqbYX51x6W53KIIQvnKyMAil3MZNc146klCWVU0u7qmw5a9eSe00+3SzfKnnG0Tx9lUeesN2ipXoKXx9RqZSSR2GDmL/w1fs/IbXR4TNuwjnbT3E+FkmnfjEZJ3uSvVKFOQFZl6aSydvtTb8Cj8BlobWKNUnIdBASuOg/Q26wTvrD/p+WAThbIdl1WcOfwhsrgBDhCa3h+ApFa8qVH1QTMMqv6MRBNziyxBhpcl4WYkgKoOdVbWAgmwq9U4Qkk1oIvSD02FKxcZd4sEqdBuh6CAmw4pJLMj2WPdPmfDxicUZwNmr1gXtxNguwgvge2I1Q4p9FTk/VPsUdIOvV4Kwt8uKu/Y0VoLo/9lz1FRr+qyKZdjldjiCGD/t9rYQzDu2Bc3b1QboEwSsZkB4WZce6Q1CO8pVTkMPK5l7HCG2lWI2ufhBnJ8bSAhDg5oeVVFi0I9UBsFDNbkrStZeIhTIUyLKKLN51+ym59Z75d+uuKUVnfRGm6Mjs/L5ewDav9gfX5Nsefwh71uHDKsi0Yjw+7sS8P9N4M+Gyb/IZJLbI6fKTZoDibSCEkyQG4S1V3DhaCJqY2C/IvGKnBgqj3A4/BYBImqy556iUtl0lNkhCcISNcXi54OsiJv2Z7Q8TLjcomicZJzRTFERZthvQCXB0GZUqA1n/5z9ETcFNvnH5Ul6/MNxFOut8KRWCSXhh7n4vWIjD+mkL/EsDxSfy2pbW/xdiFgZ+/Yp9UWx304zYksPxMzDjjAUyebo1hO9yMZqR0XOCQRO4SuJhyZ1KA26wTvHOOj7gQjCkbfEzU/Jbd5N1mFEZZ6HUWZHJQhL1DRrGDcD1OUBZBD780Cbgdz5xv2UVu8iJoFhxQbJuBfEa7ZlUxMJeUrJhZRlsJUexVDa3PwWxmTFYnJxytwK4mOzA8lNfu0SuoTR/LzVDD7LVnaIYRydS63r93vfaU0mCFq8Y4tnpZwgSRamxi/Ypb194xGLo+KWbg95/YfC6NjhzhTAQTf4qS
QISZynQEQpOQSGEtbFby6E9fsOSxDjtwCpqMmOrStBuGwGeQafn0slRTgIV09Kt3DfzNxbUiz6nc4B783SL2FQRf0d3eXOf1WcqiWhpJ8hwr3MOKn4vUAil8qQGGohrb0WhzSXQhVhJNy+0RKLH2aQlW3Pnhv9LzBoiY+JI7XG4t1E9Av7u/CI8OtsgzP0hpV4h9kE0QetssFiQ1FbZ/dlYHy6/Dt0TXPU/JRXrmUrm55Bq8v0MUCZSbm9zukmCC+dmCxqMQClCL2ykN8q+rewNvcw6tmhCUIsmdaObpGo5UIXMD1j9LglDBIJVQoR4zP2rFaifsss7mljMRELxZUjPal3KGWZfBuGcXx247qMrO3TQRAW6VaE3rC3zj20hqwPTlPXMEJiSBSwvgEGgyw0l1uAqOJFMJ8HE9c5o3m2tJz0pkh07GImiJtD1PY95dtH6R2XuEHjP3uE4ZD10eUWIM4+v2dEVuVS68StqOwg5OZM5nX7mGqCkIiHPfNOyFIHAO4JoATS47nIjZ8C/A4bthMm9UHm1HSW3dEJArK49bYB8Q3iFkbKUZWruyT+k2hmAcCzAI2AzEcLI/kTS9f36SIIiUnrBJxeoTXkJFeZOzloHc56iyGYNwCYO/5b7beSipP8ofFdmocpaTd5tZUZj9vD9Mfxv5kHALRAknJ0iIk/lxtIXh5kg5DlKSm+7+t7IvtOW2d8IZiE93qlySbhBTA2AvSclQbUxIdBeHtlPfQ3GOaS7MZksc/lJaaaICSHLdGgPICnAcoTY+OspjlnlYIjukgLiqRblkIWVmphwofA2M2Bg2eqgSBjPJVld3iCEGDHYr2dJpm3AXiTA3xfghDli6KQtc40ogoDKTb4xOhsPssvX3Z7+/G7m0bTBgIOkNXrTPwyXQQhC71RbK9naA03rJwyXEWFrSv0FZtDwFO1wphaRcZFE3TFNnGESDEb9FHKTCir1N2ju3oiHL8p8w2O07BinwQ50Amy8PqlCqaaIFzSANj6UxnN4OCu+B7RPK730Vm6YZIXyZJGhuZ+Z6ZFfdUEUTwHyOWt7lZMzplw8KLevZuYL2XmZZLToGTi8MNE+HxmICkSwitFfizeIkQOXpkDTpmoZLoIYpww48cR0Y32uFd+oTXcVlalZVD1G51E/1DVqVpxZ7REisUc5JcCeKvKewzkmPj0wYHkQyrlZWUkFkgim+LXMqnERdXWKcRg1DT2DQKE49+EfsWjPnESv7yQb7rQLwbRVBOEJVquTO860RW3LHviJsER+moxF7sKBqLOB8ngczMbk79VXefVjtFkvKcJooiqywld6QZhHxhBFBGzECdgGYjeY7tqilPh49vEkxsYdH21UR5F/VHTPB+Mj4Gwd9kmbIuOOp0EIQu94ZWnwGtiSzb1qnUGleE7lE1ua1p7IrthdPaLLREYpzOZMYDm2zbZVwF+gkG3maCfb0r1PVLrRiIJRqjke6LSScvRdISOBeMU4RcAYA/bHNyyLQ/1X8H4eWEsf7PTWsmt/mkgCKspQl9iMAn940cBzLO1z3OOFXNuHwWw8FVpKeoTS7ojazwB424Y5pXz5+EhHc1VZWbpMjsMAhKC8A2t4QWOxEvYN7WmrD6JAl3Z5HaHGTzdUY2ADYEZd4PQo1f/CMjs8TPpxJeqbbnE1DAw4UhMcKvSiVTbB/2eRmAmIqAJYiaOWh23WaKkDkW84Ugdq+zwVYLKad3DwMW5VOIrtYpz6ngodNM0AjUjoAmiZgh1BXYEJEplz8ihquhV6ogCxQZyhqF4AgU+JJtNPqr6fV1OI7AjIqAJYkcc9Unqs1DGzprz8hqAzyh+IvBJ31sX0X0yM0TaR5EVUDnkgTOsib49TNIE0NU2HAKaIBpuSKemQ8LLNjLKo5nMTc+JL7a1xd/JBn2bCEtKVi0i1INhjh2eydy0NYxWOS2zSnk8fGzLnd7yMy5pSxjY6To0AtUgoAmiGtT0O8I5cCkRr/eAoiBybGQGEteGCZfDqdH3FuG4PUxKm8Lsn65LI1BPCGiCqKfRmEFtcQn9YeuBd2ypGrpafiPwDmZXFnBO8cZRQ9P0qxqBxkJAE0RjjeeU9cYnsb0IH3KGX/iQahvriJ81ykzLc+k+4bld9tij9TLwF0SNI3L96x6v9rv6PY3AjoaAJogdbcRD6m9X15K5I/nm74J4SdFbPA/wZiK69K178M2T7T1qDyIn03U4AqwNEWFFZiDhJRILCRldjUagcRDQBNE4Y6l7ohHQCGgEQkVAE0SocOrKNAIaAY1A4yCgCaJxxlL3RCOgEdAIhIqAJohQ4dSVaQQ0AhqBxkFAE0TjjKXuiUZAI6ARCBUBTRChwqkr0whoBDQCjYOAJojGGUvdE42ARkAjECoCmiBChVNXphHQCGgEGgcBTRCNM5a6JxoBjYBGIFQENEGECqeuTCOgEdAINA4CmiAaZyx1TzQCGgGNQKgIaIIIFU5dmUZAI6ARaBwENEE0zljqnmgENAIagVAR0AQRKpy6Mo2ARkAj0DgIaIJonLHUPdEIaAQ0AqEioAkiVDh1ZRoBjYBGoHEQ0ATROGOpe6IR0AhoBEJFQBNEqHDqyjQCGgGNQOMg8H91paTKirrUQgAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-49"><g><path d="M 371 1001 L 371 1044.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 371 1049.88 L 367.5 1042.88 L 371 1044.63 L 374.5 1042.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-50"><g><path d="M 321 951 L 51 951 L 51 407.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 402.12 L 54.5 409.12 L 51 407.37 L 47.5 409.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-51"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div 
xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 962px; margin-left: 312px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="303" y="956" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJ
eBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="jeVlbFHk8Qahp5zcIn_D-39"><g><path d="M 371 901 L 421 951 L 371 1001 L 321 951 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 951px; margin-left: 322px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Any GPE <br />pending</div></div></div></foreignObject><image x="322" y="937" width="98" height="32" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tnXucW2WZx3/PSabTggouQrkr6Mf7ZbXKJHOpo1ws10IhMy0goLggwsJ6AVTcXV0BRfyosEUQUblJOwnlJgpyqWMnk2Tqpy4iy/aDF7yAQAFbWnDameQ82zeTTM+cvOfknORkkpw+559+mry35/uceX95b89LkEcICAEhIASEgIYACRUhIASEgBAQAjoCIhDyXggBISAEhICWgAiEvBhCQAgIASEgAiHvgBAQAkJACHgnICMI76wkpRAQAkJgpyIgArFTuVuMFQJCQAh4JyAC4Z2VpBQCQkAI7FQERCB2KneLsUJACAgB7wREILyzapmU/f390W35+TcBfLK1UQR8JZNOfrllGhrihixYcFbH3Lmb32OicCwIhwH0egB7A4iUzC4AeBbgP4PxkEm458B98EgqlVKf+35ifQMXEeMK3xl3ZNgC0PME/gMTP1hg4+616aHfAWAvZcZ6B44l4B4vaWtNw4SLcyPJb9SaX/IFT0AEInimDS+xu3vwnWzwwwD2slX2mDkZOWxsbMVzDW/ETlrBIQsTBxkmfYGApQBe7RPDJga+Z05MXrl27Z0v+skbgEDoqnuMDL4gsyb1i2pCIQLhx1vhSSsC0Ya+jPcOnAtguabpBSI6NTMytLINzWrpJnd1nfKaSHTyUiacAyBaZ2O3MNElB+xtftfriKJBAqHMKIDw7Yl/7P7Fdeuun3SySwSiTo+3aXYRiDZz3ILDErvN2YqfAdRdarr6ozZ2TG3wfSjgxGw2Nd5mprVsc7u7Bz/ABlYCfLBLI0tTSrRtKg132qac7FlVx/x95PkzXnzVQIEoiQR/ITuS+qbTSEIEomVfz4Y2TASioXiDL7yrJ9FrEN0PYFdVOhENMZtvA+jdpdo2k8mLMplUNvjad74Su/sSRzPTLQBeW2k9rwcb3zFAPxkdXfmMvXNNJBKRp5823mAa+BiBzwKwZ2UZdP3E+G7nuf16V3k0ArHRBB8+lk6t8+KV/v4z5k5MbJ7PRvSY7e38T01bNhtsHDM6unJEV55GIHzV76WNkqb1CIhAtJ5P3FpE8b7Bq8F8XjkRET5qMj5MwMemP5PF6kC8Gu9LHAKmn1Su9fB6k3H+gftitdcpong8MY8idD5Pdc7zLA0cZ+aludGU6wJwvQJhBRKPJ/4JERoC1OK65WG+GyaW6UY0IhCBvFJtV4gIRBu5rKtr2Xyjo/AQgHeWml38FUeM/YjoDssOGlmsrtOvhxxywh7GnI57CYjZirodBT47m039vZYquvsGTmSGGpFMiwQDOXNi8hi3hesgBUK1u7t7yevZiP4MwNstdjiOCkQgavF2++cRgWgjH3b3DS5l5lst6w1jEXQcWShM7oIoDYPxppI5slhdp191c/4EPLhtLifWPZR6qY7iKd6X+ByYvmbdElttc0HQAqHar93swPTJ7OjQ9+z2iUDU4fE2zioC0SbOmzr7sNdtABLlJjNwdS6dvEAtRcR6B35gnWYCZLG6VteqrawRk9Q24oMsrJ9A1PhIbnjln2ott5yvu/uEvUyj4wEC3mMpK9UZ3XDy8PBwXld+IwQitnDwbWQWt0vvM10n4absSPIMEYh6vRyO/CIQbeJHzdmHCQaOzqWTasoJ8d7EIoDuBjCnZFJNi9X2joiB43LppJqHhzoc1jFv0yICnwPQ+wDML9W1BcATDL7NnMjf5DZVou18azggpeb0EcEqgI6c7sSZTs2NDv24XpdqflkXmHFebjR5Xb1ll/Nr6vgLmfmFmcwdf54tgejrW7JPnqNqUfqN1YRKRhBBeb69yhGBaBN/aX5Bzlhn0KxPqCOy5RGGZyudBCLel/ggGNcB9NYqheXB9G2Y5n86bN+sHO0wVnd28OLh4dTLXhva1ZtYYIAetOwuerJg8KFr16Se9FqGLp1mG7FKFviaTunXe3L7Kew0M1bNje6SHh6+catT2xsxgtAKhIMvRCDqeavaN68IRBv4Ttdp6Tr/WO/AVQScbzHJd8dWIRDMi4nogO3baq+07b5xJceEn0128sm6+XrNaMf3lkmNkP0ol06eWe1EcDV327cRq/TEuDwzmrykWt5Gfj9rAiFTTI10Y9uVLQLRBi6L9Q4cRsBPLdNHr5jMi8ZGU2lr8zWdm+/Fak1HtALA8RZxUAe81K/0UTBtI/BBPLXTpyLshFNsHd1ox08cKY1gTgC8OJtOqfMhdT0a+7Ws66qkhsyNEAjNKMxRDGUEUYPTQpBFBKL1nVhx9gHgzMRcHGX/da6fHvG3WO1yYlfNw99lRvhC+zROcT0gigvB9O+2MBSOI5jK0Y7eJp17ursTcTaKhwVfU/re90jJwe0U6xm4mQinTn9P+D3y3J/Npp5u5qvSCIHo7
h34culcRtm0AjMv0Z3JEIFopvebV7cIRPPYe6o5Hk/sZ9vCCreol5o/el+L1Q4CUQDzl/bfF1e6HAzTbt906nA0nbzndtptDGoKqKfnuFebNFctyH9w2jk1rI94cqzPREELRE/Pie8wKfIAgH0tTXFcxxGB8OmwkCQXgWhxR1aefcAGMunQTGboMV3TddMGIFqeHRlSaxNVQztrBcLlhO2MKa7Kg3xKzC7JjSQvt7dVN9rxMs3U23vyawuYvA+grlKZnoWlmqu1i7YOc/LVygr6+yAFoqtv2buJCykC3jyjnYTvZEeSn9G9Jw2OxeR7DSpovlKenoAIRAu/GSp+zrb8KymAVPyc0uM+ZaTb/ulnF46mI3KcdrCj053VgEsHW1lX9WmmyvWY6nm8uri398SD84gME6AW5aeeEAiEign17LOFvSY5+n4QPkmMI+wRaRlwPechAuH1LQpXOhGIFvan9t4Hh5OuVjPiPYNng9i6Z9/zYrVmd9Bfoyj0p9Or/ugFVXfPwGVM+KKXDlZjX9UFYc1OrfOy6eQ1XtpWLY1+9KU/OOZUVn9/4lXbJuluED5crb6K732Jqe/S3TKMq5hemZHkKqdEIhCB8m6bwkQgWthV9rl2Bjx11tpfwh5PVmtGEL80eOuxo6P3qMNwVZ+K/C6dnm6E5LaeoNn95DrdVrWxtgR+ThaHSCDUTXNnZtNDt7tNQYpA+H2bwpFeBKJF/ajfkUS3dUafO90pHEPZFIcrST3N1ds7eBV/6OVdOk949IFbXvGCyo9AqPJiPYOnEBXjS5UeLsaXSqdv22ivr/L8BN/bGd014XbAzEuby2mCWINooxFEnohWTRJd+Ks1K/9ajZMsUlcjFM7vRSBa1K+asw+ep4mUSZrFbXV5RNXFar8dvB2f3/ya0BtO00wV2305oNAaZRsW9C973Zx8/mHL3RrwK5AtLBDPAfwCQGkGfjo5l9f4CTooAtGiHUWDmyUC0WDANRavOftQY0kzs1UNR+G3g69XIHSjHd0pcc1236q2+CXmsCng0Ylo9NB1wyte8FueW3qtkPhbg5jVnT8iEEF6v33KEoFoQV/pAtoF1Myqo5DZFghll2bqqKJTjvUkjrPeecFAIKE17FwrFtmBhnTEIhABvdFSTEMJiEA0FG9thWunh2orSpPLfZtsMwRCs/g8I1KtZpQRWGiNihFQZVgTlSSwnVLl+kQgAnuhpaAGEhCBaCDcWop2OMcwDuDZYtg43w+re5CtcZJcF6ubIRDKpIrtq5b1Es2IKqjQGhU0dXGinEKb+HaFJYMIRD30JO9sERCBmC3SHuvRnn3wsLjsVLz21jCX8polEJWBBnl6msk+ogoqtIYTM024kglmPi03mlL3OAfyiEAEglEKaTABEYgGA/ZbvKZzqnp4zK0O7d5+wDnmTt/ARcS4YrpMnyeJaxUYXYTW0oVID9tuy/O0Xdcvd2t6hzhFj5OZP8rpQh+/9YlA+CUm6ZtBQASiGdQd6tTEGVJnl7SRW7022+FMBJy2iNbawZfbU0/+ijMYjMvZ5O/ODFZYHw+P3Cjek7gYVLw32qKVgdxJXSxPBMKjJyRZUwmIQDQV/8zKNRfpuEZu9dp0XblOd1bX08Gr9tSTv2J6jbEaxMu332K30nIXRuALxjqOakTTuZVSDBxuF4nJKJ/2q+HUs17529OVwqOfVwqPvmN9SLa51opU8jWIgAhEg8DWUKzu7EMgoST0IxNop2rq6eDrFQjNAv0zRFjNjFNKPAPh4dU3h/QOvtkA/6Qi6inwPIDPo8ArHK5V1VYxNWrAIIi+AmC/ikQiEF5dI+lmiYAIxCyBrlaN/uxDcKEkNGsb2pPVzRSIosBUhN6wkguORzV/lL+PL1z6Xph8O8AHa/JsAuFGwyzcsHXrHk+sW3f9pD1Nf//xu2/Ld8QYdDoBi52ubWUgZ4JOX5seekLXtiDDfXu13ZpODsrVQq3984hAtIgPNR1j1UNtfpqu3R2lWaxutkC4HRIMOrSGV36x/qVvoHzhZoD6quR5DqBSzCqOli7jUf+6PVtA/FXksdxtNNKCAuEVn690DByXSyfVpU3ytAABEYgWcILD2YdAQ0lo72qYsn3GnH6zBcJpUR0uO69mw4XqToWnnqEBAFcBUGdL6n22gOk6Mx+9dGzsx5urFSYCUY2QfN8IAiIQjaDqs8x6b4HzWp3+hPbMk9XNFghliz2shvqsUaE1vLIrp1PrCFsLOIkYF29fPH+r3/wAr2fCFXMjuH14OPWy1/wiEF5JSbogCYhABEmzxrJ0B7MAXpxNp+6vsUhtNv0p4ZmL1a0gELrQG43gUS/bYjuj+WOY6DAA7yNgnxmn1gkvgvlZwMgR8XBhIvLg2NiK52qpVwSiFmqSp14CIhD1EpT8gRPQCETDQmsE3ngpUAiEiIAIRIicGRZT7Oc2Gh1aIyzcxA4hEDQBEYigiUp5dRHQLFI3PLRGXQ2WzEIgxAREIELs3HY0rXI7rnt48na0UdosBNqFgAhEu3hqJ2jnggVndcyZ99JygM8qmRvoWZCdAKGYKAQCJSACEShOKcwrga7+JftHJngik7lTha1APJ54Ixv0dSIcDyCiPmPgN4Y5eUQmc+cGr+VKOiEgBIIjIAIRHEspyQeBWM/gEiJe5ZKlQIQzMiPJW30UK0mFgBAIkIAIRIAwpSjvBGL6qz0tBdD1E+O7naeLb+S9FkkpBIRAPQREIOqhJ3lrJqA9Pb6jtOTEXD5r3UOpl2quQDIKASFQNwERiLoRSgG1EJiKctr5TRAfD8YeAPIAryOiK/fbm+9KpVKFWsqVPEJACARHQAQiOJZSkhAQAkIgVAREIELlTjFGCAgBIRAcARGI4FhKSUJACAiBUBEQgQiVO8UYISAEhEBwBEQggmMpJQkBISAEQkVABCJU7hRjhIAQEALBERCBCI6llCQEhIAQCBUBEYhQuVOMEQJCQAgER0AEIjiWUpIQEAJCIFQERCBC5U4xRggIASEQHAERiOBYSklCQAgIgVAREIEIlTvFmGYQiPUNXESMK6brJtyUHUmeoWuLn7TNsEXqFAJWAiIQ8j4IgToJ+On0/aSts1mSXQjUTUAEom6EUsDOTsBPp+8n7c7OVexvPgERiOb7QFrQ5gT8dPp+0rY5Fml+CAiIQITAiWJCcwn46fT9pG2uVVK7EABEIOQtEAJ1EpBOv06Akr1lCYhAtKxrpGHtQkAEol08Je30S0AEwi8xSS8EbAREIOSVCCsBEYiwelbsmjUCIhCzhloqmmUCIhCzDLwR1cV6B44l4J5y2Uy4ODeS/Ib6fzyemAcDJ4BwLkDvAvDqUrrnAKxmwjUH7M25VCpVqLVtiUQi8tdnIu8nMs8A43AArwcQLddDwKMg3DwnwncND6de9lJPf3/iVdsm6W4QPlxK/4co5ftGRu54Rv3/kENO2MOYEz2dQCcDePNMu/jXDLp2cnz3+9etu37SS332NB9YuPSAKBeWwsQyEL0VwDwAeQb+F+CbeXLODWNjP96s8vkRCK9pm22/em8oiqOZ8XGA3gdg
[draw.io SVG payload truncated: remainder of the embedded flowchart image data for this diagram. The only remaining node labels are "OS moves active core back to ACPI C3" and "MP1 puts system back to sleep", after which the SVG closes.]
\ No newline at end of file diff --git a/Documentation/arch/x86/suspend.svg b/Documentation/arch/x86/suspend.svg new file mode 100644 index 000000000000..a69073c018d5 --- /dev/null +++ b/Documentation/arch/x86/suspend.svg @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- Do not edit this file with editors other than draw.io --> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> +<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="407px" height="1132px" viewBox="-0.5 -0.5 407 1132" ...>
[draw.io SVG payload truncated: the new suspend.svg encodes an s2idle suspend flowchart. Node labels, in flow order: "Suspend initiated from userspace" -> "GPU driver shuts down clocks and sends SMU messages" -> "SFH driver notifies MP2 to stop all sensor collection" -> "All devices go into deepest D-state or F-state" -> "GPIO driver suspends non-wake GPIOs" -> "ACPI s2idle driver notifies EC using _DSM" -> "uPEP driver (amd-pmc) sends OS_HINT" -> "Put all x86 CPU cores into ACPI C3" -> "s2idle loop waiting for IRQ to wake"; each "Failures?" decision branches to "Abort suspend; details logged in dmesg".]
zWmgJPVlvSpaJBL6qYgiLwxwU0FdHNRdzOhroJZqR9ha+Jv+ScIIJCqJAhBqLa3d/R8lwlnTPaD6OLhwb6TdTv94rL1aTU7f1X8HQAF8vNhcISA/e13dDqnPg1BBFLL+ZGhpNEjIATRgCMjdwE4J/tpMCmzxLc4m8jA1ZnR7b/gZvEkBOFfqEWLYsAdvapJo5rxTxABL9wDE0Rx6A2tmknjge16qa0JLeIf8FIpNXhoCKIqm49STZHfxVGuocdA7gI0jBX58AX2x4NeJosPGBpKDOs6IAThX6wznSA05KVVMxXHcGJX72shCP/jq9lTygmi8SVI0Vj8NDCdZ7/Y9oqvIwThX6gznSAUEkWWSRo1k9P738v0VgjC//hq9pRCEFWUYFtH74FE1lmAsTPA6tLzIYM3H5JK3apCA5T96QKwAVsCuDkLFoLwD/VsIIii0BuOy2fNpbPnCVVDEDVV+YiKyf94rnZKIYgqIuoMUeDmZFROlfmQ3+pyceLz0JULQfhHWOPgV9Ip0Fm6ZoFtmDsI1VaNr4fJzIvTqYQKMIhiSy59aI1CvzVWWyYzHZJO9d3uH3n/KYUg/GNV7ZRCEFVEtK2zZx8CfmmzO58yESupqi3WcwYxvisEkXO5XTU82H9cJXhuWex692KD77QF2gu8G9ZY9TQUQai+Op3SlKFDwW+haPNRItCd7tVELvHKYSWyEoKoBL3K8gpBVIbflNya8AYA8y2wsMwZ6CxItTpvX/Z4VU5OEP7R1T4Rq7HF9yqxrSN+KBGttd0RNRxBFI3NvJppvCU85vCVKOkHogsGCHifOnT45bFXQS13IODPFvAnw+Krh4YS99nTC0H4H8/VTikEUUVEJxbyeasAPtJW7DgzfzqdSvSVW1VHx5IPWBRSu9wtMZ08FjEhiEBIF3kFe6nvnCW7yLzhCEKdu6Kx3pVgXp7vQ86MlZk3OxwFffllFIfk8B+/avL0VhxBV2taKwQRaDxXNbEQRFXhVN6hS2MWWetsTz6qGl5g5uPSqYTyGg0UedIlpLJnoDIhiGBC1YSkMIlw3NBgv+1FNX2ZLvJuRIIoCr2hLOFUr2yOdG6hNYo6r9u0EHDX2FyOu706Zy+ktXvxbkY2rO4sbMH+8HtzPLNf4XnVQnohiGDjuZqphSCqiWb+QnA8O28lg09yFJ0l0BUhynzHb+C91lh8T4OxyhE7XzHMOelk/zfdyEYIIphQc/4mBlaDaJEtp3ok6CivN5ij0fi7EMrFOnK+BteQBOEMvZEL3cJsgehjuX67hNZwQZOisZ4fgvHlqb/TdS1h64teMcMU3mTQSiZ81pZXvSOxPJ3qt71DMfGrEESw8VzN1EIQ1UQzX5Z6X4BDkXVg7KUpPgti9TrZdWTx+s2bd3im4BGtdLvj4y/PYwrtA8KJrwdaUxN3ytOXfnZpQhDBherytsMrIP4OsrjYfoc08W61eiSHrnIJ5d6QBKFQ0b92mMfLIwyHDlGXU4Dat9zLCC1PJ9dscG5i1KaHmFSU2ckw3xPc5H76EIIIPp6rlUMIolpIOspRTzFaRvgWxyM0FdWmdnyGlV1U6m1rIYjyYG6P9Sxhhnq/wxmW/RUC0gx6DOA3g9BleyZUV1njEkRx6I1C+z0D+bkh6vFoklr11XOq6wH6B4hbbM+BOoqjR2FYhw2vT/yvrh4hiPLGczVyCUFUA0WXMtRbEMaczEowjg4Y3ttZYu4ZRzPCy+8ZSDxbqslCEKUQcv2d2mLxY4npUg1JuGVS9xVrmHkPgNrziRqWIDShN/JN9vfinA6EaNfSj8Hin+Xf0Q4IviIHOmJ4/Zo/uGUUgggIaRWTC0FUEUyXonLv8BLM84ixHwB7TKVStZuvW5rcYxg4fWh9v3rdzNcFtxBEKVi9f3dTg2hyvcJEZ2ZarGvnjNJa2zsKDUsQqg8aCyTX19z8IlnGw1hZYlxmZiNnjYzc8LJXPUIQfqVQ/XRCENXH1LVEZZEU2Uxd+RDHSgerwnrPs2UwATxLwIPMdFM2RLfds37Nk0GbKAQRFLHi9Oqe4clnqY0YXwLQBWDn/ClwFMwPMeF6azy7SlncNHI0Vx0SbV297yeL1XOku+R/9wytEQTNnNXdGB0Cxqd54vK+gJsq5rnX36H+Kxg/NTPZm53WSnKCCIJ0fdIKQdQHZ6lFEBAEBIGmQ0AIoulEJg0WBAQBQaA+CAhB1AdnqUUQEAQEgaZDQAii6UQmDRYEBAFBoD4ICEHUB2epRRAQBASBpkNACKLpRCYNFgQEAUGgPggIQdQHZ6lFEBAEBIGmQ0AIoulEJg0WBAQBQaA+CAhB1AdnqUUQEAQEgaZDQAii6UQmDRYEBAFBoD4ICEHUB2epRRAQBASBpkNACKLpRCYNFgQEAUGgPggIQdQHZ6lFEBAEBIGmQ0AIoulEJg0WBAQBQaA+CAhB1AdnqUUQEAQEgaZDQAii6UQmDRYEBAFBoD4ICEHUB2epRRAQBASBpkNACKLpRCYNFgQEAUGgPggIQdQHZ6lFEBAEBIGmQ0AIoulEJg0WBAQBQaA+CAhB1AdnqUUQEAQEgaZDQAiiSUTW3R1/w1iGbgFh70KTGTg0nez/RakutLYetS1Fxj9LjKNB9D4AW+XzvMLAM0ToawmFVo5lzL5yyi9V/0z8PRrruQaMY3V9Y6aj06m+G6rR77au3veTxb8BsIujvBcs8L4jycRGXT1tsZ6vEuP8CtrwCkD/IPBfmfguk41bNiT7/gKAyyyT2rqXvo0y5vEgOgiEt4PxpnxZWQBPA0iRRdeMjW33240bL8+UWY9kqyICQhBVBLPcolpj8T1DFp1qZkNnjYysfk5XTrkE0d4V35stug7Arm7tI8aVcyJ8arkEVG6/mzmfF0EASLSENx05MDCgFr6KvmhH7xdA/GNNIbUmCF27HyCDTxlan/htEKJY0Nn7nhD4pwDaAIR8APIPAKfstgv3JxIJ00f6ipL4mX8VVdDEmYUgplF4sdjiXUyOfIPBnwPwRJiyscHBtc9UiyA6OpZ8wKLQnV7koOpiwlfmhvgKIQj/g8HzBAE8GYbZnUze9Kj/EotTdncfN3cs+2oCoIMbhCBUM0wQLhx/bfszfOzyKdrZewTAVwF4YxlY9I/P5c9v/HXipTLylswSZP6VLGyGJhCCmCbBdnb2fsQE3wFgXr4Jf602QbR19lxEwMmaLj4H0KsTf+cdmWnp3Ig1KAThfzCUOEGgGmomD/WSauh0nCAKAJkg/vrwYOL7XieJ9ljPEmao02tBpVnIn2XgjwS6VxEOgffgidOFhkTo8vHR7Zb7ICP/wgMQdP4FKnwGJRaCmCZhtnbG5xuguwDsUAuCcFFJPcwUio8Mrr7f2e1yVVjTBN+0V1uKIAC6sSX83LGVqJk81EvlEIQnoRSPh+Pmjo+/PI+NsDq9nA3gzY40LxtsHJxKrRnUCSMajb8LIVJ3J2+1/Z4F6PtWJnzeyMgNL9vzxePx0FPPUM/r8+EiR10mg7+aTiZ+WE2hB51/1ay7mcoSgpgmadV6gKrjc5bDavK+s9DFauxqpwmu
hqtWQxCvAGgBMCff2MdMgxduWJ94rJzGR6PxrRDCTQAdmM+vLm0Nmw4/6AkiEEHY2xyNxv8NIeoDsM+UvjDfAgvLhocTo44+UjTW80Mwvmz7+ygRjhka7L/JC488sdwCYE9buoqw1NVX6/lXjswbMY8QxDRJpdYDVEMQL5JF+w0N9d0zTV2eUdU6CYKBNDHGQejKd9QkoqOHBvvWlNPx9vbeD7KRs17aKZ//5wC6bSfOuhGEqr+9ffHb2Ajf5li4tW1Y0BXfI2TlTg97TG5OgHPSyf5v+rnc7uhYGrPIWgdgWxt2y4eT/ZeUg6UQRPmoCUGUj11FOaeBIMreQVbU0RmauegEwbgbwO0gXLCly+WrmaKdPV8CcHG+rBeYcQYRzp0uglDtcLRpomlMJw6n+n5iF3NbR+9RRHx9uScAzelJFVU1yzBVWK3n30wZ9kIQ0yTJWg9QzQlCCKKKstYShMFfB9PNNp+FslQjxQsk324xzjWIbp1OgtBemhNWDQ/2HzeFIIqNIwIv7ho/jt8ZvPmQVOpWpcqr+Kv1/Ku4gQ1SQMMThHOg2J3D5s//fCSy1YsHEPgkgD5uswhSg+hhBt9ojWdXbdjw8+fLxXvBgsPfFIqEDwPhMyB6r8O55wliGjRhXZXdvMNIKUuLts6eQwhQk9zPN2VB93OJXPritKjaScspP+WXarQHVkpH/ThAd1pkXT4ymPiTH1WDsz4l7/DcF1pDME5g4hiAtwEI59IRngfjcWLckSVaVaFTV6muQkcQ45FQ75xs9mqbWWpZaiaNemm5BU47jBrqqmJSgOjutZw7+w/vd8w2b3ht7OcM7FsAkQlnpgf71enH96eZK55Wfn4KrmT+uZVfzfXBTx/qnaZpCSIai38SjB8DOc9gry8LpgthWWdrLtNc8+3VtXT3kGV9m4BjJhch73r+DmCFl3NPJQPUzwI+XQShPLVD4cw5TDjJD1ZKX8/EJ4wMJh70OeCprSPeQ0Q/APAWP3nKqMNPsZNpdATREuFFYxnjKLtjGwNXp5P9JwQhRIcqZxNZtNA0rJaGJAjG3arfAwOJfylwcr4bmdeWg9AO0EdAvB0D30sP9n8vCMBtsZ5TiWGzXOL7x8PhhRsHVv8zSDn2tJXMP2edtVgfyu1XLfM1H0EwLyKi3YGcrtdpX+2KFRNuy7TwkX6cbvx4H7tUZDJw7dwwn1yYMNUaoI1KEO3tvXuxgTUAvyPgQFWnihXDyf5LvRbPnPnj01gBonN8euHamzHKxF9MDyZWBVmg/fTDjSA2W8buU0JjEB5BlruHhxNqA1Hy06mXYGKJFcKeDUkQGhVTyU6WTkBtnT1XEXB8ISkBd/1r65bD77/zurz/TulCnCmqRRC1Wh+C96j2OZqOIACsBnCYjRyUZ6cyJUyBaczL6YYJp5fayXg49wSpR0tG0a6lH4PFJ06I1doxr4oomEW+zIxbiei1vNhHOUw/Sg+seXxiZ1Y6FlO0I34CyFig0jPz1kQ41GYJMg7wOsCw78D+ryU8dv7AwM0v+infORyjsfgCMKlYUAVLm0KSLMCPgGhEyQTgN+ese7bE3imkK+lw5SIPE4wnmZAm0IQ9PfGHwPioZtPwAsM4MJ1cM1LN6eRGEMA2WYf3s8nMi9OphC/Vok69pKx3NDrzuquYNG0AMc4dSvWfWU1sdVZQ1ainkvlX6F8t14dqYlitspqRICYXF2bcbIV4hdPWPLcLC2MFmL7hUHk8YGVC+7jFO5oYQNavHAueSYQ1IWRXOMNgeDj3gECXzQk/d7Kbo1TQS7KgC3jQS+qg5buYPWaJcVk2k/m2895HYfW3p7G3QcaPHaeNUWZeqltAc/rdOREVKkTdL+U/HjANfEbnX6D6sDlL/0nAWVPlXr41kdtEcyMIdXJ0OrgFUTPp1EtDQ30PNAJBtHf2fIsnnOZsc9A/+fldtDQX1C+TxQcMDSWG/ZZRKl3Q+afKq+f6UKr99fq9WQnCBPNZu+2KCzyCeVE0Fj8NTOfZVBOuu7kcqRhYDaJFNvB9qUFauxfvZmRDPwOo1Z7XYl40kkoob+miL+gADbqA15Iguru7w+OZnS5jwmft/WXm49OpRL+XOmev7vjO4aw6BZKy6c996r7AGs8c7CSVts6efQj4pc35zJPg88VRtCN+Oign98L3DBu0ML2+T12OV+XzIogiax+faiaNemldS3ib+MDANZunmyBc4nqVZaXlJQD9qZRvV2q2IHeIpYQcdP7Ve30o1f56/d6cBOHuwTkFt9bWZfOMiPlrAB+cXIxcLCra2+NRNkidHiadcziAc482tIBHO4MO0EYiCI0apKSqyC4YDVZaa5+ii0qf+m6t3KsYgju3m3SG+7Zd1mqC7PlSMxXhavMxmE6CaI0t+zCxmSDgPVMmGOFHw4P9X6nW/c78feLbtWymhN0CCoBnSI9yF8qg86/e60O5/ap2vmYkCF+TTQGldrpj2Z1uBBCfBM5lkdEEtvOzW50iD40jkaueOOgAbSiCcKga3E4AXoO1WF3Bk7vlQj6NqsGvPT1FO3uuAbAQ4CcYdB9gXFvNewgvgsgRiDNMN9HFw4N9KnCi63sKjv5OOfXUkyCUOvDZZ82dMhz+BAgnEmM/p3UaAw8jbOxfuCOrdGFS5DBnM10OQMVjmvyCbNKCtCHo/Kv3+hCkL7VM23QEwQFDKbd39HyXCWd4EURn55E7mMjcblcRlXMp1tm55B1ZhAYIUFZWE5/G01T9OegAbRSC0LbDx+W/cxC3dsQ7Dcqd2LbJwaSRa1tH72IitsfuqZlVUtBJVoogNE5lnhuOYlynEmYVCCJoF73S+4qr5LdCpb4hg1Y6VJZQlktjcznux/LQb12FdEHm33SsD0H7U6v0TUcQAAJ5VBbtQjUniPb2+Edh0F0M7JgHepyBg9LJfqWe8v1p4/e7nFiCDFDVgEYhCA0Jmsx0SDrVd7tvoBRB5u5twutt8XqKMNdZs+TeIwD/QfnAWNnwOjeDgyBtKSdtKYLQjAVlRbZoOJlQpFj0FY0Hx8aigQhCvTR3wnCy72fVUC3lxnXWuBRg5W80+THwP4aVXTQ0tPaJcuRTKk+Q+Tcd60Op9tfr96YjiKD20H4IQnMZWvalZtGJxeFIVM4OppEIwrnzB6Azny05fjVmuLo3FJQ9/H/lrZLcynwKhHXMuGlueOukutAtWXkVEpQiCFVFEDWTfZzqTlMNQBBZIropQ7TinvVrnqwChMhHiVVPsx5QT3JQdQUhiOlYH6qBbzXKaDqCgM+LygI4PgnCGQKjbLf+ovpmGEEEdDYKNEZ1fipKNx0ZoxuJ8SkfhSn/i41gXGFmsjdXEmKlVF1+CMKvmklzOiy6a5kGgngO4H8ClGTgl5m5vL6aqh4Xyz91KBmxwuYRIwNrnyolg0p+D0gQdV8fKulbNfMKQaiHcotjJFWPIABtWUEGaCOdIOpNELmd+IRfy/K8X4vfpyvVW8ZphvGf6eSaDdVQh9gnnh+C0KmZdKpL51jQvdtRBYJomGCNeYe1nxV73/NANoxl9wwknq3mIqcrK8j
8m471odb991u+EETNCYJHQogcmEze+IJdKEEG6GwniAJuOUc4E0cQ47R8DK6Qj4GeJeDbY6Pbn18qmKKPsiaT+CEInZqJgZXpZP8p9rpKqZdcVCJ196QOgo9LWhVT6xAiUhZmhZcUC0lr+v60sz1B5l9tCUK/PlQB66oUIQRRa4KY+Sqmsk9blYzgnOppM3URsAyE/TRhPOzFV/3ZSr8EUaxmmhp0zo96aYYQhDI9/qImhpoKYXMFsvyVajrClRpbDUMQLutDqfbX63chiAmCcHrsln1J7bSXdrtUDzJAG+oE0dF7IBGr+Eu53TsB/4TF+w4NJe6r16DV1EMdHUt3sWAeCMLnAJqviSob2K/Fqz9GGfWvAAAgAElEQVR+CaKUmskxDlzDgzezikmFaW/Z6sUzeSIEiv3UlwXxGbvtjB96RESoybAKMv+mY32oSafLKFQIQhFEV+/7p0TgBMoy3dTFwifGlUOp/s9VcsRtJILQmPz5dlwsY3yWlUWFYg5b5rX2cB7KPLYcc1y3BvgliFJqJodRg2voimYliAlyeOkiBn/eQQ5VNZcNOlACEcQ0rA9B+1Or9EIQAKbDESbIAG0kgpjfvWzHOdnsbwD68OSg9OEl7BzAKqyEZfBtr5uwZl+PznsfAX8h5ktSqcTfcotqNP5vCBkHA+qNZ6tNLS4hWAclkzc96mcytHYtebdhhX5rfz/CTzRfP2Xn2ucRasNZhkbNlLuXCoczmbEM3QLC3hN53IMKNiNBKHKYs/WL54JxqoMcNjFwVFA/I7+y8ZMuyPybjvXBTx/qkUYIIo+yxpX+T1nD2D+IzbcmNMRMDLVB0VjvSjAvtw3Qv8HkhcPDiUd8Dlqdf8MUtV40Gn8LwjQAxrvyZQY6qVTL47saJwiNmulVi/kAEEZtbzx4vj7XhAShC5apSPBRGHTE8Po1f/A5VmqSLAhBqAbUe32oSafLKFQIIg9aTYJxwT0KZdAB2iie1Aqu1o74vgbRLVPfXqDLx0e3W+7HUkgfNpnXtoS3Oarg6KaLo0VEfWOvbXeMrzqKCUY54i1Jp/rWljFPirIEOUHkThyO2EwqlItl4CVinJ8v3DMyarMRhG6M5OI3GXRYNaPqlivLoPOv3utDuf2qdj4hiDyiLuF8sww+PZ1MXOhlR+8W7tvtnYPcItsZn29/IaxUjKlGIggXrEwCzhkb3f67Xgt43nu2D8A+tsE8qguN3h7rXcrM19vDtYP468ODie+X8GsoCvldCt+gEysoQWjuuTbm61QX6p7qJd14AdCwZq7t7YfvZBmROwn4iA3XTSA+ZHgwoXxSpv0LOv/qvT5MO0D5BghB2CTh9iCIepjIDBmnOtVNKurlk08bi4j4vwHsOlWo3jtq3cW41xsXjUQQuR2x/nEltW6vMxinpVKJPzsGObV3xf+dLVxS/I64Hiv1YJAxJ7KOAHUHUfhcHyXKLaStR21Lkcx5BHxhit67yqGpgxKENk7Xlj6VVJ810wlCo2qtuplxpQto0PnnMebNWqwPlfavWvmFIBxIVuNJQT9RKLWXvbmNJJ4H00sMzobY+GwqtWZQ/bnRCEK1yQMr9fNTDCTVk6AM3paQu4h1Pk2q+jscRnaJ87W+glj06qzcr1OfNSVuAXMrQOrOIuwQ64NkZT9VzcBvQQkit8A4Q4AXGunjQaFmIQjdWxxVWqyq6m8TdP4V+lCv9aFKmFVcjBCEBsK2CVt/9dD9mwMirMI7KI/Qk3zEraForOeHYHzZrQ57yIVGJIgJkogfxEzXaTxjfUDnK+4OtcXixxLTpZr3pn3UgQfYoJ5q673LIQjNrjXXfj9PkjYLQWjUgn5k5CdNVQlCbcWCzD97A+u0PvjBpOZphCBcIFa29BHmC5h5iWZHqsnFDxHhtKHBxG1+4/7ont+0F2x/k6JRCUK1NzhWeAXE30EWF/v1nm1v792LDf6p/XXAErNjlIErOBP5xsjIDS9XeyaVQxAuaqaS6iXV9mYhCI21T7WgrzZBIMj8c3Yi+JgPvj5UC7hKyhGCKIGeGgghy4wTsARE77WFdFDvVT8O0J0MWr37Lua95XiD5h5LCRnHM1ufBdH7HLvkyaiejUwQBQiVeiEUMXsZ3AvQOwHMy/+mTlbPAnw/YFzdErZuHxhI/KuMgUsLOnvfHWY+lgn7g/B2mzxydRDwIAjXjrXwL3yc4spowkSWcggil6/opTk8gix3Dw8n/u7VmGYhiCJcyka4KGPVCSInD5/zz60btV4fqgdfeSU1PEGU1y3JJQgIAoKAIFApAkIQlSIo+QUBQUAQmKEICEHMUMFKtwQBQUAQqBQBIYhKEZT8goAgIAjMUASEIGaoYKVbgoAgIAhUioAQRKUISn5BQBAQBGYoAkIQM1Sw0i1BQBAQBCpFQAiiUgQlvyAgCAgCMxQBIYgZKljpliAgCAgClSIgBFEpgpJfEBAEBIEZioAQxAwVrHRLEBAEBIFKERCCqBRByS8ICAKCwAxFQAhihgpWuiUICAKCQKUICEFUiqDkFwQEAUFghiIgBDFDBSvdEgQEAUGgUgSEICpFUPILAoKAIDBDERCCmKGClW4JAoKAIFApAkIQlSIo+QUBQUAQmKEICEHMUMFKtwQBQUAQqBQBIYhKEZT8goAgIAjMUASEIGaoYKVbgoAgIAhUioAQRKUISn5BQBAQBGYoAkIQM1Sw0i1BQBAQBCpFQAiiUgQlvyAgCAgCMxQBIYgZKljpliAgCAgClSIgBFEpgpJ/WhCIxRbvkuXwIIB3FhrAwKHpZP8vnA1q7YzPN0B3Adgh/9sLFnjfkWRi47Q0XioVBJoEASEID0FFo/GtYOAEEMaHk4nLm0Sms6KZQhCzQszSyWlGQAhCI4B4PB76+7PGwczW/wPofUw4PT3Y/71plpVUb0NACEKGgyBQewSEIDQYR2M9l4Fx4qTqQgii9iMxYA1CEAEBk+SCQBkICEHoCeIaMI4VgihjRNUpixBEnYCWamY1AkIQQhBNOQGEIJpSbNLoJkNACEIIosmG7ERzhSCaUmzS6CZDQAhCCKLJhqwQRFMKTBrdlAgIQQhBNOXAlRNEU4pNGt1kCDQ1Qcyf//lIeO4LrSEYJzBxDMDbAIRzMiA8D8bjxLgjS7RqQ7LvLwBYJ5/u7vgbxjJ0Cwh7+5IfYdXwYP9xXmmVqeyTz4Q+QWQdB8a+xW3jPxOoz8yE+kZGVj/nq14AbbGerxLj/EJ6u3NYa+tR21Jk/LPEOBpE7wOwFQATjCdh4FcW+L9HBhN/csPBbxsCpqO27qVvQ9ZcbIAOYGBPADsDCOXLMQE8S8CDzNxnZrI3b9jw8+dL1dHIBNHaumxeKGL2MrgXRO8F4035/mQBPA1gBKBES9i6fWAg8a9SfXX7fcGCw98UioQPA+EzjnpGATwO0J0WWZcHkbnX+FLzLbLViwcQ+CSAPg5gXr5trwB4mME3WuPZVX7k5+zT/H3i27WM0SFgfJqBD9vKLmt8lIGZks0TxDRowroqu3mHkY0bL8/4kU3RWGTc3RLhRQMDiVfbu3pibOFcAK
35tUnN9buZcMnuO3M6kUio/jXs16wEQW0d8R4i+gGAt/hBl4E0E58wMph40Jm+mgSRI4anaQlRblBMevl6tDFLRDdliFbcs37Nk6X64jKB10U7ew8C+KcA3uxVhsLBAh27Idn3cKm6KvydJiYH/wCgTwQoK0uMy8xs5KyRkRtedsvXiASxV3d854hp/IiZl0xuVLw7/gqIv4MsLh4eTqhF3denNgKhcOYcJpzkpx6vse+s0I0gorH4J8H4sfILKtHILJguhGWd7adPubln0nfA+EJ+Q1MKA1/jw1nIXl1Ldw9Z1rcJOMYPZgD+DmDFbrtwf6lFXEcQZibTE2qJfA2MU22bIXuzxhk4KJ3s/3WpDk/n701HEGoBfupprADROS7Ae+E5ysRfTA8mVtl30dUiCFXO5iytJODTZbTtaTL4mKH1ibu9OlA0gZkXqYlFRFf7nGCq+BeI+JihwcQvazH41E5zztYvnusxOUpWy8D/GFZ20dDQ2id0iRuNIKLR+LsQolswcUIK+PFANoxl9wwkni2Vsb29dy82sAbgd5RK6/hdEdCK4WT/pV4nSN34IqLdAVwQYHyBCbdlWvjIjb9OvOTWzmg0/m8IUR+AfQL2RakCHoZBh6XX96kTsefX3hXfmy26DsCupdI6fjcZuHZumE/2OukVj0UeYBgpAn/NfR3gofG5+JQXPgHbWpPkTUcQ7bGeJcxQwlbqk8KXU6MwIU2giV0n8YfA+KhmUL/AMA5MJ9eMFDJ3dx83dyzz2nIQvXvib9a/A4V/5/6wAaD7Jmtja8NwKnGVXSK5I/JmSjBy6iTn9wqpEwzoMRC3AOgAYw/N4HmFmY5Lp/rWuklbs8M7n4DP2+IMKUXa4zDwOzCNAdZHAJqv2TVtAvEhw4OJDVUeWRSNxU8D03lF/cup/fjPgPFAXkYtYG4F6F26XR0xrpwT2XTSwMCAOv5P+RqJIDxk/wrA/1voL4O3JaATwG7O/jBwdWZ0+y94qTWisfgCMKlYUzs58mcBfgREIxMy5zeD0GVTbW2ZJ8RfHx5MfN+NJJzjC8BqAIfZ5pEJwmMAUqouAu/BQBuANxb1ycPBNLeJ2OqliwFWY9f+jYL5IZDx+5x61HO+8MD4XBzmtci6rBeqviD98CQ7zVhUKjc1z+fkOlaYj7mlBZ8E4e0Alg8n+y+p8tyrenFNRRA5neucyJ0AlP4z//GAaeAzG9Yn1KCd8uV39P9JwFlTFyC6sSX83LG6hUcVEI31BHKU6+7uDo9n563knG52yvc0M/3H7rtatziPqbkjr2ldSJSbfAV9vMq8CYZxwPD6NX/QSVszgW3J+CEy8KWh9Ynf2heAHG4tkR9A3U1MresBK5w9cGRg7VPVGlmawHiKrR+12Drxrbvibt1xXalMjEj26wCf5iCKTWTRwqGhvglCsX2NRBDRjvgJIPqJDdtRIpwy9tr212gWfIp2Lf0oLOtaAB+0dellsviAoaHEsE4W7e2L38ZG+DbHCSWnbslmMt926v3VSftvT2Nvg4wfO04bo8y8NJ1K3BpwfJnMuNkK8QrnXMvFLAtjBZi+4ZDfA1YmtI/ujq2tc2k3wVL9KWz01G79vLlhPl+3W1/QFd8jbNFPHBswk4iOHhrsW6PrS7Rr6cdgWb9yEKpJhDUhZFcMDq59xp4vp514hnpe32xd5FTVEuiyOeHnTva7WcmXO0rA196yC19iG/fU2blkD9M0XhweTvxfteZdrcppKoJo6+zZhwClFplgZsB1ANoAo2hH/HRQbkdb+J5hgxa6HU+DEkRrR3xfg3LqBduphkessHlEicWXop09Xyw6vjPfAgvLdDpcd4LwVlO4qeYI+PZQsv9bVRpgFI31rgTzclt5f4PJC4eHE4+UqIPaYvFjielKO4kx09HpVN8NjUoQudNn9tUEQAfb2lhyd9jWvfTtyFp3EPCeQj4GVqaT/ac4+5rbgGR2uowJn7X9phb649OpRL+Xykjdi4Sz6hRA3bZ60tZ45mDdZbLL+DLBfNZuu+ICD3287uRoMvNiHRm1dfZcRMDJk/0hunh4sE/9X2tIotKpk9qczbjZ3heA17WEt4kPDFyz2Y5bPtDmahApFWzh86Vma+1evJuRDf0MIHWxPJnXYl40kkqoqMBTPt1mJZeA+evDqYQyKHHtU5XmXc2KaS6CiPWcSowfbhlUpa2JVFplVWJETHUZNLljc1t4VPogBKEm71h23iqAjyxjUVRZqK2z57/yp5ySg1E3gXP62LCxf3pgzeNeIyVvhfITAo63pfNDsr4G4PzuZTvOyWZ/A5CyQil8JRfLQsLOziN3MJG53T4xmXBmerBfXfiXnJTTEe5bjS2KmOttC73vUOLRzp4vAbh4S8d4JITIgcnkjS/YO9ve3vtBNvg3tp2wUr14qors+fP3Iyr/W/N/d915awnCY8Nir0c7zzTy+/B+x2zzhtfGfm4/DbjJzin3aGf8ACC3GcttEhl4MgyzO5m86dGpmMWjbJA6PWxb+DsD56ST/d/0s2BrMFMLvnbj5kIQj5kGL9RpNnxNpgZJ1GwEMcXEE0CiJbzpSDdVkQ1jtVO/BsBCgJ/g3H2Cca39HmLKhAqgYmrr6n0/WbnJu0s5i6IrgQFXp5P9JzgHs8sOz/cirFEBue7ygo5RdelIIfoiT6gAPwqCGWJzf+fk9SrXSc5wMSluFBWTph2vWswHjKQSyVL4TVw4s1ooXwXwewO4j01e6Tw5tnf2fIuBs20LnesJwK1OZxluO2/N+PI9PiY2SzvdCCA+2Q6N/HRGIV4btikkNLG7vwsgpeO/j0EPRsi6aHAw8Q97uqITij9twxT4igkcWvJ3IQi/a1OpYTKtvzcXQXT0Libim2yIaa2SKkU0yAmiraP3KCK+3jZ5tTuaUm1q7+j5LhPOsO0m7x8PhxduHFj9zykD3+EHASDQTkWnEnFTbZRqcy1+bzaC6Og49I0WzVUXx5+0yc63VVIpDLWLaRnRhVs74p0G5XbU23jtvDVGEIHGc9E41hM8tXX0XEuUuxPLfUGskkphpjuJEuPcoVT/maXy2n/v7FzyjixCAwQoK658Q+nE4VSfum+a/LSbFZeTb5D6GyFtUxGEuqgKWaR268oCqPCZAP9B2Whb2fC6IE5nbgIIQhDtHT1XTNUN63WipYStuV/R7lY0O7zAO5W2WM8ZxPhuoU0E3PWvrVsOv//O69ROdlq/ZiMIBVbx7jwH4YsgrDEsWrV583Yb/TpdOcHXLFImMx2STvXdHkRQE3r18Hrb3NHa4WvG1+8M3nxIKnWrsswp+RXldzkBtsd6lzLnNlZ2A41RBm553TJqVUt4mwHnvULJyvMJ2tvjH4VBdzGwY/5PZfkcaO+XNP0Jcpr124dGSddUBOGir3di+RQI65hx09zw1slyBplfgqjmbly3W9HpZYt2eGXsVNo6ew4hwG7F8tcwZWNOq446DVLq6Fi6CxvW3sw4Kr8T33LZ3+AqJoWRVl89FbxREO6DRas4QnekB9Yo3w5fF5fOnT+AcaUeAowpJ8tSsmLmrYlw6BSdv
MYAwDm+gm4e/BKEsqwz5kTW0YSJrO7LMvBHYvRxiG7efZ71cCmHtUIhms2Wp1GKF3ZFJ6ItXtKTHvCVqBlLyW26f282gshZMkTG6EZifMoHeMo+fCMYV/gN4ZCb8D7vIKp1/Fd1+t2FFDsy0RIvvwkdRn7VDT7w9Z1EYfVa1tg1RLwnwO9mpo+/bpG215QQJLrSmoAgVLPbJtSf6p6ryB9A061NIKxlNq7ZfRfzXq+FT0PmvjEvlVD3UqLfBd6t7CD5OzqWfsIiS104+3FgUyeYdWTRNWNj2/3W60RWzQ1QUX/8EYRvQ4VSMpru35uOIPI7NmV3vTxvd+1nQqpsKuZJmmH8Zzq5RjmHue7gGoYgNLpmr1g5fgeT5qK6FgOaWmPx9xPTabmd65Z4RH6bOZGuSQhCNbU1Ft+TmK7y2BXr+v4iAT8wM5GVutAiM5kgFBge/kBe40SZq/40TNnv6k69NSUIoOi0rdnc1WI+BZs3VUrdlARR6HvOEc7EEcQ4LR8jxq7PdIMoq2z/x0a3P99tF9IwBOFDBeDXPNAORq0JosxJr5q4KUcJ9nhSTUQQeYy3ECNwhM8Thcr6ABvU4/TNmekEURiX+VhJp+RjJTk9xd3msjY8TW0JotgUWQiiSmxUy2JyqqfN1EXAMhD2K7FjNRn81XQyscWnwta4hiEI4NB0sl9ZyEx+1ThBaHS0VbuDmPBe5Z/5iBX0HMB/JVDSYhrIRIx7lMVWM15Su43rXODG54z3kMmHMaGXgA+UCBRX5NlezcXOz/wLoiLSlVdp/tw9Y/fSt1GG94fBx7qEy7FXXRR5oJqYiYrJz6hpvjS5i08L5oEgfM4lFpGrg5h/gij2oi3XZFTjT6G1VtFcUn8lPdh/YRAR5fXlNnNh1prUBilTpdV7uuZK+TsD1wPGrzJhemDjwGoVzlur4ptJBOHEL2fUYG3+ONg6BozFmphKKssUn5a2jt4DiVhtEnKnYwL+CYv3HRpKbIkNFlRQHukrXeArze9smnLunDv3+feYFDqGGL35OEZTkzm8sKt5Se30p9Bd2ssJoooDcDqKUsfXsGVeO9VFH67mgn4JQvXFzwDy02e/C4HmBKENz+BVp8bnQhuuwE+77Wk0posqINoVyPJX/IR+VmXNZIKwY5UPfXIciP7bHqLFuQBpTDZ9O64FlV9uPDv9bHy8fWKvp9L8JdpM0ViP0g6oOFaTaijlQ8GZUFfBxN3vZqsUPjqPbxVAcijV/zl7XiGIUkjW4feJ0MCGinfTBVjKNC4UgnWQXy/d1q4l7zaskApiN/l+hM6KQ7dIuaXLTahpd5QLFja4mpZXdrHrvGhzb0+4xPzRDRmt01mD30G0dS5tJTY/RURR9RiSisqaTvaroHV+PhVm5TwCTrclnqLu04Yv8RG3yFm5CtdhGXzb6yFdVGTc+wj4CzFfkkol/lbNBd4PQeQWcLYOBlNUHTwZGJob3nSMj4gIuaa2xeLHEefC2xe+KZfCDeAoJ5fUfkZ/NdNEo/G3IEwDYKjQ0OoLtJMKsjAGOkFoQm14EYoOk4kTjnUHgPcXfs8vNH5CbXhGAS1eKIpi1ATK7yZTbRwll+BzbmVoYg41vBWTHx211zzwoS+vJABioWpdvC+tb4CfBd6zPz5OIG0d8UOJSIW0nzAqITyCLHcPDyfUIz0lPz9GFppQG3/KGsb+fh7lKjRA4zToN9SGEERJKVY5gW6HSkR9Y69td4wfL1UNwYBZ70MQhCBqFKxPOUMtGk4mVGiEKV8lwdT0ES75dphY4lcF5CZW3U5Xdxx3zT//85GWrV+6jpl7p6Rp8BOExpHtZYONg1OpNYN+poAfz2V9tGC6fHx0u+W+xr427DWvbQlvc5TTkbQeBKHzDifCcUOD/ZMhazxJyEkwwGNWONtlj5zc3l6DYH3QzxVRMfkZ6XVIo9dx+4pqWRTy2y0KpOqGkyBKxXHRTeBSL6Ll4dKH+wbuaAnzEbq4+C7B+kwCzhkb3f67bguGyytv48y0LKijnU7ULhE6fUWZnXhPgH4IVgYFU0IvgBnXp1P96oW+KZfafh0LVVv97DjLHb75i/nbXn//ud1WxoNkZT/l9hre5A5VE/JbN9ZcQleXlHluLOtfbRt1C11dD4Jw2VT5esDKxRCiKNyMC2ZZBp+eTiaUUYerH5RbuG+3dzSEIMqdPVXO5+Ke7/poSm5xaD1qW4pklJ5XvXm7xU+C8KPhwf6v6AaK5nj6IExe5PamgceDQSrC5Cm6d23VoDIRvoAZS4seDPJ46c2FIFRXcw+6mCHjVOcxOq/CWpl/GWxSKkFOYH5EqY9JxPdahGN1b4HnSesgMJ/n+taxxnNVtaVRCEK1pT3WczQzlBe1zQ+HH7IYJ7s8kkTtXfF/Z8u4wmEO/LTB5n6p1E1/dOLt8viNGr7rDMZpqVTiz448+TpwSTG27qePehCEamdHx9KYRdY6e+gPAP9gplMzm7fr1210lCOiwVjleOPclezcHgxymycT78kbi4hYGQ44vLvdMROC8LM61CmN/ridq3zqs4vqqUL35yw9d3jRjt4vgPjHji6py72nAVKEtH5OZKuT7MfzKj05ql4iO2ZosN8esbaUiikDwLAtThMxbED3TmRUF/q5h+adToS+drlBxOoRk8j5JGwIbH0clGvX1Kdj1dsWwDttj0Jpg8U1EkG47FYnoCM8T4zf556bzf2X92DCx8t5DtTj+UxV9FMMJNWTu/mnTffWmtEShsPILnGLu1UvglBQRGM96tT4Zc0Ym/JUa+4JVUCd0OYVp/VWtVXjyVFlWTY2l+NuT5sKQQRZJWqftvDymHp83b64+K1Z67Fqz9zRseQDFoXU06YuMWL0D7tMHH/pMgDq2UI/Xt32atXu6dhSUTo1J4jVBHqRJ9729Vmnr9fu/OI5lcCCxSSy532FiVQ45pTBfIctEmeRfrnRThCqPbqX2wIAWNK7v1BWeyx+EDOpN9l3CFB+PmlpudeRIJAzHMkal77uMHlM8L5APVF67dwwn6xTxdrLy5uQr3I+I+qjThWep398Lp/k9e61EIQPJOudJP/Yyk8d7/p6NUOFEr6CM5Fv6GLeODJ6kpDX/YXtXdsL7Ca1Hg3LEuiKsbDxTefbD7o8ugm82858wpPPGl8kZhXC2ys21Ssg/g6yuLjSS2kvoKOx+CdV+HVXtdHUzOrEc51pGGcr1ZjmsltrrdZIJ4hCd9RJgkN0dv4pTb+blwfI4FOc74h74atUhhHmC5h5SQnP7EIxvuVeT4JQjcvPlxNf78c5ALb3uY4oa6cVOtWtW/7gmPFDRDhtaDCh3s32jLwrBOFTatOQjBZ09r47zHwsE/bPeVluCQqn2P9ZAh4E4dqxFv6F1y5A1/YJnSedBUAd1+3H25Kvhin9ekvLS/8OwjImjjmilj5HwP3M3Bckyqxqo9cEVgM1w5EvEXEvOPdmhjpRTBzXGT+1snMSPsixKmLMP296QD62jlIP7Gw74Xj1v8isU3dX0ogEUQAuvzM+EGD1spp611idRMP535U8/gYYv4ZhXbXbPDzo
N4y1UzDqic9QxOxlKMsvUmq5whjNjX2A7weMq1vC1u2ldtmFsutNEIV6p84XqwOg3WwaglGAnyI2Uiasq7KbdxjxY72lG8gTMZ/MOAFLQPRe23qhAgA+/rqxwZ0MWl0qyq69bCGIqiwZUkg1EKh0AlejDVKGICAIzA4Emjqa6+wQ0dReCkHMRqlLnwWB6UFACGJ6cC+7ViGIsqGTjIKAIBAQASGIgIBNd3IhiOmWgNQvCMweBIQgmkzWQhBNJjBpriDQxAgIQTSZ8IQgmkxg0lxBoIkREIJoMuEJQTSZwKS5gkATIyAE0WTCE4JoMoFJcwWBJkZACKLJhCcE0WQCk+YKAk2MgBBEkwlPCKLJBCbNFQSaGAEhiCYWnjRdEBAEBIFaIiAEUUt0pWxBQBAQBJoYASGIJhaeNF0QEAQEgVoiIARRS3SlbEFAEBAEmhgBIYgmFp40XRAQBASBWiIgBFFLdKVsQUAQEASaGAEhiCYWnjRdEBAEBIFaIiAEUUt0pWxBQBAQBJoYASGIJhaeNF0QEAQEgVoiIARRS3SlbEFAEBAEmhgBIYgmFp40XRAQBASBWiIgBFFLdGdp2a2d8fkG6C4AO+QheMEC7zuSTGx0QhKN9VwDxrGTfyesGlYfS0YAABFYSURBVB7sP26WQufa7e7u+BvGMnQLCHsXEjFwaDrZ/wvBShCoFQJCELVCdhaXKwQRTPitsfieIYtONbOhs0ZGVj+nyy0EEQxTSV0dBIQgqoOjlGJDQAjC33CIxRbvYnLkGwz+HIAnwpSNDQ6ufUYIwh9+kqr2CAhB1B7jWVeDEERpkXd29n7EBN8BYF4+9V+FIErjJinqi4AQRH3xnhW1CUGUFrMGIyGI0rBJijojIARRZ8BnQ3VCEKWlHJQgSpcoKQSB6iMgBFF9TGd9iUIQpYeAEERpjCTF9CMgBDH9MphxLRCCKC1SIYjSGEmK6UdACGL6ZTDjWiAEUVqkQhClMZIU04+AEMT0y8CzBQsWHP6mUCR8GBH1MvBhm9XLKIDHAbrTIuvykcHEnwBw0O4UygfhMyB6LxhvypeRVaaXxDRowroqu3mHkY0bL8/4Kb+eBJHzD8gaBwLW8QB93IbPZPstg6/cfWdOJxIJ00/73dLs1bV095BlxglY4sDqFQAPg/mX2VDoynvWr3lSV0ZbZ88hBNzqsw1TnAur4Qeh2h9mcymYegG8B8Ab822ZHEswrKt2m4cH/WLVFuv5KjHOL/TJ7rw3f/7nI5GtXjyAwCc5ZJPDi8E3WuPZVRs2/Px5n5hIsjojIARRZ8D9VtfaetS2FMl8hwBlI79ViXwmwIMcDh2fHljzuJ86JhY769sEHAMg7CPP3wGs2G0X7i+1eNSDIKLR+FYIYzmYvmFb6Dy6wQ+RgS8NrU/8NiiRKqwizBcw8xIfWGUB6odp/cfwcOL/7A2aLoJQjngG0w8B7AMg5EPWD5DBp/jByo0gorH4J8H4MUDvK1FfFkwXwrLOHh5OKKKSr4EQEIJoIGEUmtLeHo+yQT8DsGvA5m1i4Kh0sv/XXvnau+J7s0XXlVG+ycC1c8N88sBA4l9uddSaINq6l76dsuYagFoD4mOCcOH4a9uf4fM0RNHO3iMAvsofCW1pDasThUGHpdf3qZNd7psGglDtPwbgi4O2H0CWgG+PjW5/vhdWRQTBvIiIdgdwgY+NzSRgTLgt08JHbvx14qWAMpXkNURACKKG4JZTdDQWXwAmFV9nJ0d+tbg9BiAFpjEC78FAm2bib4JhHDC8fs0fdPW3x3qWMEORg/NU4rd8lJrMtSSI9vbFb2MjfBuAPYvwYTzJhDSBXmbwtgR0AtitGAe6fHx0u+WlSMIDqywDfyTQvQBMEH8IjI9qMH3ACmcPHBlY+5RqQ7Rr6cdg8YkT7bF2BOhgAHPy7XuZGbcS0Wv5/49ymH5UOBGWoWKiaCx+GpjO05wa/LbfJNDlY6PbneKGlZMgAKwGcJgNiyDj6vT0YP/3ypk3kqc2CAhB1AbXskpt7V68m5EN3w7gg7YCTGbcbIV4xYb1CUUQk19u0TDpbDC+PEX1wXwLLCxzHtknFijrVw7yMYmwJoTsCmeYh3g8HnrqGep5PejeRQDebK+bQJfNCT938sDAgNL1T/lqRRDz94lv17KZEgzs6wcfqCNGbNmHDM5eBdAnpuQBfzWdTCi1i/ZzwSpLjMuymcy3nXrz9vbDd0JozkpmPmLKgkz40fBg/1ecaq2gl9RBCaKtI34oEa1xkJZSf33fyoTPGxm54WV7xz1UdiZ7YKUhiEKxruM2X9eKvHrQrt58wMqE9nGLR1XWpJJMFSEgBFERfNXN3N7Z8y0GzrYvZGA+a7ddcYGH3l+3UxwHeNFwMqHIIPflJqWB1SBaZCtf6XxXDCf7L/XSy08QV+hnDpXOqMW8aCSVUFFb60IQmsXI9IEP8hfZlwKs7lsK399g8sLh4cQjzvZ3d3eHx7LzVgF8pB0rZj4+nUr0u2GlLmXnbPXSxQB/3pZvE1m0cGio7wF7PbUkCEVWlhG5k4CP2Op8gYiPGRpM/NJr1OZPOT8D+B22dE8bbO6XSt30R2deF4LwIxfduDWZeXE6lfB7kV/dCSilFSEgBNEgg6K1ddk8I2Kqu4PJ0wMDV2dGt/9CKVWIbncJ0I0t4eeOLezw8/caijC2LXSZgXPSyf5v+rm0jUbj70KIfgPgrZOQuZxUanGC0OED+FMVqfYqay1jTmQdTajlcp/SsQ8l+7/lHALt7b0fZINVX7eo+Zi/PpxKKGsdT0uxnKWQZakYS+/3qqemBBHrXcrM19tOMuqUeNzQYL/6W8mvo2NpzCJrnX2swOUkpCUIl3HhrFg75glnpgf7zy3ZSElQFwSEIOoCc+lK8iqBtbZJ7fqGgq60dseioC5JORPqKhzX2zp7LiLgZFvewMf5aGfPlwCoC8/Cp21jLQjC2T8AricAN7SLy+D7x8PhhRsHVv/Tnkez6P0paxj7u5mvFhGM4yRIwF3/2rrl8PvvvO7VQtpaEUR393Fzx7KvJvL3G/nq+HaYWBLASoiisd6VYF4+2TfCI8hy9/BwQlmzTX66U53fU8DESW2nGwHEbfXIeyCll4u6pRCCqBvU3hUVL+DBJnVn55J3mDCU+sAE0b0EpDjLNylTy87OI3cwkbndriIixrlDqf4zg3Rf1ZFFaIAAZaUy8TGdOJzq+4m9nBoQBLV19lxFwPGT1QJXp5P9J5Ta0dvbpWn/qxbzASOpRLKQTrvAEl08PNinyNWXn0lrR7yTiG4kYBOAe4logLPWLfYFulYEoZMRMx2dTvXdEETWqg8GkTpxbpPPN87AQU4LOY2Z65NhmN3J5E2P+qmvvaPnu0w4QwjCD1r1TyMEUX/Mi2r88H7HbPOG18Z+br985Soetdvb4x+FQXcxsKPXZC8FhX7xLH4BrtoEoSM4JnwlPdh/Yak223/v6Dj0jRbNVRZin3QjOKX2oIi5niYcySY4kGlJOtWnTndV+2pFEG0dvQcSsepjwd/hGTZood3c1k8n5ncv23FONvsbgJR
zZn4vgCIrI80J4ncGbz4klbpVOcOV/Iryy4uCJTGrZwIhiHqi7VKXejgmy+FBAO+sxaLU1tmzDwHqdFEwqSxr0VBtK9rxMe5uifAiu19E9QlCc3IB7gDoiSDiY3CEgP3t/h/Ok5Sm7S+SRfsNDfXdE6SuUmlrRRDRjt4vgPjHW+rXq9FKtc+v+se5wOvUaV51CUGUksT0/i4EMb3452rPm7euB7BHvjkmMx2STvUpk9eKP42DlufbA4EmdB0IQrOYVozJZAGOHWt7e+9ebPCdALZXaQj4Jyzed2gocV/1KgVqRRBFC65GPn774ee98EoX+Erz++2LpCsPASGI8nCraq4gO+5yKq4pQQBFZBOkP34WoXoSRDWx8pJVUxKEhmwqXeArzV/OfJA8/hEQgvCPVc1SBllQy2lENRe9Yp0zj4QQOTCZvPGFQtuC9EcIAjvkcavKi3I1PUEAiZbwpiPtzpGVLvCV5i9nPkge/wgIQfjHqmYpNZYnomKyoR2EcCoVktN6R1RMONZNHaf+XukCX2n+SuUt+b0REIJogBHSTJfUTnNcnzb+rj4dfk4QGiusqhKofQjIJfUEGjqLNWJcOZTqV9GFJ79KF/hK8zfA9J3RTRCCaADx6swvyzFzbevs+SkB+wH8BIPuA4xr08k1I21dve8nK+cZvEu+u2UtsDpzXN2iEWTH74cgtKanVGxyWQ1RagwGApu5Ks/28SytZeC9BPzZAv4UhnmR3TegZncQDou1ck9ALpgXmRZXusBXmr8aMpcy3BEQgmiQ0dHe0XMFEz67pTm8riW8TXxg4JrNfpqo9RXIO0g1u6NclbyDkV/07iBgh8LCbVh8td1CSeuTAqxMJ/tP8SMHlcaPQ16tCGK6HeUQ0I9BCMLvqJqedEIQ04N7Ua2aUBLaIG9uzdXEWpri66AJtREofIRW3wzULdSGJszHywYbB6dSa5T/iK+vPdZzNDOusTmRab2DNUETA4Ul8RPSo1YEoSXTgKauygdiPLPTZVM2LH5DbQhB+BqLzZJICKJBJLWgK75HyMoFwyv4Qqi4Dj6D9WkmNKaG6qhJsD5HHQUoq61iUuV2dCz5gEUh5Z8w+YiSuv8Ym8txP4/MuIRS/705ntmvOHR37sGmKYEN4TNYnzZwoiZUR60IQmGl2WxUIVifPtxIpSeASvM3yPSdsc0Qgmgg0WrDfRN/fXgw8X2POEDU1hHvIaKrbbH/x5lpmT08hEu47yyDT08nEypkhWucIbdw38y8VBeauRYEoXzWorGeH+bfvrBJja5rCVtf9HrhTvWdDFo5VYUH9V7B8nSq3+Z1PFGsW7hvIhwzNNh/k9uQyb2f8TRWgOicUkEXnRgx4BnDKMh7EC7hvl9hpuNKhQxxCfft+ghVpQt8pfkbaPrOyKYIQTSQWF0mtnrm8yfWeOabzp2uWjQ2Z+k/CTirkgeD1INEZsg41RmtVC14Tz5tLCLi/y5+ntQ91HaNCKLgce58UElx272M0PJ0cs2Good5YvE9iUkF+psM861EXur0oTuxABgF83+1RHCxk5BUOPFQS+QHYBzt58EgneGA19sWQQgipw50eTBIPXhkZiNn6R4MopBxPIP/n+OVQvVinusmpdIFvtL8DTR9Z2RThCAaTKweT476fSbyQbKynxoaWquNU1SNJ0dLLa61IgglKg981Kr/PBjrAfoHiFtg4ZMgvL1YxPQoDOuw4fWJ//USvwdWoyDcByaVP8TgTxDwgSkkXYKEdMHwcm3J9YFeYnA2xMZnC3csQQli4sRV8ZOjqhjP51krXeArzd9g03fGNUcIogFF6nLML9lS9QYEDDqsVOTOfMTPVc5nREtWoEKJA/3jc/kkL71/LQkiRxITbzs7Xz3z0fzcCvwoDDrC7c1uZyHtsfhBzKTe8C54PPushweyYSy7ZyDxrEsGF5XZltT2MN1lEESus+2dvSdpTgV++qA2JD/IjG7/Da8Hqypd4CvN76cjkqZ8BIQgyseupjlbW4/aNhTOnMOEk5w7U03FubeSdaoDt0aql88izBcw8xIf5SvNzUNEOG1oMHFbqXcRak0Qqk8KH4pkvkOActzayocwAmNUKFNhFTKtC4lw2BT1kb7SV0D8HWRxcakHevbqju8czmI1QN26ouyRZsskiFyxrbH4ngbT5Urz5KP96jIqHWLjP1KpNfeWwrXSBb7S/KXaJ79XhoAQRGX41Ty30m2H50SOYnAviN4LxpvylSp9+EMAfpENha70+9qZs8G5xc8y4wQsKSofePz1TeidDFq9+y7mvR7vYk8pth4EUahw/j7x7VrG6BAwPs3AngB2ti2Cz73+DvVfwfipmcne7LzDCSq83HOipvlZEB2Eifci3pgv4xWA/5dAfdnxzA1B6sldoCvdP1uq3Pc5yG4y9lElBJFvIy3o7H13mPlYplzIc0f78TAx7sgSrdqQ7PtLqU1AAbtKF/hK8weVoaQPhoAQRDC8JLUgIAgIArMGASGIWSNq6aggIAgIAsEQEIIIhpekFgQEAUFg1iAgBDFrRC0dFQQEAUEgGAJCEMHwktSCgCAgCMwaBIQgZo2opaOCgCAgCARDQAgiGF6SWhAQBASBWYOAEMSsEbV0VBAQBASBYAgIQQTDS1ILAoKAIDBrEBCCmDWilo4KAoKAIBAMASGIYHhJakFAEBAEZg0CQhCzRtTSUUFAEBAEgiEgBBEML0ktCAgCgsCsQUAIYtaIWjoqCAgCgkAwBIQgguElqQUBQUAQmDUICEHMGlFLRwUBQUAQCIaAEEQwvCS1ICAICAKzBgEhiFkjaumoICAICALBEBCCCIaXpBYEBAFBYNYgIAQxa0QtHRUEBAFBIBgCQhDB8JLUgoAgIAjMGgSEIGaNqKWjgoAgIAgEQ0AIIhhekloQEAQEgVmDgBDErBG1dFQQEAQEgWAICEEEw0tSCwKCgCAwaxAQgpg1opaOCgKCgCAQDAEhiGB4SWpBQBAQBGYNAkIQs0bU0lFBQBAQBIIh8P8B94GApsa48RAAAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-6"><g><path d="M 211 251 L 211 276 L 51 276 L 51 294.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 299.88 L 47.5 292.88 L 51 294.63 L 54.5 292.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-37"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" 
requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 292px; margin-left: 132px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="125.5" y="286" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-9"><g><path d="M 336.61 21 L 375.39 21 C 392.29 21 406 34.43 406 51 C 406 67.57 392.29 81 375.39 81 L 336.61 81 C 319.71 81 306 67.57 306 51 C 306 34.43 319.71 21 336.61 21 Z" fill="#f27979" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 51px; margin-left: 307px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Abort suspend; details logged in dmesg</div></div></div></foreignObject><image x="307" y="30" width="98" height="46" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXt8XFW1/3edmTQFlMdVeQmKr6ugcrkUmkwmiUGeBaHQMknLW1FAqCgIchW86hVU5CrCBQrigxaEJkN5aOUhyI3Na1KocqUiKoIK8ijyK6VCmmTmrB/7zEx65sw+5+wzSabJZJ1/+mlmP7/78d1r7b3WIsgnCAgCgoAgIAhoECBBRRAQBAQBQUAQ0CEgBCHzQhAQBAQBQUCLgBCETAxBQBAQBAQBIQiZA4KAICAICALmCIgEYY6VpBQEBAFBYEYhIAQxo4ZbOisICAKCgDkCQhDmWElKQUAQEARmFAJCEDNquKWzgoAgIAiYIzCtCaKpuf2rDHyltLt0a338xVO7u7uz5jCUpmxsbj+agJ+O/ZXxUH0dz+/uTv+z0jIlnyAgCNQeAo0t7V8gxuVjPSMsG+jpOq1WejptCaKhYfEuVl3uQQAf8gzGerLp4P7+znWVDpIQRDhyB7Yu2jPO9vmU42X9/elHw3NICkGg9hAQgpiiY5poTh0B0N0AZnmbSMDX+nu7vlpp04Ug/JFraDhxe2vWyOfAdAGArA0+dLA3vbZSrCWfIDCdERCCmIKj19bWFh/O7rIM4BMKzRsFYAGIFf6/zh6NHTI4eNuLlTRfCEKPWlPTcTuzVfe/APYppNggBFHJDJM8tYKAEMQUHMnm5oXvziLWTcCe+ebxL/P/0sGF5uaI6KT+ns4VlTRfCEKPWkvLgt2yHO8B8B4hiEpmluSpNQSEIKbgiCaa288BcE2xacT4hm1hY8llESq/rBaCEIKYgtNemjQFERCCmGKDkkiktkEMKwGaV5QWmHkBE/5ugR4AsFPh7xVfVgtBCEFMsWkvzZmiCAhBTLGBaWpKJdii+wBs7yiXgGfiyLXF49b64VG6G4SPjkkWFV5WC0EIQUyxaS/NmaIICEFMsYFpSrZfxoQvbWkWr6qPb5fq7r5pc9lgARVdVgtBCEFMsWkvzZmiCAhBTKGB0do+MJ010Nd5g2pmU1PHh9hyLqx3LqqfKrmsDiKIOXPOqKvfdsN8ZpwH0IcBvBlADoxnCNRtW/yDPXflTDqdzlUKnVNH/caD2GJlcNMKYFfXC60XCfgtCMuH6/lnax9MbzSpp60t9SaPhPXnOGVbenrueL6hJbWP5Rj7OJf824DwMpjXMGgpYK0n2Pe6VHeB1TFwTKa362cmbTJNo8Y9VpfrYHAHiN4PxlsKeZUx5HMABgFK18fte02MGRuaU3M86shIr7Eqza/GNT57Q0MM1ulM3ALgnQDiTl8czPEXYtyfJVq2prfzT3kBOfjzblBu/NWTZKob+SQxTgLRB5yxLcxVWLjPBv/PYE/69yb1+LXCmVdZax5gfxyg/QHs4hqbvxJTT9Q1UfYYwmOoqmxwYjafSeAFAPYa6xfwAoDVTLh2PGswb+OTWwQbi124ZRn4HcDLeXTWDwYHf/Kq6qcQRNgMreLvjcnUMUR0h2uzLLln0NxPqLk/JmGYNtWPIEZGrL3ZspcDzmIL+taBeMlAT/pXpnWqdE77LZwOov8E8DaDvEMM3MijdV8uTtjAhVyqgvszcvwRxOhYAFcUFllZdvUAgAmf3hoEcWBbate6nPU9Zl44tpEGg7IJxF9HFtcMDKSH/JJWusEXy6sgPzUmU+1E9B0AbzcYV8UMGSY+fbAn/XhQeh+CWJVo7jgK4B+FzSNVjw06dU1v5x9N2lVM48zVOJaA6cuFQ1JIdn6CLJzTvzqtnkkHEp8fQagKNmfpMgLODp8P/ATZ1in9/Z0Pm/ZLEWosPnppYb7niVv/vQTQJwZ6O3/e2NJ+YRRL6kRL+01gnOoqduygZtrOaqabNpbUGtsH7eafSHacCeLrXSBGvqzWEQQDy4nwP2aLwaldbVAXDvR2XRe2IFRiZzPM0nIGDq1gAqxji9ozqzvVaVD76SQIAq5m4Ft+5ADgaQY+S8CyahNEIpF6L2KOIWTR5iICLNydjWPxw91pdaIs+yrY4EvKiJI/lUrFnn0OF4LoUtfBxrQvQ0x8dqYnrfDXbqplBME8X40nEf04YFy99W8g4pP7e9I/N2lYY9uivSibWwFQg0l6V5ocCFeOvL7jl9au/b6yXdJ+OoKAnTsHMeumiHVuYqbTMn2d6lAZ+DW2duxNNndpPDP45Rti5o9bRHsV1lA+XYirDSGIsJGo8PfCACr10W7FIpjppExf50/cRZbbSKgxi2ZZrSGIjSBHFbCdq671DDxEoFffEDvfDSCpWZBDRDi5v6drZVC3m5oWvJOt+D3azTCvelgN0EsM3p6AZgB7aMp7zmJrfl/fikd0dWkIYgTAsIvwiv15DbAbHSmJaGmO7O/GbOuzqm/MvC0Rjik+EAAwoiQ0wPrHWJ0WXT+wesVvKhxmJ9ucQ1I71G+mtIYsNwH8GGA5blSC8GDgx6NDO56p24iibPC6fkTJ39TSvpAZN3vmhqOSZEImP3/UxsIfBmM/zRzawLDmZXpXDOraopEgLifgjBJCZ/wFFn4FpmHA/jeA5mhO4OtBfPRAT3pNhXO1pE/Bc5W+PzK0wxI/kvASBAP/R/m5OtfVtk2kpCzQ0yCud9Yf410aEv591rIOf3j1imf8+hWw/nIAPwFYmcJ8O4CAD7qw2wBGPwhHjZUtBDGepV95Xq/tgzrd5iw+eM3q9NPuUvWSRrTL6jKCKGk2PUWW/SmvqKw24M1Z+jwBl3gW33MW5w7r61v5O9+NO0u3Azjc8/s6JpyV6enq954e1Z0BMf2QgEZvHjuenTfYfcez3ro0BFFMkiPQVZyzL3GrZZQeFlZu1H0Kr5ahXCKZUmo2da9UtIxXRPvZ4dd3vEmzqVCiddF+sJXqr8Qv16tk8xH9/ekBLxZRNvjxEMTcuce9JTar7hcAlG6+8HF3zsInvPNW/eg/h/xtejQPM1xN1qt1nHbV130H6m5iC8Yq3zq/+RNA3Dlm3GXH+EJNn6ihZfGHLc7+EKADXA3LMfgLmd70d3X4auaZO9lLzHTe6OYdurxzoaFl8b4W59SB0eufbclAb9e1urryd4obb2bmDvfvBDyQtfhMb58Udtasuv8i4EytRCgEUfkmX2lO3eamToiZ3q7TdaJ3U0vHIma+xTWAkSyrAwjicbKzR/b33/FXn74Udc0l4j0xfjCrbv2ndR5mG5PtZxE5Rn/FzVAV3TUym88IuoBWE3vWtq98A85luSsv4XsDPV3ne3HxI4igk7a3j9UgiLa202YPZ19LA/QxV/2+C7yYRqk9kLXvJ+Bfx7Zi4OpMb5eSfkq+ahFEY3P7IQQotU3RX5jJqzpKJFMXgeibrkY/zxYdrFMh+hNEsJrNT/UVJG1r6sqB+ZI9dscVQY8yChfZ1wF8sqtPf0OODx4YSD9pMM+KSR5Hjufr8hQTaKWBAG/MDcnUoRY5qkx1ge98ar2yzecG3GNRoiV1AdgZI/e6FRVTpZv8ePJ5bR8Kq
o35A71pZQ9R9iUSqbcjTt1gvHfLj3wvclgYdHk5ttl43X3nfzASwdX8amxuVycMJUkUP+09iFKlzNqMewBqcm1qGXtk9GNr1tz5chhmPqcfrcTiQxC+p2xd3dUgCPViiepyq10bvfELo3IpkwdjqJvX23vrBnd/qkYQLe3nEWPLKdnQFbTutZ5Onar6pCMIBv6IuHV4pnvFX4LmkJo/ddu8cgMBH3el05KY3ntysKrIXXfh5L3KLfX6kZGPBDFkM88f7EsrY9jAT6dtsOPZVq9krdM2OJf2BuvPB7tQgghr+1T7fVpcUjc2t19FwLlhk9j1OyVaOq4G85KwTVo3IDoJIso9xtzW1LtiNqn7EqUTLX5lp2DNCVPp9H2JT9dWzdNeMOGiTE/Xt93p9QSh30D9Jmk1CEJTx2s28xGDfenesMXT1NRxIFt8JwOvAfi1BTzKOb7aeyioIkGUxgoA0vXx9ScYxCqhRHP7TQAOBvivDHoUsJbr7iF8JIhQiauIpQaLnPJMkOlLb4mHop6Ql0vlvhKA3ziVl8G/HYnHD17bfduWOywAWoKIEJOlIZlqtsgxpi3eGWoPGZr7ykiaBt3aC7ukDpvDU+33KU8Q2tOUj+rADa5m8zW+rNYQRNSXUEqKUHcE7pNZ2eagMfrTnniDJk3+FLTzrQBSW9KVP+3VqukYt2T6uk4xeWWlyq4GQSSTx7zZptnKjuIjrv4EvkqKuqiqRhDJjgVE7H6gEPoqKWpfNAShvZvzK1en0uPy9VU2n4NUvH51aTZkLfnrCEI9t+7v67rYBB9NPVqC8D6bL3pl6O1d+ZRJPdq1ZyglmpQ/FdJMeYLQnFyMTpTNzSfslMPovZ5ncSY6YGgI4lcWbz66r++nm0wHrTHZcSKRcw/ifErs59FYa9EF+b6Hnbzdm14fVqfdsWetmoVpVJ3maW/Z22otQWgkjaAKq0EQqn59pEC8AsIKy6ZlmzfvsDbomWQYaNUiCB9JUr2M+Q0Y19vZ+KpKXdIX+6ghCFMpZQymxpb2LxHjsuIf1AXtP7etP+63v7hZSWLQrSUmnJ/p6boyDGv371rydxm6FtPqCIKZFpo8V41ykAnyymDar8YK1Yim5W/tdFOaIHxsH/pHZuNIEwtizUZjJEKWEUQFpwKvmEvAP2DzocXoa3PaFr91Vjb7S4D2HZsEmsViMkFMRGq9BFH+THgqEETBBkKp6N7h054hEB6FTcu4ju7PdK9QjwZCrY6LZVWLIHzuo7xdehaEVcxYOTu+ba9yGWMy5n4EwYSLMz1d34hURvmdW8kBQ/d0/A27zvsB8nusoa2ewXWUf623+xgZaSQDDUHkmOnoTF+nsugP/QwPMtSYbFe2Teoll/Opy+n+vq5PhVbgStCY7JhHxErizV9WV7BXRKmv2mmnNEHobB+i3AVoLrfVPhJ6We0liEoWXdgmpD0lVeimIqwuNal8XoJFcothuPAmZA435tUzSg+vXJmEfetBuIPZumnP3XKPhLk5McErqMIo+dVDhLphupUYR4Z1QkXoA3gtGDfmRrN3mTxUKDeUMz9pjxGmR2fvVbVo+mvQFcMkmg11vPPMJP9ESNSqh2XYCEEYDvwEJNO8RpiAUhH6ckdDEGWXvmENaWhbsIeVja92XVSX6EEnmSDUcbpk859uBOEsPn97jyD4XyHgO7nRuqv93I9E2eB1FUXNH90thVOr8uWVYVifz/SuUMZrZpbUFRwywvojBOE/3YQgwnbCSfpd9wR0oqoKk0ImgiDCLsommSDK7mmmI0EUxpsaWlJ7E9MFBBxvKFGorL7uR8I2xLB5Vml+xxAuh+OJcUHBn1fpG3p9xVk1X4eHdrxcd+8S5KwvrB9jEkSI80IhCCEI07lUtXR69dCEVR94WS0qJj3OJqL7hI2QpiBl3PXMi9a/Uo6PZUKHx+2BrmqtZXClG7zphmqCgaN62kytBOUxFIe5PNTqsvtaHk8EQWhe/JXcQYwXLxM83GnGO89M8ouKyWxUpuodhM6OoeDamdS/0T7iHTwLMPCyejIuqb163Ym8pPYucO+FuAJrGksQvmPtPNG0N+8Ptk8GQ7l+Lrp5d+cpswkY74Y33vyaDlEyuWg3G7l5IHzKx1eS9lBTRhAVvC4q3Pe4nuOW2ic0NaX2g0UPMPDWQtsjXRpHW6zjf05tQhCqTWWO8yq4Pyh7IFJBGVHxqWb6KUkQeqvN8MtlP+ASzakjAMecvujyIPCy2ksQ3md/JgNU/vS01ChN9/68klcUqi3ep3beJ7W1ShDucSi4jjgNRMrj7ha3CZ4nmw5eec+dbsePxtbaTn7vyxUgUv6w+ePEI7Bzyq18myutdlPWSBBa9yJBdYY999RYt2uNMcP6Zfq76QbvV55pfu/zXkQwxivW7X3OLq+YTEd5HOk0tg/wczVgUo32tA7/y+oyFVMhrGk0A5pdlgF8QrF9OsOirW4oF/FC03ThmYyJX5rG5kUNxLkjiSjBwD4F3FTMAZNPGXR9k4CLXInLbEI0/XiFbDrMNHZA2dt3DUEkEql/QcxS/qRa895xEYvBPsp0DjW0LnyfZcdU7ISx+BE6C/lyOwg2fgbue3Dw2Mfo/WNFP7AViEb5y9rpDdXaH2zg95bNPy4+/S6O2XjnmWl+DdH7+rzymXzlBrEiQZis08rT6C2D9Z5bo9SiM77ys87UGMoZ2U8U22Ma2U7naoOZFpsaBKn6dBeIpq42okZ/M114UcbFm7Zsw4t4qtOMnQlBqAOIkSGWjzPBMglC4w9M68LCDytTHbnGUC70lZ67Ts1dnza/5kXhqxZbH+vrW9FjOt5NLe0nMUM9XS5ezo8wcFSmt+tBdxnjnWem+XU+23Rrx69/WkNIIQjT6VBZOq1voQDPraa1aP2m+MSs1vliUj7pLXv0sP7+O9cH1RnFAdokOevTugWZLncQGqO/SBuRZsMss4LXWtkTXTPQ06n8fQUa3CWTi1pssle5YmKo6VBGELqDDhF1Dr++w8kmVuDazUtDYlpfTMx3q3CZYY4pCxEMbwORCjJU+PSSQTK58IM2xZTr8i1GbsADw7M5ZWK0Wnj2rQzd3K64f50bGT3Ma+9husH7rcMI+XV3naY+pnROOcVQznRDrjSd5qQS2YGdrm4fyUQrGfi5+w5zA+znQjnoVKJz982Ee0br+YRK3H0TaOms+Ivneh3CTRJBGLk9iTIXfJ43h7lZd6rQufz2kRJ1vrJCvfUGRLnT3kHo3M6D+IsDPen/DiGiMpfffn6CfJz15Qi4dHhox8v8yMjHXfxIgARLiZb274LxudLxpJvr4/bZQbHAFRGRRVcz4ZOuvCqOxJJMX5c7+qPzc4QNXju1ouTXER8Q7CrdsZDPh5Atj9onEkSU5R4trX5z0Ht8jFZyPrXubkNnWR0UMCgokIg+CAt3j8zGsX6bfd5Xvi5gkH8M32Qy9f4c0VICDvLg4LuRTgRB6O5ywkizsnEqU0Wo/fQJm3HuO3bHQxpLaWpqTR3EtnVjIbpfsVrfYE2a+OZq3WuDQTlS4eyN7USsfA/pYoVrCULn
4lpZSxNjaXZ09Gs6S2kVF5nqRtU9SmlAGp84HwEBg5xAPrmYdZ43mlr+Ety+GoCKRz72hUk4PlKAGptHGLElOoM+P2NHtY78pI8oG7xufkXM7xPbQb/+AgKD5ZsiAYMqWfJmeTTqBWMPrCY16F9HlV9Wa0KO/hwEFbNhp0I97lCEOf8wjvQULDo+LATnBIUcDTwBTwRB+OjfFSSbVEjU/Pqwv9rfm1YhNiv+9KqPQnGEl4nxayfUpFMfv4sJ+2vsCHJBp/WACHtKyTQWotMpPx+5b8zlh9rcQBhidsKvqs/3FZMuIE0hj3Kr8SSIBp1QoCpsJnMDQCqGiQpv6/58iV9DECrWs+XS82cZ+B2BCqFoC+FkvYFuACMpLdGSmgsm5Xuo/EmxKzyu0x8bHwFhr/KJoNaFfezA6vRjE7DBlxURkSDgSFPbbLwGYBWq1fs9y0CvExrWPyzsljxCEBWv+7CMOn1gpAu3sArU7+Uvh9QuU6p/1jxz/Q8G/vmGf/krIgSC97Xk1bUzfzqL3QpQi0k/StPwE7BiJwQR0UQQhKrTJwLeWHOiuGUO6ueBbald41nc5nnqaQpNoPVxsZBE66J/h823e6SOkDry6od4jr4FxqlhBOGoI1pSpxLTdRHmjrsNgfNIQxC3EegVzm92JpbaSgIYtOO543WhanVgVIZbsaTwQ1PUDd7bxkry+0S9C5tvKi72UoayXSkcHoUgwjCr7PfxRoEzrdXHQrvECMnH1cYVieaOowD+kY+aodgER4WQy9Zd4ucLyK+t+ZMznQOC8nu/o0Gf1AT99qw4fy9IB6zKmSiCMFhIkd1NB+HBMfpKIVjUmG1DCC7ryOLPemOG++VR6pZYzr6SyFG3BG2o6iR+3ew4X6yw9hhZhdpBFIIZqbnjjZfs17QhBm7k0bovB82jMoIgLNtjVz79mRess4lZufAOcna4CcRfRxbXhF1oextZUIV9nfKbo8nYGK+LSjZ4d/sqze/cIT5P7W9s9leFrHFH5Um2dUrOsm0LpKLc5bULQhAG21YFSfT3AzCOjGVapbMJx7ASoHmuPCWX1UG+mPILY+STpAK+E32gsDgcdQHDWpmz6Aavzte0bcV0hU14HsAqCJBSbe1a2LyUA7cXAPQDlK6P2/eGEUNJmaN0NwgfLf4t6jPXYj61kP7+Ah3LzOcD9OHSTShalDoTbDx4NBRe0hTVMJsA/htgPQjL/uEeu+DxMG+umjodf08W6DNgJz7HOwtqniEAf2HQHd5xjUoQhTppbnPH++LMpzLhcEf9wnhL4TdnbAl4HITlw/X8M5PXQTqCGOjpOk2VqTbKUa47h4g7wE50Q0WACq/HwPiRnZ2VjnqI8WKn7g3rh+loME5RdiuuuaqSvvhGHOo/q7pMvdMW253luHo++55CfaEEPBEE4V4rIzlnfn8aRO8fGyNHhcZrGLR0dGjH+9QDgKjO+sqst4GyZ9gma6JaaaakJXW1Oi/1CALTHYEggpjufavl9rsOoUIQtTzQ0jdBYGsiIASxNdGvvG7XuAlBVA6j5BQEBIEgBIQgpt/8cNtkVeLnrZo9FhVTNdGWugSBCUZACGKCAa1CcY6/MdjKqlz5pPpaf2/XV6tQbUVVCEFUBJtkEgSmBgJCEFNjHAxb4TXoNHXrYVj8xCcTgph4TKVEQaBqCAhBVA3qcVekDPLqt914MzN3qNdkzHRaFMec425ABQUIQVQAmmQRBKYKAkIQU2UkzNqRNzLMfZds6wum7uXNSp6cVEIQk4OrlCoIVAUBIYiqwDxjKxGCmLFDLx2vBQSEIGphFKduH4Qgpu7YSMsEgVAEhCBCIZIE40BACGIc4ElWQUAQEARqGQEhiFoeXembICAICALjQEAIYhzgSVZBQBAQBGoZASGIWh5d6ZsgIAgIAuNAQAhiHOBJVkFAEBAEahkBIYhaHl3pmyAgCAgC40BACGIc4ElWQUAQEARqGQEhiFoeXembICAICALjQEAIYhzgSVZBQBAQBGoZASGIWh5d6ZsgIAgIAuNAQAhiHOBJVkFAEBAEahmBaUEQ4m8m+hRsa0u9aXiU7gbho8XcDByT6e36mbe0MnwZD9XX8fzu7vQ/o9dc/RzTvf3VR6z2a2xpWbBbluM9AN5T6O0GG3zoYG967UT0Psr6moj6tlYZQhAThPyBrYv2jLN9PuV4WX9/+tEJKrbiYqJM4Om+wU739lc8yJLRFwEhiImZHEIQ48SxoeHE7a1ZI58D0wUAshN5ShlP04Qgpo8ENJ5xlrx6BIQgJmZmCEGMA8empuN2ZqvufwHsMxli7DiaBiEIIYjxzJ/pnlcIYmJGUAhiHDhO9iQcR9OEIKbRHcp4xlnyigQxmXNACGIc6E5lgojSremuw5/u7Y8yVpLWDIFaWZtmvZ28VEIQ48C2VibhdN9gp3v7xzEFJasPArWyNrf2AAtBjGMEamUSTvcNdrq3fxxTULIKQUzqHBCCGAe8QhDjAG8CswpBTCCYNVJUrazNrT0cW40g8nYDuUWwsRhEHwCwjXomysDvAF7Oo7N+MDj4k1cVQOM1lEulUrFnno8dQGSfBsahAN4JIF4A/0UCfgvC8lkxvivMOKyhOTXHAj0AYCeTwfMzTnPlpca2Re9ENrfAAh3B+RdRuwKIFdLkALxAwOPM3Jkbzd61Zs2dL4fVXY1XTHPmnFEXn72hIQbrdCZuKcGV8DIYfyHG/VmiZWt6O/8EgMPaXcnv4yUI1Y/6+o0HscWnAWj14D82P4br+WdrH0xvrKSNKk9+HtJBBHzCU88mgB8joqXuOdjY3H40AT8t1seEizI9Xd82qT+RSG1DcRzFrOqi/QHs4uRzxoXXMGjp6NCO961d+/1R3RozmLdjzZjI9RXUN884KQNQV5/wC4Z1VaZ3xRo1zyabIKKsr7K2eAxR1V4Ys/lMAi8AsFdhL3TWPYDVTLh2z105k06n1d+q+lWdIJTdQCw+eikTPu3apHWdfgmgTwz0dv68saX9QmJcPpaIsGygp0st5rCPmlpTB7GNawGHhMK+TSD+OrK4ZmAgPaRLPIEEQU2t7S1s83cAOiCsYa7fs8RYmsvWXVIkUF3eKBO4gg2WGpOpdiL6DoC3m7SdgQwTnz7Yk37cJH2UNBW03ylebaKwcDqI/hPA2wzqHGLgRh6t+3IQ9rpyEi2pj4BxvcE8fImZP5PpS3c1Nrd/rAKCKI7N/4T3iZ8A4ayBnvSvvBgaEsSEry+fMTCtJ0dEt3PWPjsez9VPFUtqP4JQfd2cpcsIODtkL1Sc9wTZ1in9/Z0PG8zTCUtSVYJobO3Ym2zuAvAhwx4MMfPHLaK9GPhWFIJwTlAx61IGf9Z1GjeslgfteO74we47nvVmmAiCUCehWdu+8g0wzovetnyLGPg/y87O7++/46/VJAh1Wnz2OVwIoksraPsQE5+d6Ukvm0hpohKCOLAttWtdlpYzHIky6reOLWrPrO78vUFGSjS3qw3gisLJ0CALcmC+BBY9AcadxQxhEkSF80q7xsIIYrLWlxccR2rY5pWLGPhK+CY6lvtBi3O
fsyl291RwtaEjCNi5cxCzbgKowWRCFNJsYqbTMn2dd0TIM66kVSOIpqYF72Qrfo/LqKzY8JxiR8DK5Dc+PoCAD7omwwYw+kE4ypQgnIWyzcZrAD6jDB3Cy8T4NYOeVhucpr5ilsfJzh7p3YAb2xbtRVn+nFrszLwtEY4BsH0h0wjAqwDrH2P1WnT9wOoVv3G1gxItqQvA9M2yDTYv/v8BsNY56YnrwdwA0Ht1i4MYP5hVt/7T3d3dWW8/J0uCaGppX8iMmz2bXQ6MZ5iQIZCjFgTxh8HYT7MpbmBY8zK9KwbHNXNdmaMSRMBcLKhgsBqglxi8PQHNAPbQtPU5i635fX0rHgnqhw9eKssmUlIV6GkN2+i4AAAgAElEQVSfepQ6QfkNmmtIECHzKt8ngN8GQisYb3G1ewispAn697G6fPx2qd8nc315sPTvE7AJwIMBfXoQwN4uCXer+WLyEoQ63BEw7B5b93xw1j2QBONdmkPY77OWdfjDq1c8M1HrJ6icqhCEcwrYduPNzNzhbgwBD2QtPnPN6rTarMe+uXOPe4s1q+6/CDhTe0oNVjH5TCp+wmac+47d8ZBXlxdQ3/31cT7e716iEj2nXgKhp2y2z9K1TYHiuPOoy34RYOXOo3h3on5aTzYd3N/fmScU1zcZBKFwis2q+wUApdMufNyds/AJ7xiqH1UbNmfp8wRcUtpuurU+/uKpOmKrZNJHIQgHlyzdDuBwT13rmHBWpqer3yvdNLSk9iGmHxLQ6M1jx7PzdJKmSpdoXfTvsO37AOzsyuenxgxVowRJEI3J1DFEtMJDyC8B+Oweu3GXe847UuDz1P7GPdpVfmqoAAmiausrmVzUYpO9ynUAc4gVzOeNbN5pefH+RP1R9elvz+GjFlnXv3GH+W7NPJoyBOFp20vMdN7o5h263P1x1n3L4n0tzv1Eo3FZMtDbdW0layVqnqoQREMydahFpMQ9dRHtfOr0yzaf66frV0kCTtq+dxA+KqAHkeOOgYH0/wsAiBpbUqcS03WuduaIcFp/T9ctunwVEAQlWjquBvMSV3l/Q44PHhhIPxkyeMX2/cBNmsx0UqavU02iSSeIxub2Qwj4OYBZhcrW2aOxQwYHb3sxCNdEMnURyJGYit/zbNHBhiqa0DkdhSAak+1nEeEaz8Gja2Q2nxF0Ae2rviF8b6Cn63wvqfgcitYzcGKmt0udbrWfUn3Fs7gNoDZvAj+CKBxwVrkJjIE/wqJjgzBWKl/YfBcB/1pWl48EUa31Vbgfug1E811tWw/LOm5g9QpF4tovQDqcigTxOHI8P2jta/tTRW/Lk04QbW1t8eHsLssAPqE4ourC0h4Z/VjYaxy1yOq2eeUGAj5eMhv8JQjdBqxVFfnML2psbleSizrxFj7uH5mNI3WbR1SCmNO2+K2zstlfArSvq37j00Bz8wk75TB6r1tvyYSLMz1d36gKQbS0n0eM747VZfhYoKFh8S5WXU5timN3T37EFsoGmgSmBDHnkNQOszbjHoCaos5Fld5n03/O4txhfX0rf+duWlNTKsEWKemhqH7MMWNJpq/r+rA+JhKp9yJGvwTwDndaP4JoaulYxMzqEFN8+TZkM88f7Eur13aBn88pXd1x6VzDV2196fALOqy5O9nYvKiBYN/reWk41QjCeIwSze3nAM6hpvg9bcezrX6Sa9iYR/l90gmiuXnhu7OIdROwZ6Fh6qXBSf09nUocDv2amjo+xBarxbJFTPfZmOa2pt4Vs52FpXR36guUAHSVa8oYYeAo3akvKkEkEql/oRidzXkVzX4g5GKcO7y3d+VToUAUEiRa2m8C49SwTXoyVExlGzGQro+vP8FAVaQuam8CcDDAf2XQo4C1fKLuIUwJQiMBqTuj+QO9abWRG326+ajbuJuS7Zcx4UtbCvU/aOgq1mwK0NWTP4DtfKvSsmwpJ5IKT7fpawmimutrnPjp+jS1CCKCFNCQTDVb5Bw2tiuM8YT2JWjiTzpBFHSj6tbdOd0w8EwcuTbTTVG7AHwIwlsXCE8iy20DA+m/G61+R29evuCI8Y3+vq6LvWVEJQjTNgSl26oEkexYQMQrXe2blFdJUXEyJQjNpjMYQ9283t5bN5jWqd+QeVV9fLtUd/dNm1U5PpKesQ1Dvoyyg5WWIHQHMGZekOlLj9lPhPVNswFpCaJa60t3uPFbg359m+xNNcoBTLNPKBW7dk/R9UczxrVDEJpFWbKYwiav+r3RULXhrUtdgv9z2/rjfvuLm18zqaeYprGl/UvEuMx1+tO2eaYRhOYE6Uhpb5zCf6Pe+NvZ+KqQ+4gow2Cc1oQg9j3s5O3e9Prwne5nrQxcnentUs+gI32JZMeZIHariv4cp2xLT88dzzvzNf+cW0myuxUKfs1mPmKwL91rWpG2vRpDOa9UFPUAptqjVIBUl1vtvovQqZiqtb40G6KvFO9LEOV9mtBNdbwEwUwLTZ+rbo19pojrZEsQ1JhsX06Ek8YqZPygv6/rU6YLxVlwyY55RKxCZeZ1rBoJwudk9yfAUvEaIn6snmeOPS8EWHvSrNLAUTK5aDe27I8y40QAHyl5qeIjTUWZwCYbbAFAzR1NGbTPgrCKGStnx7ftLZ6qIw5ApOQm7dfe/zCdNdDXeUOkytSGGiLya1RZFemMm5LtNzLhk8X26VRMGrL6lcWbj+7r+6l6Bmr0+ZBnyR1ENddXU1PHgWyxei23Y3654x+w+dAokRo1fZpKBJFjpqMzfZ3qniT0q9I+o23HpBKEdpOK4C6g2OKylxNagiiPwRyKvHmCkhNiMdtEDpzC6vWstXuMeB+A38dM+7/xYuhAj1uQ8hZXlyCgLnrrhulWYhxpAF8W4LVg3GjqIsSgzLIkJgShE/PDjMH82qJ5yVOy+TSWq+K08yesr2XWzToJwivtRtBtu+v3qi692OjWclj7I/xeKoF5XIwAqAg/T5+mEkFEastE7jMRxsRJKgRhhthkEAQ1tKT2JqYLHGO7UsMls1blR1D75HeSJAinXc4TxDiWgOnLAN5s2Fhl+JVhWJ8v+ssxzBeabAoQRInO3qQ9oZ3S+CDTShCGjxbC6hOCCEOo9Pco62u8G/x480frWWlqIQgz9CaUIBznXDn7SiIcG9FdxfoCqW/xG7QVCKIImWMIl8PxxLig4GOo+MwyCNUsAV8bHtrxcq9hkNlQlKcy2ZAnWYIouWMwaY9JX00kCNNHC2H1CUGEISQEEQ0hg9RbW8UU5rvGoAuBSSph9rx1Ld/uY+3pru9FgP9MoF6bqXu0znp4bfdt/zDdEKKccCZiQ3NUT5uplaC88+KwEIkox+AvZHrTW2wqxjEYJu2fZIKYkSqmyVpfXi+2omJasNtkOh4MWnqTKkE46ogJEIHLLgU1p2bdRZuy1o56IR5ln4pKEAVDrbs0VrJ/Z+AWwLpvNE7r1nbfptx5a11jm+JZbYLw4OZcrNvIzQPhUwDN0fiSMrHCNhoOE4KYyEtq7yW09xK17FEFMGmX1GUv/IBJuaSu5voKu+MxmRSaS/VIev+wOqKsr6j7hLfu8e
YP68tWJYiyJ6MVXKI1JjtOJHIsRfOfj1rF++oDFdQVBcyoA6exeM2BcCOyfH6Ay5GSJk0TgihpsxP7w84t9xBjpJccQeNiQhBtbafNHs6+lgboY2PTqMIDhHdTVm4teDTWWnzi29SU2g8WPcDAWwt1RX7mqmuv7sTuJaNKnrnqyNPnmWvJq6rJWl8NbQv2sLLx1W6D16i2HRpbFCGIKJvb2FZbQaYoWTSnqah+eNTTSuUobYu7DR+C0Dz583VmF9CHohWmskx9FqBHme0HZte9tNJrMRyFIHTPBE1djhTbmkwe82abZqvnvuqpayBZRjnhmGywygocMUttrq2ArZzWxWKwjzI1eGxoXfg+y46pJ8dj8SMmSkVh0n4F1tY0lANg7FJFtXUchnKRrcNNDeWqtb4mwmZlIqSQoH0uyvqKsk/o6hxv/ij7tTft5KuYEqm3I07dYCiX1c4XZWPQGmf5SRAatxwMXJrp7VIBYYyimen84PgZVEUZOK11bURDrShuR6JMYJMNNlE+jrkop7qJuo/STXaT9qt8OlcbzLTY1GBJlaFzVqc92Te3X0XAuVvaG83VRlNL+0nMUO5Jxi7+9a42yiUjIJqrjXL/Y85iKfPFpHUzMknrS+PWJZKb66bm9q8WYkgUh0AkiAqYYtIJIu+VdRweTMuc5/mrmHw9QBIfPdCTVqEIAz91yh/J7nI1g1W0u+I3xLCOzPSu6PZmjkIQOjE+yh2Jn8v0aj1z1bogIeocfn2Hk01eI2kIBlGsSYMGzpQgJslZn1ZKHY+zuYKKRRlRlQTWiuCs71WLrY/19a3oCZvzPi7JtQRRzfU1HjLycXYoBBE2GTS/V4MgkEwu/KBNMWUZubvrRNWdjWPxw91pFXdV9xVDJ/64LOhMgBdRnWtxE9fHinYKYTRL62O+W8XN1t0RaAjCV9fsIzb/EXHr8Ez3ir8EjV3e7oC+C1YXvltOlI40xrgl09d1ildCmmgJQtWlv0PhLw70pP87REIjr8vvSnTlfhiZEoQjRWjcfTPhntF6PqESd98EWjor/uK5XvWjn7vvMHfVeRKj7wNQ8RpKvijuvt/IH+pKOk9Esdt1Uc38jAiruL4o0dKu5rwKzrXlsBYSkTCvCqVOAId44BOCmKoE4R/bgZ8gC+f0r04r3fSYCsg/0EyhhwEE4SMFqIwbwHwhbNzq3ewD4mSvR4D04ScV+MW50Ii9qtuP2IRTdbGaCzEIjgLzN31jGftcxE8GQejiDgBwYmRnR0e/pnPfrrClutFvlgV/8omjUMEcVr66vlASszzgcYJ/wCD9XFTtSSZT788RLSXgIE/7Al3J+wUMItB/cM7+sWceUkPL4g9bnP2hX4zyCgIGPcdMn9lzd/tub8CgkOA6fu6+HWeWGil7UtaXT2wHv5jslGhdtB9se7lPSGMhiAoWV1UkCNWuwDCFwLMM9DrhKv1DVW7pXkgcAnUKq99MaZ94w0MgPAqmx/IF2v/m8wxTeSr9fKYnvdQPV91Lk0LaTflQiOrBlf3V/t60CtGpLJC1fv4dh3elITtjYHt/EH2gLLSnCgQDvMcVtEf7rHEyCEL1QXeCLPRZudV4EkSDYBoOCZcaJUZH6LSOQhCOJOQf/jZKyNHAw0Ox0T4BitTPJiFHbQB1xbJC7u50J+58VieUbWDI0VEAVsl9R1DI0SqtL2fNtKTmgkk9zHBH5VM/ja3jkNCwRfiEIEJXUnmCqhGEqjp/erOue8MA7OQIbVULaSnDUa/sVJjwvhHliuUGieoGdQ8BuHCgt0tFlwu83A7YAPJN9bj1LfjpUZePpi4qis3dxETK5XifxXy/6wml9o39ZBGEo4orj7xnAOlYknVsUftERZNTpUYlCIfo8uqVWwFqidL4fFp+AlbsBE+scb9iqLE5dR6BLtfFFffJNATG10F8mPtpcNjjDkettc3Gqzgfi93Eql1VvwHg/wDoC4WDR76HAQShfq/W+nLGNx/JUEVN9JKE79AptaEFZJmdmPFOP23woYO9aRXne9xflPUV5a5S17Dx5h9PZ6tKEKqhJvFwt3SInyDbOiVn2bYFUtGxjAnCU9cV7ueVQYCpp6dMfLpO5aPLZ0B6ZUF1Ei2pjyj32L5qo9KKsgzcnLOsr6hA5Rq1lvY1UZQJXMkGW/C4+SMfcV4H1RADN/Jo3ZcHB3/y6ngmrTdvJe1XZeQvXekcEBTxOp5DQz51WPn2rDh/zy9OuU/+0HjTrnzPkcUnz7KwZniU7gbho8XfmHB+pqfryqA2RltfcMi6jkdf8VjqGtmouOqatPVV7GsE9zRZMF0J2/4K4rTUFVxLCCJsdmt+rzpBFNugNrCRHB3LzJ8G0fvHXDM44jCvYdDS0aEd71MvZEy8uQb1XZ2s4rM3NMRgnc5kJwHaw6W62QTw3xi0ygb9aE1v55/CpAZvXWqh/P0Fpy/nA/ThUulA7yq8EE71CAKUNKVCYO7qOvW9SMBvmblT4wW17FUYaV4TTTZBFDCguc0d74szn8qEw0HYy+ViQznne4HUZSlh+XA9/yzoEriCuTuWpVKCcM/F4aw1D2Bl++IeC6cPAPoBStfH7XsjEkNJtzxj3gpgl0ICNQcfA+Na2LhT3U1EGT+/g4taX2CcwoT9veuLCNe+fVfcp+4mxntCnez15eqf4+DSAn0GjENdno4d9SbDWpmz6AZ1kHIOAKVeHIQgKlhkW40gKmirZBEEZgwCGrsZo1N9JQB5jfIqib9QSb2SZ+ojIAQx9cdIWjgDEdC4m5jQE7AbUm+Ankqd483AYar5LgtB1PwQSwe3BgKJ5vYfqrf4b3i3/QOA37DFtwysThdezoW3KNGcOgKgu4uv1bz+nkrVY5QG8H5Avc7jx2Dj+1HisCea288BcE2xzEpD9Yb3SlJMNwSEIKbbiEl7pwUCGlcRZY8V/DriY2Tnm9/rwNH7ci4IMJ1tS5T802IwpJEVIyAEUTF0klEQ8EegMZk6hojucD082MCw5mV6VwyG4EaJltQFYPqmK2+OiE7q7+lcoctbFlhI2coYWOj72Ca9SjYf0d+fHpDxFQSEIGQOCAKTgEDexbl9P4C9XcW/xEznjW7eoUvnv0qd5uN1dV9hgvIFFnflu78+zsf7vaDSORBUtho249x37I6H3FbUxTKVE0wrR1d4oxr6uQ6ZBIikyGmAgBDENBgkaeL0RCDIitp51gprneqZYwnMaARhz3IDN3oKln1syP1FoBU1MX7NoKfzKLIKV6ue8haf2G4BlzAQR3ZhT88dz09PxKXVE42AEMREIyrlCQIFBPKWza9czMAlESyb3fgZW50bGGwGjou6mB6N8ykBzjNlXGcgAkIQM3DQpctVRSCKFXWxYRVZnUe0oi7W9QoYl8Hma02jGlYVPalsqyIgBLFV4ZfKZwoCavN+5vnYAQReDNiHAPQOl8X9hFts19dvPAiExRrPAVkAfyWmHjBuGx7e4X9N4nnMlHGSfpYiIAQhM0IQEAQEAUFAi4AQhEwMQUAQEAQEASEImQOCgCAgCAgC5giIBGGOlaQUBAQBQWBGI
SAEMaOGWzorCAgCgoA5AkIQ5lhJSkFAEBAEZhQCQhAzarils4KAICAImCMgBGGOlaQUBAQBQWBGISAEMaOGWzorCAgCgoA5AkIQ5lhJSkFAEBAEZhQCQhAzarils4KAICAImCMgBGGOlaQUBAQBQWBGISAEMaOGWzorCAgCgoA5AkIQ5lhJSkFAEBAEZhQCQhAzarils4KAICAImCMgBGGOlaR0IdDSsmC3LMd7ALyn8OcNNvjQwd70WgFKEBAEagMBIYjaGMeq90IIouqQS4WCQNUREIKoOuS1UaEQRG2Mo/RCEAhCQAhC5kdFCAhBVASbZBIEphUCQhDTarimTmOFIKbOWEhLBIHJQkAIYrKQrfFyhSBqfICle4IAACEImQYVISAEURFskkkQmFYICEFMq+GaOo0Vgpg6YyEtEQQmCwEhiMlCtsbLFYKo8QGW7gkComKSOeBGYM6cM+rq6zcexBafBuCjAHZxfie8DMYvGNZVmd4VawBwFIIoS8t4qL6O53d3p/+pik8mU++3LfocbBwBwp4AYgCGwPwEE26xR7LL1qy582XvaKVSqdgzL1AjMc7RtHcdA98ZHdrxvrVrvz9awUjT3OaO91ngTxBwDIC9AGyzBQ/+A4E6syOjP9G1zaQ+hXd89oaGGKzTmbgFwDsBxF2Y/4UY92eJlq3p7fyTwt2kXG+aRCL1Xo7RJz39yAL8JMNambPohodXr3hG5Qsbq0rqlzzTFwGRIKbv2E1ky6mpNXUQ27gWoA8EFJwjots5a58dj+fqTS2p/Tad4WHMorh1HTMfXyAFv6o3AbRkoLfz5uIm2di2aC/K5pYDpDbWoG8dW9SeWd35e1PAGlpS+xDTDwloNMiTJcbSXLbuksHBn7xqkN6h3MZkqp2IvgPg7SZ5GMgw8emDPenHTdKrNAe2pXaNjdI1RDg2BN8sA9fNjvPFuVzuzSXj6iFz07olXW0gIARRG+NYcS8cqWGbVy5i4Ctjp9fw0h60OPc5m2J3m7ja0BEEMX3WtvgWAv4tvDonxRARTu7v6VqZTC46wCZb1b27SV4GnoFFh4eRhJJI/v48ncPAt8akBZMKnDT0FCw6fmD1it8EZVF1PPscLgTRpSGbtq6YISY+O9OTXhYmTSRaF/07bL4d4HcbdwEoH1chiAjw1V5SIYjaG9MoPaJES+oCMH1Ts1ltAvAgQC8B/DYQWsF4i6vwBwHs7ToB+/pi8hIEA/9HwDCAua7y1jPwEIFeJfC7OH96f7OnM7+32DrTJvt6APsUfssB/ARgZfL/txsLUpBSU7k+urU+/uKp3d3dWR+AQrDgxwBrXZ4L+MNg7KchkfUgPnqgJ63UcNqvqaV9ITOUJJRXV+W/HBjPMCGj+h9SxwaGNS/Tu2LQt46mBe9kK36PC6NiUiUp/I5Aj6g/MPgAAj5YejBQWJJSLe7kZBKC8IN5RvxdCGJGDLO+k8nkohab7FUAtnel2ATm80Y277TcrbtXJ9+/PYePWmRd73MqNSYIz8b9lM32We/YHQ+l0+lc8beGhhO3p/jotUQ4ydP6EQCzCn+7K2tZ5xb152N5WxbvS5xLE/CvrrzryaaD+/s785u85/PZuF9ipvNGN+/Q5b3HaGtLvWl4FEtA9J+ezX6dHc/OG+y+41lvHXPnHveW2Ky6XwDYf8tv3J2z8Ik1q9NPe9OrOjZn6fMEXFK6ifuTndOuLN0O4HBXeTlm3JWLWed5sTqwddGesZx9pa8aSghiBu8QYgcxYwc/kUhtAwu3gWi+exOFZR03sHpFfwWn00oI4nGys0f299/xV119DQ2Ld7HqckpS+VD57/T9kaEdlvhdQDc2L2og2PeOnYSd0zCdNdDXeYO3LLVJxm37/oJEVPz5ceR4/sBA+smgSdLY3H4IAT8BsHMxHQOXZnq7FHGUXCoX0v7cRXDr7NHYIYODt70YUAclkqmLQI6UV/yeZ4sO1qnMmlraT2LGTS6JMEfApcNDO17mh1VBzXgx54moVPISgpixe4TquEgQM3T4m5pSCbboPpf0kCPCaf09XbeEQaLdfIGoBDEC8PyB3rRqg+/XlGy/jAlfcidQKirLHj2sv//O9X4Z29pOmz2cfS0N0MdcG/fVmd6uz3rzJJrb1Suoa0qIMkRV5C5Dsyk/nbP4YK9U0NjSfh4xvjuWl7BsoKdLvRgL/HREyUwnZfo6FTGNfXMOSe0wazPuAahp7I/Md8PG4oGB9FBQJQ5JbLvxZmbuKEknBBE2PDX9uxBETQ9vlI2X+0dm48i1D6Y3GkBCiZaOq8G8xJU2IkGY1dfY3H40AT8tIQjCRZmerm+HtbOxuf0qAs4N2pB1m6qfBOBXn35jLpdWGlvav0CMy13lpOvj608IuBcpJqVEc7uSCg4G+K8MehSwlnvvIRqSqWaLHNLfrpDxVbL5iP7+9EAYVur3pqaOD7HFv3RLQ3IHYYJc7aYRgqjdsfXtWV5/TneDHFsH5yPGN/r7ui42hUOzGUUiCAa0p3lv/Zp6XrOZjxjsS/eGtbVsQ9ac2BuaU3Ms0AMuVVRFgY80kk7Z5t+Y7FhAxCtd7TZ+lRTWV2eD90pbEU//bW1t8eHszrcCSI3VF7EMk3ZKmumDgBDE9BmrCWtpc/PCd2cR6yY4RmnqG2HgqExvl9L3G31K7UF1udWui+BoBKFRkegq1mzgvvp3b34TgmhMdpxIxGNqNQb+yKOx1pB7gbKmlm/+/NuRePzgtd23/aOYeG5r6l0xm9QJ/V2uAtQrrN+Acb2dja+KWm+xHJ1KLSrpq7LK1GBCEEbroVYTCUHU6sgG9KupqeNAtli9ptnRkR6Af8DmQ/v704+awrHvYSdv96bXh+9k4NBCnmgEARyT6e36WVh9GoL4c5yyLT09dzwflteEIDQn/+cYuJ9AEa2vWVlBj70cUrYXceTaentXPuVqJzU2t/9X4VWSX/OfBWEVM1bOjm/b29190+awfqrf57QtfuusbPaXAO1bTM9MCzN9nXeY5C+maUx2zCNiNS75y2ohiCjw1VxaIYiaG9LwDmn0+sabrrv0REv7TWCcWgFBGKuJJpsgPH0IB888hZYw1X1F3TDdSowjDYpS7jDWgnFjbjR7V5BLj4a2BXtY2fhqt3TChiTsbkcZ3kIQBsNUu0mEIGp3bH17NgUIwljPX2sEoQbFeWIcxxIwfVljDOg3bspGJMOwPl/0hxW4sQe8Kgua8kIQM3BDCOiyEMQMnA9CEFsGvdoShHu6OYZwORxPjAv01t/ayZkl4GvDQzte7rZrmKjLdiGIGbghCEHIoLsREIIIIAhD24SJnlGO6mkztRKwGITDPG5NvNXlGPyFTG96zKZCVEwTPSJSnkJAJIgZOA8m4rSpeRIZ5ZJ6yqiYmpLtNzLhk8VpQMAD/9y2/rjf/uLm17bi1KBkctFuNnLzQPgUQHM0jhRLrLA1r8ogl9RbcQRrpGohiBoZyCjd0Jw2c8y8INOXLjFICyqzufmEnXIYvReghkK6aUkQGuvmJ5HltoGB9N+jYDqZafOuQBzX5m2uenLMdHSmr1O5
E4HmVVlk2xZVTiLZcSaIlTPE/CeX1JM5tFO+bCGIKT9EE99A3WZiarhWbE0UKSRKcCFvbyf7klrjH8nIBYi3nY67DsLXwPwCiB4B42Hk+EdFFxeJROpfELOU24/WvMdZxGKwj/I8g/Ud7IbWhe+z7Nj/uuNHsMeivPzJLt+LHBaGudkoViqGchO/1qZ7iUIQ030EK2y/xu3D77OWdbjX26df8U3N7V8txJAoJpmWEoTOzxERdQ6/vsPJppHolJdWa1bdqtIAQ7yqPr5dqmjHkEik3o44dYPx3gJgkaQ2nfW7lyA0ZBfJ1YaKPIeYY8j3DpEgKlxYNZZNCKLGBtS0Ozq/O6Y+iLQbSTRnfVPmDkLdwyVa2r8Lxudc2I0FJzLBs7E5dT6BlG+ooifUHJjPHOhL/zDodB6FiDQEU3bHoPMJZVqHkh5GsrtczeBPl/RZVEwmU6Bm0whB1OzQhnZMuzGGRSzLq0qoE8AhnhqmpQSh+pBMLvygTTFlWe6OUKcCGJ0Y5n4k0bqoCbZ9p8fdt9bbbFNLxyJmx63HFiIh/uJAT/q/QyLElbn89rHUhs7dN5gv2WN3XOGOt+EZO+UM8Ow3nPxdURYESQgidCHVcgIhiFoe3ZC+Nekjj/nFWMig3NUAAASdSURBVKZE66L9YNvL9fEZIrn7nkoShIOSRgpQfx5i4HIerbvSG286wNjNV/rQq6Lg4J0dHf2azlLaCZxUN/pNAs4sidVA+N5AT9f5XmLxCxhEhBUxZC/0uigptEm5/ygtvzh3hCBm8A4hz1xn9OCrzidaUnPBpHzvjAW8KYAyBMKjYHqMwdsT0AxgjwDApq0Eofqk4iHM2mbjNQCfoemjO1Sncq6n4jwnNSFHcwz89+z4+kv8XHg3JFOHWkQqnrY75KiqUrnVeBJEg2AaBnE9mBsAUncWcU+bAgMt+UhETh3hIUfhjtgnr5hm+A4hEsQMnwD503N5VLQwWJhwj6U2HMYxhbTTmiDGSGLbV74BxnmaGN1hkORAuHLk9R2/FHK5TY0tqVOJ6ToNSYTVoX5fxxa166LJuTMnk4sOsMlWRORWm4WV3wXQUwD/x1hCkSDCMKvp34Uganp4zTsXGpt4S1FZMF0J2/4K4rS0Qmd9U07F5EKKmlpTB7FNV/mo0jSg8hNk4Zz+1Wn1DLUkzKjfCBQ86v7IvA5H3XUjj9Z92avu8qsjypgW3XfUbfuKino3FtRoihgOmk9kSTmhCAhBTCic074wamhJ7W2BPgN23HgrF9ZKveGoPxjWypxFNxSfwo7Dm+tUJghnEFOpVOzZF7EPbOt0wD4EIPX0882FER4C+FkQ/ZLZumnP3XKPBFwAB00Kmtvc8b4486lMOByEvVwuNpRzvhcIeByE5cP1/DPDaH/e+vJjytYZgH24S2U1NqZ1NHpt8W7CxEX6tJ/l0gFjBIQgjKGShIJA7SPgNbYjxg/6+7o+Vfs9lx7qEBCCkHkhCAgCRQSoMdm+nAgnFf/gNcYTqGYWAkIQM2u8pbc1jEDBS++NAJ4F8AgR3Tsr9uLP/V5UeaHQWJWX+HuqYeikaz4ICEHI1BAEagQBr98qP2M6v+56jeyi5q8RGKUbLgSEIGQ6CAI1goDOHQeYvzjQl1avkgJfV+ntYejW+viLp5pKIDUCo3RDCELmgCBQewg4/pRGd17qjm+hXqAFWWorA8G62RvbifhKAG9zobIelnXEwOoVv6k9pKRHpgiIBGGKlKQTBKYBAkFW1CWW2sofFNv7g+gDGoO9ISb+fKYnvXQadFmaOIkICEFMIrhStCCwNRCoxDLe1c5NAC0Z6O28OUwttTX6JnVWFwEhiOriLbUJAlVBIIIVdbE9yjAvYxOfMdiTfrwqjZRKpjwCQhBTfoikgYJA5Qg4RGHnUgQsBOg9AHYZK43wMpj/wMDKnBVLmwaLqrw1knO6ISAEMd1GTNorCAgCgkCVEBCCqBLQUo0gIAgIAtMNASGI6TZi0l5BQBAQBKqEgBBElYCWagQBQUAQmG4ICEFMtxGT9goCgoAgUCUEhCCqBLRUIwgIAoLAdENACGK6jZi0VxAQBASBKiEgBFEloKUaQUAQEASmGwJCENNtxKS9goAgIAhUCQEhiCoBLdUIAoKAIDDdEBCCmG4jJu0VBAQBQaBKCAhBVAloqUYQEAQEgemGgBDEdBsxaa8gIAgIAlVCQAiiSkBLNYKAICAITDcE/j8AD6rFgBlvJgAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-12"><g><path d="M 211 301 L 261 351 L 211 401 L 161 351 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 351px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Failures?</div></div></div></foreignObject><image x="162" y="344.5" width="98" height="17" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAABECAYAAACbHqJdAAAAAXNSR0IArs4c6QAAEYRJREFUeF7tnX90HNV1x793dmSJkGJoSAyEH/lVTpJSmoONtSvt6ojfhNgxAXYlEwxOSUKaUtOkpTmltE0bk4SQk5xwCBRSBwwES7sYDHVLiF2ieH9o5dQpJYSmSSAtuBhMXWMIyJJ255a32hWj3ZndmZFmdq298+fO+/l5b+c789699xHkEgJCQAgIASFgQYCEihAQAkJACAgBKwIiEDIvhIAQEAJCwJKACIRMDCEgBISAEBCBkDkgBISAEBACzgnIF4RzVpJSCAgBIdBWBEQg2mq4pbNCQAgIAecERCCcs5KUQkAICIG2IiAC0VbDLZ0VAkJACDgnIALhnFVgKSOxxF1gXOFHhUz4Qj6d/JofZTspMxxL/DkxbpxJS9g4mk6urc4bi110bIH1NID3Vu4x8NF8JvmPTuqRNE0hQMujA7+jM1/BhPMAnAzgt8otKQJ4AUAOoFSnbjwyMpL6TVNaKZU6JiAC4RhVcAlFIAARiODm23zU1B2Lf5CYNhAQdljeOAPf4amOvxob+94rDvNIsoAJiEAEDNxJdSIQIhBO5kmLpKFINPFZADcBOMxDm55kjRL5HcP/4SGvZPGZgAiEz4C9FC8CIQLhZd40IQ9FYvE/A9NXAISq6i+C8GsAWTBNAPx2EPrAeFttO+kZaHTJ6I6hf2tCH6TKOgREIFpwetQIBOOxzg5etRDWbGUPogUnnMcmhaOD3QTjEQBHmYooArRBp6kvptMP7DEXHY/HQ88+jzM1ws0AvX92tTwy2YULd21PHfDYHMnmAwERCB+gzrXIhSwQTtnIHoRTUs1JF4nED4OGTSBaNUsciP9iNJ36OgC2a9np/fFj9AI2AdRvzsuMq/PZ5N83p0dSqxUBEYgWnBciELLE1ILTclaTenriEdbo+wCOqNxg4M6p8SOv2rXrjqlG7Q/3D74LBeNRmrZ0Kl+cm+zCBfIV0YhecPdFIIJj7bgmEQgRCMeTpUkJa5YKgb1k0Fm53PCTTpsUiSb+CMAtpvR7WKOzZMPaKUH/04lA+M/YdQ0iECIQridNgBn6+/v1icI77gMQN739P4IiLh4dTY07bUpPz8DprPEPABxZzjPJwEfymeR2p2VIOn8JiED4y9dT6a0gEN3dq5doemEFiD4CYCmA4wDopg69CPDTDGwuaqHUj3cMPeeks35sUteU6XJT303+6rQV573u7o8fQR1T1xHwCQDvADAO5p8z4V5jsrBx584H99nxUZu3z+0JLSMy1oJxDoCTTKxfJOAJEO5eFOItXg0Vlp4dX9w5QSvBuJyBUwEsKben5MBGwFPMPFycKmyp11aVp79/bdfE1OtXQ8MpYF4G0NHE2JDLJv/SyRyopJF9Jje0mpNWBKI53OvW2kyBUA5PGtM3AJxtYbpo1+4iM7YUQ9rnGgnFQhQI0rTdMPh+gN9jDYi3duqHx0dG7jpYdZ96+uJnsIFv11r1WJb0Koi/hAJucfqm3t8ff+tEkb4ExlUO/RQKxLitWOi43m8Htp6e+Ieg0TYGji73dr8BPmcsk9rVgn/LtmySCEQLDnuTBGKuDk+K5PMaa6uy2aF/tcO60ASCgGsYpYfvB236XCSiy3Lp4SHzfWUFRCFtPYOvcSHE5SJ4zNCLl4yNPLC73vSNROK/jRANl8Xe1Uxn4BfQ6EI/9wNq9yD4iUldP2vXyKb/ddVYSewbAREI39B6L7gZAtETS1zMjHss3jJfhXpYgB4HoJYjQmDjNFDJjr3Wc7bB8s5CEwgQ9pmcv9T6exagZ8pfE70AnjamQmePjW16sTIjli79dMeiww7cAvCna2YJYR8xfsIg5WQWYvAyAn63anlPZXuKjMIFudwD/2010+rUUVr6Amk/KY0ncSeAXjDeXStU/vkm9PRcdBJr+j/PElaiW0bTw+vqmch6/1dJTi8ERCC8UPM5T9ACcXrf4Am6YTwK4AOmru0nwrqJ148ctjJbLL8Bf4LBXzUFZFPZJwFeNZpJKRPImmvBCcRMD3mM9dBgfmTovyo/qeWd8SJOHEunnjKBsPE+5p8bjHUnHofHUqmUEuKZa/nyj71NW9TxdzT9pWL2WH60U+dLrPYlwtHBfoKhHsAVES8y8JUunW+0Sr+8L/5u3aDbGaU9kMpl+fUz1+lv82XzvMbFc7PZzT+ba/mSf/4IiEDMH8t5KylogbAwN3xFY21FNjukoqnWvXr64meyQQ+a7eGJ8WW7DcuFKBCl5RhdO88sDnbQuqPxpRpoW5X38XYUeWB0NPV/dWBTOBa/gphuNT/0ibA2l07eW50vHE18iwD1Nj59OXg7VxvZiw5iy2wHNtv9k0ZTw/J+d/9Fx2uF0P0AdZsSjBNhTS6d3OypUMnkGwERCN/Qei/Yh1hMT+tUiFWHPlAtLFmkFF5LAbTCzcOkktbK5JEZ9+azycutlgoWokAQ8Le5TPKLDkacIrGBm8F8tSlt3aWiqjIpHE2oL4nr3/y91rns1HPXHP7W1yceNH8NOA2VHonGzwfoIQCLVB0MPKej2J/JbH7GQf/qJgn3DXwABm+Z7RxXWuZq6H0917olvzcCIhDeuPmaK0iBKIVMCNEfMBAFcBoBiw3mS8ayqYzTTs7FTBTzcB6Em/qt+uQmv4WD2GsG8/lOeKllnJBB/wKU1vvVVbT7ArBjb1FGje9AyXJpih4C4cxKOcx0WT47/L1GY1p+w98GkNqbeJxBT3WQ8a10OvVSo7z17pc9r+8vm0tXkhbBfP3xx+Gm6mW1udQleeePgAjE/LGct5KCFIj5aPScHrCHvkD82tALfY0sihTncG/8o0T0wMw+AuFXKHD/6Gjqf5yOg9UXm8WSHoV7E3cT4bIZgQjAKsmuD5FYfDmY1EFPyj+kcqkN/WtHM0m1ZGYbt8kpF0nnDwERCH+4zqlUEQh3ntRuBGrevyBcOOX19CZuYMJ1lTYQsO03b+n82BM/uOc1NxMmHEtcR4wb3sxTu0/QExsYZGa1N2He1FaH9DxE4I2d+uEjFn4ZbprhKK2ltRLwKkBXjmaG1ReFiIMjks1JJALRHO51aw16k9oLgqX9q4/uKBQjBKwGoPYvKkdLqr+8bXjyBbgHkerU9146MjJSqMfRJjzFLwHth+7584cALDcJxFgIHR/OZO7bX/mtbPm0tc4JbwUGfkaMYQ7RlhOWGL+Y72UeZWrb+ZYD9zDzgKmP+4l4TS6d+if3/ZYcQRMQgQiauIP6WkUg1Ab25OQrSwxNP1UDncDEy8GlB9O76nrltpNA2CyRVQ+z1b6Ag6ngNImlEUJv7+Aygwy14azCpDS6lL/LVjLoromJxT90EpG1UYEWEV8nmfnyfDalnPfkOgQIiEC04CA1UyCUT0TIMK4hQL31He8JjwhEDbZmCIRqRGk8i8Y3iXChC49ttT/wXZ0KN1hZvjmdE9VLagC7DujntC5J5w8BEQh/uM6p1GYIhAo2F9Kn1
jPhDy28duv1R715qqWNE2cSiUC0jEBUGmIS/jVVm8X1xvZ50nhNbkfqMbcT2tJ8mukzo9nh292WJembR0AEonnsbWsOWiBsnJdq2zcdVkJ5CucB+pHGlM1mh/aEY4lriXGjCIT9ZLI0PSV8IZ9Ofi3gKUjh/sGTaIrPg8ZXgKH2M2pDprzZqL3QtPPdnhcdjV56VBFTj5gc4orMtDKfHVZHlMp1iBAQgWjBgQpSINRGYsdhL99eDlNtpvEyCENs0MMd2tTjxxwT2mu3ienGimjBbVI73IOwcl4jxj/ksslPNXMKqvHv6tp3cpFCa4gxACrtL82+HHhhV2exCOUtkVqbOdAe6xaB8AjOz2xBCoTV0ZFvmOx/H0X+eIPQDzMImi0Qkd6Bq0D85lnGLkxPVSd6ehPfYcIn5/MLyGp+uKnHz/lVp2yKxBLngnG3eRlKhRLhqVCfOeBgo/aJQDQidGjcF4FowXEKVCCqbPMBPIsinzU6mvqVQzQ1TlmBm7lGEysJeNjU3h9pfHBlNvuw2h+pe1m92c9H+60qrREyD8d0qqhK5XAd6jS33SrKLrOxravjpc0VU1sV0oLYWAGmiDrsiYFcl753TSNT3Eqbw7H4WmK609QHeftvNJEW6H0RiBYc2KAEwsY23+5wG0tSpZPnOorqiMhT5vMN3M1pY+FagXDu3awepgar8BfHzmf7Lb8gegZOYa1U14xHMQPr85nkXzt1GItE4u9DqBSuY8YogIGb85mkOleidM3VY9sioKAIRAs+J4JokghEEJRd1hGcQNQG6nPp3WsRPK70qAvUUa67Nx7ViFR48cPLqOuGHDcNh2/tt/yCUHGvNGwC0SrT/b0gXjmaTu1sNE2UoE8WltzMYGVpVrnGGdoF+czQSOWHaPTi9xQQGiHghPJvrmI+1QgM4FhwG/VB7h9aBEQgWnC8ghII1fWadXFgP0P7cD4zNFYPjTpHefcL+DyYvmxhFmu7xOPHJnUkEn8ndBoB432VNiuhm+ji+K7tqQM2/aBwbzxBVFpKmW3FMw8CZ8euuzd+jkalaKkzdTo8vc26vcwPwcBq8xGk01+GSzYCfKlbIbIO+Q1H3uIt+FeSJs2RgAjEHAH6kT1QgbCM2UPPkGZ8KrcjpcJAzIqVo4ThuRcoDMbX64RxsA0v7odAqHX5cDSxwcISa0tB09ZVn5OtwlDoHR1/Y+vz4aNA2HwFqGm0H8zXwsB91edN1/FRsf366O0djBlkbDWf0wHgJWb63NTBxUkrT+np88ixEaBl5i8Ug3nVWDalzrBwfFmee+HQ4stxJZLQdwIiEL4jdl9BkALRIGaPOiYzB1A51LNxCkC/V3WCHKCiknJpXf2Icm9tlyR8EgjYHMSjmlMAeBeg/bvNEZ5FYtzKVIonNR2G20eBUMWrt/TOg5SqOr2tMlHGQXgcTD+d/sH4fYCWWnyljTPxn+bTqdvsvpAiscQ3wPgTi/uvAvxTQHty+h6/XX1MAlhSm5bumBxffLXb0BsiEO7/962YQwSiBUclSIFQ3XcZs8dMrACmb3IH3YqC8ajpIJiXyaBzc7nhH1fj9UsgStY90cRn39iHuKmB45e5SUUC1sMo3MmarjZ+3xuEQFREYtFBuuONs6UTHqago1DZJee8gnYrwMp72u2ljii9u0vndVZHlDYqTASiEaFD474IRAuOU9ACoRAsjw6crIE31lk2mvVgBbDdIP68Om/ZKqwC23gJ+ygQqn3U0xc/gw18G6D3Nxjal5j5j/PZVDIWu+iYAuvqeNXABEK1rbSPs4eUQChRe6eTqchAnomvrDrn2jZruY7PvPEFsh7AkU7qAKDOp7j2+GM56TXCqwiEQ9ItnkwEogUHqBkCUXlgqf0FzaBPMnEMwEkzSxulMBv8nwxsLmqhVPW6fvW51upBZkxOrdi588F9ZsQ+C0SpqrJ3+PlUsvah02aWTsp9IKLbFoV4S+XNuMak1uclpuopp9qrd+3vDkG7ksnoBUgFSaxsYqvloGcZtNUAfXdnZviXTk1izfWUQm93HjgDhNUWdYwDvJtYyxZhbCgcPGrM7ZJSdZ9EIFrwweKhSSIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCAgAuEBmmQRAkJACLQDARGIdhhl6aMQEAJCwAMBEQgP0CSLEBACQqAdCIhAtMMoSx+FgBAQAh4IiEB4gCZZhIAQEALtQEAEoh1GWfooBISAEPBAQATCAzTJIgSEgBBoBwIiEO0wytJHISAEhIAHAiIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCDw/9PUXuoYdJv9AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-18"><g><path d="M 261 201 L 356 201.5 L 356 87.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 356 82.12 L 359.5 89.12 L 356 87.37 L 352.5 89.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-19"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 212px; margin-left: 307px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); 
line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="298" y="206" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-24"><g><path d="M 211 401 L 211 426 L 51 426 L 51 444.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 449.88 L 47.5 442.88 L 51 444.63 L 54.5 442.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g 
data-cell-id="8N6JJebqrzA787TgpwUj-38"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 442px; margin-left: 132px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">no</div></div></div></foreignObject><image x="125.5" y="436" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABF9JREFUaEPtWF9oW1Uc/n43uc3UgYrI/NMxUfBBxJfaNUtSiboVKVunmUm7qtgHGfPFoi+iqDDElw3BjeGD+uJ07Zorq7M6FYbEpvlXiTIf6hBfRFlZVGZbtX+S3J/cuIwsPdecW0tuLec83vM757vf953zO79zCOus0TrjA0VorTuqHFIONVkBteSaLLhjOOWQY8maPEA51GTBHcMphxxL1uQByqH2+/o2e0xzkBh7QNgMwANgDsDXDLxenL/us3z+reJKjGlr26f7fDP3g7CXiTsBbAHgvTTXBQK+BeHYoo/H8meMGRHGZYc6OyM3l9ibBHDHFYGEdzPJ+MC2bdGroGkHQPxsDYhgTj4Hwv5M0vhSllQ0GvX8PE0xAIcB3Cgxbp6Bt7mov5zLHZ+tjZcitOTjQX2RhojRLQFmhVw0mXtyKWOiUXx7OHqTXqJjDOxoFLu8n89B8/Rnxk98U+1rTAgYBmgO4H1OABnImkvFnZOTo7/ZjesIR1q1kucDgDqczF0XWwDxrkzSmLS+yxCy9oN2aa84wS0zcySbMj4SDarsl6tn3mPmXieT2sROkVnqTqdP/ihDqDpHGcAYNH6ldROmrI/nz3sCJplvArhbBMTAkexEfFDU5w/2Roh4GECLoP8XML+wpHtP5RPDv4bDAxuKxYX2MpkHCfALkwHjnRa98LQsoTKYX2q9BYcMw7CIXW7+cN9tKJmfE3CnAMjweQv9iUSiVNvXtj16bcsCTgMUEIyZQpl3ZzLGD/V9VmIijY4w4SnBuFky+SFJQpxe2oBuu1QZCMZeY8KLy0AYX/h03p1IGH9cIUIotp2ATwTuzGqs7UylTljZVti2bn3kBq1F/1jklLUipAgx4flsMn7QDsQf7H2MiN+XJWQrAGjI573wZL2j9fMGOnv7mCt41hlY0zgnRwjoyU7Ex2wJhWK7CFi++QUO3dP1xDUb/1ocFaVpZno8mxo53ihJhEJ7bi/BkyBUDvbaNi1D6HcyqSudHvlqNQh1dOzdRHp5XLDnGuJU8UOh/uvLKH4qSPd/yhC6aIJ35CaM/GoQsq1IrMO4AU4VPxyOblws0ikQHqj/J0VI5JLfwR76l+Xy/1xyayEpNFzbThyyHPaHYocJeGa5281J26tPKBjtIaKTgvrwPx2sBBxoelKwXLFSt6aXz9jUgCstfQpk0oOuEKosu2BsPxGO2lTxjotTEN7IJOPPuUbIKlB9C2Ss7GJXV/AAZzWz2JVOjxZcI2T9UiAQ2cKa9zSAuxqVO3b9DHwPjR7Ojo98Z8W4Sqiyn/65tQ4BZD2KOGoMnKUyP1p71XCdkMWg5pHkEIBbJVjNgfhVlHA0kzHma+PXBKHqD1nEfpr23EtkDoArjybCZ6wWD39Yf8eqzqEeGiWWg6shyiFX5ZcAVw5JiORqiHLIVfklwJVDEiK5GqIcclV+CXDlkIRIroYoh1yVXwJcOSQhkqshyiFX5ZcAVw5JiORqyLpz6G8sXc9b1Ei9agAAAABJRU5ErkJggg=="/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-26"><g><path d="M 261 351 L 356 351 L 356 87.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 356 82.12 L 359.5 89.12 L 356 87.37 L 352.5 89.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-35"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 362px; margin-left: 307px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: 
nowrap;">yes</div></div></div></foreignObject><image x="298" y="356" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAwN7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-30"><g><path d="M 101 501 L 154.63 501" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 159.88 501 L 152.88 504.5 L 154.63 501 L 152.88 497.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-28"><g><rect x="1" y="451" width="100" 
height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 501px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">All devices go into deepest D-state or F-state</div></div></div></foreignObject><image x="2" y="480" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tvXuAXEWZNv68p3suASVhUW4miq6XFQVdA5lrh8FwCxAuCT2TIAlgXEDIgqLIT8RdXcHLuqvCD4ygXBKEZKYJgkaQizg709PdEwjyIYsIiAoYSJAvhAiTmek+75c6fcnp03VO1+lbuid1/iFM16l666k69VS99V4I+tEIaAQ0AhoBjYAEAdKoaAQ0AhoBjYBGQIaAJgg9LzQCGgGNgEZAioAmCD0xNAIaAY2ARkAThJ4DGgGNgEZAI6COgD5BqGOlS2oENAIagT0KAU0Qe9Rw685qBDQCGgF1BDRBqGOlS2oENAIagT0KAU0Qe9Rw685qBDQCGgF1BMoiiM7u3q8x8O/5zdEdLcHNZw8ODibVxcgv2R7q/RIxvpP7K+PhliY+dXAw8ndnnR2h3lvBODv3d8Kq+PDAOaW2Xen3nPIx4fLE8MB/Vrqdeqqvvbt3AQE/Vxm/epJbyzJ1EejpCb9tfJLuAeGT2V4ycEoiOvCLqdvr8ntWMkG0tS05wGhKPQTgow4xtpBJ82Kx/idLFU8TRKnI1cd7miDqYxy0FLsQaASC6OgIT4OB5SBMxKORG+th/EomiI7u8AkA3QOg2dkRAr4eiw58rdQOaoIoFbn6eE8TRH2Mg5aiMQgiHA4H/vqKcTKz+W2A/qmetAwlEURPT09wPHnAKoDPzAzBJAADQCDz/0+ak4FjRkfXbC5lkmqCKAW1+nlHE0T9jIWWJI1APZ8gOkK9K8G4IKf6qiM1dEkE0d296H1JBAYJmJXuFP86/V+al+lkiojOig33ry1lgmqCKAW1+nlHE0T9jIWWpCEIIu8eteFPEB3dvRcBuC47+YjxTdPAtryLZZR+Wa0JQn/WGgGNwJ6CQD0bsvg+QVgXKQGsA2h+9rTAzAuZ8FcD9CCAfTN/L/myWhPEnvJp6H5qBDQCU4ogOjvDHWzQrwDsYymXgBeDSPUEg8YWpxlZqZfVmiD0R6MR0AjsKQhMLYLo6r2aCVfsGjxe3xLcOzw4eOuOgoUdKOmyWhPEnvJp6H5qBDQCU4YgpL4PTBfER/pvEMPc2dn3UTasC+v9y7msrnOCoDndfR8wwJ8m8KkAvR9AEMAYCI/D5Otg4mfxeGRMYFDu4FvWF0ljPmCeC9AnAByQwVY4Iv6FmIZNg38y60BORCKRlNvnJrE8E0UjLcEtZ/p1auz02CSISsu9pJ49+7ymlpZtR4OwhIlDAN6TwVj07xUAMQZumxyb8auNG28UFnS+HmFW+OLLgSOIzHPAONZWv6hnMwFPgLC6OcB3y5wzVRqbfUx4ess4LQBjGQOH28bN6gMBTzFzf2oyefeGDT97TaXOSpXp6Ai/nwP0GQJOAXAIgGkAkgA/xzDWpQy64ZGhtS+K9kKhhQclOTgM4B+t9j2cVmXyzZlz+n6BpuBpIHwaRB8CYz/n/E3BvCm5Y9/RUsZSFRM/VkzF+nzk3MWzAiafT+CFNvyyc3OICdd7fY8yWTz7oej8K+QKcmoxmPoAfBDA2zP1irXozwA9AMO8aeYBeMprrbDL4usOor0rfAoR3WUzZ827Z5DcT4gZlTthqA5mvRKENTFS5veJcJoNA0m3+GkyjWWxWP8jpRKEhWUQK8D0VdtAe0DIT5OBi2JDkd+kNX+FT2eobzEz/zQre1Y9GI2ue151bLq7z9w3hcn7AGpz2wSUShBi4X7pZeoF8F0A71KQ6a8AXRCP9v/Src+OOqhzbvhoNnG9sDdXqH87iL+BJK7LEn6xd6yPP0XfAOP8zMJb7JUkMVamkk1Xjo7e/kaxwuX8fmRP+MDAJF1XfP4iycAPW4P8lVQq9fZSCCK9iJpfJ2BphtyLif5XAJfNPIgHVBevYhXaf68EQYj6diTpagIuLN6nXWuAU85KE0RbKHyowfQ9AMd4r0s5SZ4kgy/xWiuyJZUJQr4DLVz8O7r6zgfxj2yg+L6srkeC6Jwb/iSbdBuAgxUn5hYYxulg8zx7KBAVE7b2nsWHUDK11rYIKzaJFAjfn3hrxhWy3ZjsBMhMZyVG+m9XbaC9u/cYAsSCnHWQ/FPK4HkbhiJ/ytZRCkF0dIT/AQEScpygKkuOoICrxsdmXO21AxWESwHjKgZfovgR2cTgUTOYOmN08K6XvGTL9KE/86H66gYDz8Cg0xJD/b/39aJi4Y65i/8ZJt8J8PsUXxHFHjI49TmTAsIhVvkEUcK3khUpxcDq1iBfXOrJza1v5RIEzNRFCBi3+vwmtzPTOYmRfrGpzj0VJAjq6O5bCrCwKM2eFlSHNynuiMfHZnzH67tRJoj2uX0fJtNSHx2UlUC2uBT6SAB+L6vrjSA6QuE5YBIxW7KqsywE4li+ETD+D4hbAHSB8V7bDv0ZIrwGRkcOsyJOMJ2dC9/DRvBeAIc6RjoFxotMSBDoDQbvQ0A3gJmFM4JunBibvkIy8NQR6rsWzCt2vePvhNfe3XsNARfn+gPckogOLLfv4P0ShKWO2UERhqXuyX/S+A0B9KpHn1MM/lIiGhG7qIJHqKy
ap227DuDzZPUT4zEGCYILMPgIAj4i2SE+RWbyxFjsrr/4bGMMzE+DjMcAQeCF88Q2FoMTrTht40ORbapfuUo5jzklTgr/S6BHRT3yvvPTAAm1Zto6sYiKqTPUu4gZYiMl1Fb2R2xeBMYjYBon8HsZaJctbEy4d7KFz6wkDuUQBAP/h4BxAHNsHdpOQMKaN55jit8nDeP4rMpOvN/Tc07r+ORbK0D0gXR95tFA9t/WHzYA9HiuLTY3xEciNzm/jI5Q+Itg+pZkw2MfVzHnDgPj47IxIdCN42PTL3EjCWWCcPo+ACjYOaY7X+BlLf7s67K6nghC6FCN5qb1lJ7M2UdM9p+mxie/4NQfz5kbfm/AtI57Qg1V8HidIFwWyhQz7jYDfJl9l56pmNpCSw4zOHkTQEfY5XNbMCUngJfZoHkqO9fZPUve0ZxM/hogoVMXzwTAp8ajEWHVlnv8EISYLxPJA65l8GfzweKnTcbF7z4YD9tVDpYa6hVLty9i1bzT9s4mg1PHjYys+1+1D0lev3g3M+b/QbDURNnoAOKn+1uCfIZsd9vevbiHYApizy6MYjf8rdYgf0dWXsyToEk3OEixLAdT2XxL32HRnQCOz5sfjLtTAePz9oVL/F5UjeoVONM6pZhiLtg3UikirA0gednw8F0v22W0qRSvcYwlCLSyObj5Yr/3YzIM0uuSerC+gjuI/EpfZabPT+6YPuBcVNtCSw43OCVOwc74dCvi0YHr3WQrRQ2dUfcLR2Q7EScB+i9zMvgtp7rSQ2XtublSIggXcAt2jlkAnLpusXPy41ldTwTR3tV7AZHlFJhdKFJgvnLmwfium67Ua8fqRRASK7CibeUmf9L4IcBC35t9XkCK58XjkefsE1OQUPMO3Lvzwqoz93eboYHbJBZ/l8TfkhK/H4Jwmk1n2n8IKe6LxyP/1/2jkpzqiK6LD/eL003uDqatOzzb4Z8jqixavwgL0B4Kn01MP7Qv+kQ4JzY8IO5x8h7nyQoSWZzvZMbiboB6dv3m70TnNV7it85Q71nMuNU+f6mISs4yEpj2+lcYuLJgd+pCEJlAc2tAdKpNJnE5elk8OiAwlN6LibJtPQtnGsnAnQ71zZjJfOroSET4VpX9VIggnkKKT3V+U3bhpKe1IqcuvwTR2Xn6/qbR9AABH7O1vZWIl8aGI0L96/q4qBrdNldC+1P8kXzE0p1jtqaOjvC7EKRBMISFT+bh+5DCIpXLvnohCNliSkT9429NX1rM4kL+8QNuBCGPjuuqKioYNNlJx021VximXWlRKlBPCQ/62MjAVySLpWq474I6LV180Dg+Mbj2z8VmpoRQnadaiUoNnqoiR5vU3t0rThJioczO49hEK060qz8OP27p3m97a/xn9tOAaihpJ+mWYjjghpN8M8D3wMSSYt+hRRJ7bbuNmYVFjK378tD7MqJn4KpEdODfVAwIhGUVAiRU2O/eBbWarMXmifi9AgShTFgybYsZTM51u8PyTRAOY5P0Bly+cZFh09W1OGSSuT7ry2aVIfwgPjxwqXOslAiiYHdUXGUk+zCVL6vrhSDausLdBllOgXtngH6DTD4hFovEVSalLOKtG0FITl3SE4BXu4V18BMTweC8jYNr/mZ/T7KrLqpmkhCYKxaqJwjZRkLlEj/bl4w6Tywq4t5HPClmWpAY6b9P/I/sdz8fkksdEwyclIgOiFD31iNdfBQv/zO75wcBEndYjzPoqSYyrxkejryqMse8ypQ7fyVm6653ECWsEQWiSxbWrSb42NFoZGO5WJRNED7MeyW4e/bDD0FY9xfJNyMAnWxjbeXNd+adwvWZ8ByS3BOPR4Q1We4pShBSyxfg2kR0QFiDuD4SXbfyZXW9EEThDpVHA2iaH43esVVlwkpMQt1OEGKnehMB52brZcnlb7E2JQYCb5rMJ4yORKL2d+XmyPDUkxaaOHPBTjrbhipBSOaI8iYivTD3BHck97+NAKEu2wjm33HAWJu9TymQ2eUj8MI1fae2/x0AwtlykpMTtXf1ribCWbbxq6pVUrG5IH4v8Ffxschl8XX2XXZJLZvnbqdLL7llBi5QVH8Ww6NcgvDTH0k/KkYQMoz8WiIKrCQkVrDxEeWKEoRkZytddJwDJJs0qpfV9UAQsoWBFYjRiUNnV++PmfCZ3MIhsWJyIZJLE8MD3y828e2/d3Wd8naTWoW11VG5v7t8YIXmyJ4qwEIC87DGUiaIUO/niWG3PPofg3csGBn5+XY//XYr61wgCXjw73u1nP7EA7e96af+9lDvFcS4etc7hSo5yXciio8xcM9Oi51VLcG9B0W0AT/tllNWttP0s8jlyN45RhKS6ewMfxwGPcjAOzLvSRebYv2R7o4VncSK113eJTUzLXKaq7q1KbnkrhhBtHf1zSdi8Y1n70SLnv5lckoMTqSbV0+CcPF9cN05FiyOhSlJlS6r64EgZIstE3wv2u2OD0ymQpHunID7AZKaVLpNTAY3UdpaJeer4bYoSNp03b1LVEGeO31Vgig4WjN+mhgZWKaisy6+IBTu/AF+FjCEI6HPh4WJoM3EsfAk6WLtZm8nbXrI6OcA3T3rAPOZajiEZRuULgA+FrkcQTgXJAlBlGMZJ9lQ5Yfy8XnqcRvYMk8QearLYpOnmgQh2dhJ1cjFZJRtgCEhY0+CkPk++PFpkFuoFL+srgeCkJm6qV482gfHuVjKCMLF0qbYGKv97rIDczFHlqqZJHcbnt7xigRRoJYhxk9iIwP/otYx71K+nZH8NfrHICVDTrPNrq7FR5hkCqcyFWdKcUpaTybdOj4+/TfFjB78iZe1DAoO2e5nhBmR7xzMBXNTThD5RgmAFB+VPvj59lXqy5YpkyB83YVUkyAqiY9zg+abICSXRn7GxK1s0YtePyCodLIUoac6QQhMJPcKBZddMiIppvNUIQjpB1vBTFq7gyAEpkX9COSTUZiD3hyk5NVO0ill7op3JJsOX4tctt3dThBlkI0dO00QhTOpYO2UkL/rCUJqIlfqbHW8V+wUogmiQkCLajx0uBIDhALVkcQSSOog6XVqkl1sTlWCyOKQiUV0SSYWkdMD322AN5HBS2NDkYfLnQFThyD8GYZUScXki1wb9gQhCd7pShAuDkzlztvs+56e1Zog4GtCljEohX4Ijl18e1ffp4isAH+Zp3imQLUThNQ6qKoqJj8mtGVg6nyV2nsWv4cm+XgYfLZLyAP7OyKG1wnxobW/LUeGtPmsVjFpFZP7LFLRvrgRhMyPQYSX3gSQ+K+/h3i6LcyveNfzsroeCKJSO1znAitbpCRWIL4uxfwNRn7pAnM32zFTcpHl6SCZrVmFIETZal5Sy5zXKnnHUSrmwgGttfW1D6YosJQYfSAr3Hb+o+CFXax9cTqkptQQpcM+W48fS5zcWNb4ktrpT1Gq5ZkTn6miYqrcJXWhP4Xs+5AShNyrt/jlstuklTmMAe711QNBWKEWCm3bXcOLuPXdaWopvaSWfcwV1Md7LSYSVWLu9CIxUlCKqaVKEIXmo/Bt5mq1RbgFzK+A6FER2K8luG
W1iOHjNDEuFmiu2KJbhd+pI9R7HBir7fGLhDc5Twbmjo6u2Vxqmy4EKfV892qjYEGSXVIXBvIsaYNTTVKfKgThtBgj4G8w+dhYLLIruJ/CpJFuICRWmlKCkNl0F7uY9FyECoO8ieLunrih3i8R4zu5Or0ChIV6b7WH0/bSuSvgllfEaaIKyD2T3epVPYVUyDsSmUG/n4B9CfiDCfzeMPmWYpPHGXojS2LOxUHVD0SZICpg012YvGhXEqRKhJ4XG4VMBFzhKPeSiLLJbD7Y2vTqumwgOYtI2TwZTCJq72wGYq3BLUtVA821h8LnENMttnlUERWjJLGTL49bqSmk5FvcAxzlfI1HNe8gdrujnHRSuERu9bPgFsb/AVxj+dQJQUgu+iaYaYmqw4w0Po3LyUBiMfaGwcbJIyNrRTYvpUcSmE3JYakwpIJwBMOS8STdbPMgVnKQFIKqEoTkAly87unRbQeiWOBBWagIP/GBRFuyGEFOoizXY7tSF8rOSSLxTyhqQWivQx4fSR6LSRJqoyDMdbFJLImt5Wth9qp/qpwgpJtJn74iVgTlyf1X2h14oRpqw+Wj8q1acQ6WNK6LS0ynOlExicVhGgJYB9D8bH8YSJgTkycXSxPpFuzM7aK0q2vRR0wKPJDn5AY8ON7KYZW4+JlLSRGDyB5q+LHUxORxxWR19lMEjDNM+oxIZUrArHTf3UNrSBYmpWB9UhNaP8H6uvoWEvEaW/KiwgyHBpwRRreAeEF8OLKh2ILlEop8jGGcmIiuHcy+L9nV+QqeJsnU+Cev4G7F5M7+Xk6wSdcw7C6LUVWC9XmooVUxyJabKgQh+iOPll1usL7CSMiirQIVk2Qnq3QxWWzAXE4m0svqeiEI0ae2rvCxBpFwfrLFXafbWoLmhW5Zr6w495twGYiucoZL9rCkEfro74HxuXwsvdvK7HKnkUHX5u0IrMB1WJEYGbBn93MdJok6Riy8Iv2n5dLvxwJI9QRhTfbOcAcbVkDEfbLCiYvJYsSY2d2KccklVhKXbM1NWz5rV+3Ixk8xexu1d4V7iSzVz66x58IIoy5Oh0pE5BL1t6Rc4bLBlYX7Lhau3lKrdfeKtJoi9Wt+4h9/4b6TDL48EY2IkDG+w30z8+LESOTnxdYWld8biSCKhURxCfctzV7nxMYl3Ler5VweQch9H/zp3b0GSx6vpvCyup4Iwi23gzhJMPHy0eHIU/Y+W0naW5r+G2wFbrMnm0nvwz0un11OAeKtRxmBFYnoWrHrzfvQRD5aYhKB/uwJjQTzF11k8+ROJzqyR0a1/+wriJ4fgkjr+GXEyI+aFFw+Orzmd/Y+p5PMiMxzJDJs2T2WpTHt3RMSYSuYL4OJO5yhr9vaPrVPIDh5FRNEEqOgDQjXRV8aQhlwTS5jbT6sXMJY5Uj2pBxWWnlhlCQMckvi45EsKd1cCQmDRMIrWXIiMZYvbjJOJeL/v9D7XD3UvTIOk3QPCJ/MlnfzLPd7h+Bs3+/7EvVc0bwTbgmD3PKbZ1Lunsvgbzuy+ImMc1+OD0f+S0bieQQhifCnHIFVZZDk1lGFl9X1RBCiXx4pMe1pFMVuyS1dZQ6eYjtxj/Sm4ryXS79ppTk0cZTUTBL0PAzztPhQRCyuSo/LLjjzrj8LNp8E4YWvaP8lBqIizSrA7wRhrsNkWpQZI8LS2PDAOllnPVOaAmMgPA6mDFbmxwCaLUk5OsbEX0gMR1a6AOpCdFbp7QD/DjCezKyyIhOeiEArUnk6nsoujKJymfoy06hCylGRNTCXf7wmKUf9bm5UJnhdnyC6+s4HsfOkn3MrIMZQc9O0zzqCPVIFUo6K/ZlbemILVjtByHwffF1qqQyUxOoEzuxb9UYQol9H9oQPDCaxJj/7V9EebyLGrUy4IrdrUTBfLTHBfKYJQQ50RimOVhJdeGY9owviI/03FO1tpoBfgsiScPMOEmlEhVrLzyMW7gsTw5FVXmqM9Om4pPotAlLJjJZO71mQ2U+1LyJF6erWIF/sprpUrUhWzmecqGwVAxCbDfD/l6tT4UI0E3FUjIc9JayK+KmdKsOBiVb+rMq9m0qF2TL1TBAeBJ4R39WbnDq7+z4rORWoQCM2B/89OTbjq15xwHIEUW4WOBWJRBkXD+08+/p6JAghu0deV0n3eZiDgWVImocRkNOjFjtBZCsSag5qmvwGASJ4nTMBvAzupNvxUnVsXE54RUNrOOsvhSBEHV45imV9cFPzufXXVr/Qrb9LBZcS27hg5wlE3D/NUGkDgEjSctnMg3igmhFefcSJSopwOONjM77TtNfrIiR7zuRc1XFNtNXE/F1mXiQ5jcm+l6eJ8MXYcETk9Xa9r1DEs6BYPROES3pbGyfjxSBSPdHouudl/U+rKq3Nj1AzF6i1ne+IOR1g419HRtY+WgzPHEG4xLNXNjks1lD2d5dkNXmX1fVKENk+pPW0wbMJdCbSnqpvz/wmHJseZsL1sw7khPjYVaK5emFnqUfGaQEYyzh9IXugbRJs3pmH+o9g3JyaTN5dzFpJZYwK9aHFQ2tUiiCy9Yh7n6Zpr5+QiWMkVDHZPqfAeJFAg8LCKouxSr/sZUT9wdatbQEYy5nMLoBm2khYqINeYNB6E3Tzhmj/s6UsWJYVW8u2o0FYImljDOCXiI2RFMybkjv2Ha10NFcPTKgtFP6wwcZ5gHk8QCItsLhnSQL8HMNY10ST12eDBhZ8iz7zM6RjUqXCBCwC0Yds6kFxKvvzztzoDzBozayDUo9WkxzrnCCs4cos9CK9rbgnsasfVUzMaU533weCzGczWSH/7euSiBz8DDHuTxKt8jOniyYM8vvx6fIaAY3A1EGgIOlSBUOyTx2Upm5PNEFM3bHVPdMIlItAYbgZhTu0chvV79cPApog6mcstCQagYoikFFv/jgdIgSPEtF9zYHNv1QNASK5kyopxlJFO6UrqykCmiBqCrduTCNQOwScITyEh7zXZadTMqeTnd/3a9dT3VK1ENAEUS1kdb0agd2MgNQykfnL8ZGIsErytBSS++P4N1jYzRDo5stEQBNEmQDq1zUC9YqANCgbYJlDJycnvy6zerMsyFq39RKxCI9h92OoSCKjesVKyyVHQBOEnhkagSmMgJcXtTBrBdEomMYt02k2PwGif5L43RTzIp/CCO7ZXdMEsWePv+79HoBAJuz37fakRD66vR2gFfFo/23F1FI+6tRFGwQBTRANMlBaTI1AOQj48KLONiPCXiRM4vOcASnLkUO/21gIaIJorPHS0moEykIgz7MZ9I95HrtWMEj+AwPrUkYg8sjQ2hfLaky/3PAIaIJo+CHUHdAIaAQ0AtVBQBNEdXDVtWoENAIagYZHQBNEww+h7oBGQCOgEagOApogqoOrrlUjoBHQCDQ8ApogGn4IdQc0AhoBjUB1ENAEUR1cda0aAY2ARqDhEdAE0fBDqDugEdAIaASqg4AmiOrgqmvVCGgENAINj4AmiIYfQt0BjYBGQCNQHQQ0QVQHV12rRkAjoBFoeAQ0QTT8EOoOaAQ0AhqB6iDQkATRHur9EjFE0
pP0Q1gVHx44pzoQ6Vo1Av4RKJijfqqwYiLRNgL/0QT/CsHAXYnBtX/R0VT9gKjLVgIBTRCVQFHX0XAIiKB1QTYvpRSvisUij1e6A2URhFQYfpqZ/n3WwbwuEomISKtT7unoCE+DgeUgTMSjkRvrrYPhcDjw4iZaBMJhiejAV+tNvmrIowmiGqjqOusWgba2T+1jNE98DkxfBJA0wceORiMbKy1w5Qkie1jGg5NBXvbIYOSVSsu8u+oTC+9fXzFOZja/DdA/MeHyxPDAf+4ueSTtUnv34jmA+QMC2vckjYUmiDqahVqU6iLQ2Xn6/mw0/QbAoZmWtjYaQQi5GXgGBp2WGOr/fXURq03tHaHelWBckG2t3giiPdT3r8RWCtaAJeMepNLWBFGbb0C3UgcIhEILD0pycBiAyIMgnloShFJb1m76r8n9gOb9mXgBiJeJXbUEvqfITJ4Yi90l7iYa+ukI9d4Kxtn1SxB77p2nJoiG/rS08H4QaASCcPZHEMZLL5NYPH8A4O2O3+9vCfIZg4ORv/vBod7KaoKotxHZJY8miPodGy1ZhRFoRILIQtAxd3EnTPNnjrzSKWasSIwM/KjCUNW0Ok0QNYXbV2OaIHzBpQs3MgKNTBAC985Q7yJm3AZgmm0cnjQnA8eMjq7Z3KhjowmifkdOE0T9jo2WrMIINDpB9PT0BCeSB1zL4M/aoEkR0Vmx4f61FYarZtVpgqgZ1L4bqhuCSNulpxbDxBKQdSkndklJBv4X4NU82fyT0dHb3xA9LNdRzrJnfjlwBJF5DhjHAngPgGAGvc0EPAHC6uYA312ufrenJ/y28aQxHzDPBegTtiTxSQB/IaZh0+CfzDqQE6r27QULHePhliY+NSOrZZJHMC8B8Elbe9sB/h0RrSy3X7XAb/Yx4ekt47QAjGUMHG7rh/ABeIWAp5i5PzWZvHvDhp+95jbz27rDsw3QgwD2Vfk6GDglER34hUpZrzISM1elS+pi7XZ1LfqISYEHABy8qyyvbwnuHR4cvHVHsffL+X327POagq1b2wIwljNxKO+7sZz78Gdi3J8kWrUh2v+sm2Of9U1M0j0ga34WfxSshnp6zmkdT77ZA9BpAHcD9G7HnY2Y/y8AxkMMWjProNSjXt+bk7Q8hcz//lyLzplz+n6BpuBpIHwaRB8CY79M4TEAfwboAZPMG0eHI8I6jYsDU/0Su50ghF16IDh5FRPErii7SMt6/ipAn45H+3/ZHuq9rERPauqcGz6aTVzvYhnibHc7iL+BJK6LxyNiEJUfy+kniBVgEg5w6MdNAAAgAElEQVQ1zstFST38NBm4KDYUEWaYnpPDjSDGx9FMQeOHzHxGziRPLvF2MP5jYseMazZuvHFSuVM7Z3C18bMWjxR9A4zzHaoUNzGTxFiZSjZdmd1A2AtONYIQp4jx5AGrAD4z208GXgwi1RONrnvex1j6KUrtXeFeIvpvAO9SeZGBBBMvHx2OPOUsX0mCsPxagskrQSzMZBW+sxxqnt9bJQnCxxpnCeeFnQr2lSyzWwmifW7fh8nkAQAfVezUGDOfaxAdwsC3c+8o7DDEgk0B4yoGi5112p5Z+eFRM5g6Y3TwrpdUXmnvWXwIJVNrAWpTKW8rkwLh+xNvzbjCa+GWEYQBPtckugnAMT7aHJho5fM2PhTZVuydWuDX0RH+BwSo32cfsh+V1DdgqhGE6Gx7V9+niPin9nnDzAsTI5GfFxtHv79bVlSbcBmIrvL/3WCMiS9MDEdW2Tc9lSKIjrmL/xkm37lTw/A+v/3KlE8y+PJENCJ8HPI2ZZUiiM7OviPZwNoSZBQb0svi0YEfFtswlth3pdd2G0F0di58DxvBe21OS1mBUwA/DRiJNJvyEQR8xHa62ApGDISTVAlCHI2bp227DuDzClAhvEaMxxj0J/EBSNrLvqJkd+7ZL8aLTEgQ6A0G70NAN4CZhSNFN06MTV/hRhKFunSOMdPLRFhkq0tMsBGAns+0JY7z+/ttS5SvBX4ebYyB+WmQ8RiAFIhbAHSB8d7CBYsHJ1pxmp3w0mTNnxOnEWbeiwinANgng8MEwOsB4285XAz6UXxo7W+Vvh6PQtVSMYkmOzvDH4dBDzLwjqwITPhKYnjgm+XK7Xzf5WI8Bdtctt4hPgyMj0tOfVsZxvxEdO1otm5LHTT51goQfSD9N/NoIPtv6w8bdqqKdoU/YXNDfCQiNj+5x+M7s1S3DDwivjNLNPB7mfAJm0rHXpVU9dfRFV4OMuakC7LoV+bf1v8/CxjipJ/5mZ9tadrrOqeKryMUngMmoa50fndJgJ8D0SiYxgF+JwhzJfKJ+f7l+HDkv3YXSewWghCLQcte225j5j77SBHwYNLg8zcMRcRinXuE7s5obvoPgqV2KNz9e58gqCMU/iKYvpX/Lj9tMi5+98F42KmL9GjP0+7c0pvvoAjDutfIPsIU8W4zwJc5+yXmbltoyWEGJ28C6Ii8d8BfSkQj37PjkP235LLVXkyqcknb0wu5rFOGTX+NFHu0JWSsBX7t3Yt7CKbYMGQtdFIMfKs1yN+R3QPNmRt+b9CkG5xYe13YNvoltef4K5yiZXPJ62+Wzry5Sdx3iLuz7Go4mDLwaclchjgZ7EjSFwi4Ml9dTHe0BDefPTg4KBbvgsfvJbV1WT+5/0omfCb/m8EN5sTkv7ncSVF7qLeTGMIkOF9jQXRdfLj/YrdFuJQ7TxcCs77N5OTk150yiu/zhU34pEHGjxynDaE1WVyN06HKfNgtBNHWFT7WILrHvtsgxk/Y5Is9dP0uC5W367uLiuEhpLgvHo/8Xw+QqD0UPpuYxBEvt2gR4ZzY8ID9eJ+rQrJrTIH5ypkH47teF2KZi+wfArzUJs8LSPG8eDzynFNGD4KQHunt73d0hN+PgIV9NtyE+PlPKYPnyT76WuHX3t17DQHiI00/RT5aUUQQcvMO3A1Qj20Bc72wnSoEIVXRKF6UqiwK2TLt3b3HEPBLAM2Zv6mY1FJHV/hykLUhyz4vs0Hz3EKD+CWIzs6+j7LBv7bvzBm4KhEd+LdiO+1QKPzOJOMX+epfHg2gaX40esdWGT5+CcKFwCz1eGIkIlTqrneMR/aEDwwmscY+p8WdhDkxebKXMYafcfVTtuYE4XLJpgSAOHk0TXv9BgLOzeuk++6JOkJ914J5ha28kqoou0y1d/eKk4vYEWUejk204kSn3r6tbckBRlPqofzdibeqyN6HzKllvRUMLNs48PVYdOBrygTB/OX4SESEQfe85G7vXtxGMO9zWPasiEcHrne0VRP8Dj9u6d5ve2v8Z/bTgKo1UUd3+ATAIjxrEfO6sNUE4WdpsKwFP0+MXadYxVOK7FtgprMSI/23yyTwSxAdXX3ng9juHKhCXLmmJXc4fzKDyblud4x+CUJCYL5URZlNnCBAYYklnt1mylxzgujuXvS+JAKDBMwqpfOy3YNb8CyhhgiYJIAW+uoM0O4nANnkldQxsVO/eVIiOiDIIPd0hvoWM1sXh1kVmOsJwO0zLayDn5gIBudtHFyzS0cOwOUEofyR
SHc4kh1orfCT7Yi9FhQ7fm09C2caycCDAIm7iccZ9FQTmdcMD0deVSDWipieysazmncQ6U3W/ncACOe2LcAzPBmYW0mHOUkfIi3BLWe6qYpsOFBHd++tAOYB/Be27hOM1fZ7CDtmfgmis7PvODZ4IQChlp1JjJtiIwNfUaU/yanYcx74Joju3q8x8O+2sVHaAOetJ446xF1ZLUyZnRjWnCDau8KnENFd2YXUr4me7ONwIwhnWyA8hyT3xOORv6pOJll7xPimY0JSe3fvTfaTDQO3JKIDy4vt5u1ySMjzTZP5hNGRSNReTkYQ5HLacOunRH1QoAaoKX5dvauJcJZ9wat0xNKpcoIQCrh2B14A/hikZGh4+K6XVed2sXLtXX0LiXidrVxRFWaxOmW/+yWIUtrI21QU+sZUjCCkm50Swpe3dYW7DaJfAdhbyO53nSwXo+z7NSeIzq7eq5lwxa4O+GdG1aOvsy1xCf73vVpOf+KB2970A2B7qPcKYlztJnN395n7pjB5n12vyYRLE8MDwnxO+enqOuXtJrUKq4ejci8xXRAf6b+hCEFIicSr4fTOOzhkP10x04LESL9QPVlPrfCz2io8gYk/jzFwz04rlFUtwb0Hy3UEmyoEITcTlZ82lSefpKDkBClKCSvD34LxIzMZXF+JE8tUIgiZhsT5XamMieT7lGouVOoqp0ytCaJg5yMup2MjA//ipxPtXX3ziVgspK7x2aUnDad5mnKjBWZueZdakkkhar4fIF+hmBncRMDxdisjyWlFpmLy1KHKullsp1NL/IR8sjsYh9yWVz0x+jlAd886wHxG1fM8W8+UJogqXFJbJ5WCO7iC2fQSCOuZsa41uFe0FBKvBUFYprWpNw9npoXE6APhEFtPKnaCcO78ARSaUiusOxKTbKiqXRWqVy5SU4IotiipSl2gQ5Rcnvl2xlFtPF0u7zjv1xnLV1OSvkkWuv8xeMeCkZGfb1etu5jqrJb4ZWXu6lp8hEmmuHC2m+G6dUn0dT2ZdOv4+PTfqHiETxWCkJ1YnafjIqbQbpgWqKmEpVjTON1BjBMV5paw798Ixo+LhUCx11Upgsjm0jCNlkPB/C4QzyHwEY6wFrJuVIwg2rt7FxBQcYdFIfTuSKSkCUJh1kuK1BdBlLh7LPAWtZHR7iAIgbOIyRVImd8nwmk+PHeFU+DNQUpe7aWDnzoEUWDoUZDlrFIEIcbEf9gY64sRMbMSDOMLiejaDV53ceUQhOV7kcIZYPqcw6HWz5etCcIFLU0QfqbRrrKaIErDLftW0QtViyhM8xIChG+IxANcKsAmMnhpbCjysOzXqUIQEjWG2F3m3XlVkiCyWGYXY2J8MRPLTCVkTVIYUIyPzfiO2ymvFILIJFLq3WmqfQ2Ad/qYjkkwhFpMnFKz/h2aIDRBVO+I5tdszsdklhYtEs1VqfpM9MsIQCdnX7AfYSulDlQSxrsQtfcsfg9N8vEw+GyXcA72GrbAME6QhcuYKgQhMT8tuMCsBkHYQbZUTztoLkFEX8ZxLmEssq94euv7JQgrLMter38TjM8XOWWOAfwSgzYaoGHi5P/s2LHfM8FpWw93RPitJkEU3QxV4BupWhU1PUFYx1VH/tlSEoAX7KAkenqZ81UpF+IqyEti46RKsVxQaUuUcX78Iom9Xxv4YhZTtcRPtd+inFgcWltf+2CKAksll43pqly8sKcCQVjqngDWATQ/h1sJ5tt+MFcoS11diw8ykZoPwr8ANFsSmdnVT8cvQbR3hfuIaLXtBCBETIL4YTbpDpMovldw2gtuF+Z+N3R+/CCcBjQE/A0mHxuLRXbFllIAtF6K1JwgCkxGS9CfF3hCunh4dnb1/jgvXksJbakMlPAcpabUEAEflO3GVerwU0ay0HmGMpDVLbG8KtiF1go/P313lKWOUK/YvYrFIqeGciPMqUAQnZ3hDjYs+/hswEFhI+/b56YMzIu+auV2MVOr80OgwHXT5IcgMuFV7hWR522CbCKTz4jFIvGiwgGoJkHIN4vVibSr0tdyy9SeIJwmqoDfxa3AKc3tFCJxyd9CJs2Lxfqf9AFcNtyE8Fp9SUSZZDYfbG16dV3Wo1SmrgH4PqSwyE8eiQzR3E/AvjuP7n8wgd8bJt/i3H1IFjrfrvgqTnC1ws8K+87myWDqEIcEBmKtwS1LFTx2rWFsD4XPIaZbbGMqVRk0OkG4xPgRZpSnxqMRQRoVe9Kh1w2hfpwLmCL8SyAA8yTVnBNtcxd9wDADIuJpLn+EmxWOH4KQ3L+kvOKjyQCROABWTMU0u2fJO5qTyV8DJJJcpR+FuGJOOUXECNPge3eG+REBDh8n4Flivn5kJPJCxQZZoaKaE0RHR/hdCNIgGO8vZbctdd5xO0GUEdQrK5skLorYsV2biA6IvBK5p6O79yIA19n+9IbBxskjI2uHFcbBKtIZ6j2LGSJEQfbyT+ocI9cve0fMtMsgi4cFFL5fTlA0P/ipkJUXhqo7wkYnCGno7SqdiiXfacpPzgk/d1h+CEISR8n3BlMSn61iBJGOflwQ/81v2B2Z/4nffqouO57lak4QZQIod9xxIQhLX2tgDYhOtaGwBcQL4sMRYXrn+bjkAB5jGCcmomsH7S/LUkEK2/TxVg6rJOTJeE4KL2Z7KOLHUhOTxzmjOLpcQI6ZzKeOjkREek3PRxJNV7oLrRV+8vhc6jGzCggGkDoONjJBuOQWqFooaKmfDFH/+FvTl6r4nEg3gkyLEiP9IsxO3uOHIJynWr86/q6uxSGTzPV2FR2AShIEZNGqAfXAnelESKY4Edqs9/iuluDenyrFEbHYeuD1++4gCLjk1R1MBrHkkcHIKy4CZ9MeClVCNvx2uqhHlEnZYAkdtUKcH3l7zPeIvNkS1ZHQh38PDJGcxvbQbS1B80Kv3NZWtjaDri2Ib89YkRgZsEettOp1s1BR6Zc0C5fHLrQW+MlPNFAicnnIb0iDyklw8x2iRPVjq1SwPstip3XrMhCJsC2OlJrqi46q3PZykvAnqlFJC0J+e8USchKELHpAVq6CKAriB8Uoxpn7mzsljpivk0nHxWL9j8hw8hu00GVjlSLgqvGxGVd7EaxLVkXlzV8p41x3BOGVhEaWl9k9EUmmax4E4XIKEC9uBfNlMHGHc7H3yCHruWi5nALEDH6UEVghcxhqC4UPJSYR6C8X5jvNee6njyImjCKK6SUzD+IBeyiKTOrIPqEPdYT59lSF1Qo/l53dq8z0+ckd0wdkH5XAzmCsciRbcv2YZPphhTwkJX1zpRKEIIRgcPs/UBN/mMg8FQyRe7rAD8TP6bSkDriHP3FNeiPaEd8ONU1+qyC5F+EH8eGBS2UOcwW5QICnkOJTZblQXOJDjQlfC07xtbI7P3Fp3mTylxksQvpI8957hZeX3cXBME6PD62NuWErPwVYbLbeYHxxZCTyB8e7Hvneq7sRqEeC8E5jCbzEQNRKGeieynBXv4rEqXfJ9JZ9fwyEx8H0u/QfzI+5mOmJSJZfSAxHVnoB6pFmUKz6r4ExBNCrVupME0c5YsJkGe9
5GOZp8aFIRqb8FmWX1EJwAE25kva2RErDdK5qZ1J3pR1hjfBzOYFZPdoO8O8AI2NcYPVHWLEcUDgW7h+T3JggWz9Z4cEJ5tdi0chtpS6quZ1uqPdLxBC5Oarw+MuRXo4AcnWJVWN+2kwxn5nbABJ3i85F2DMHi2QBztSPTQAJQhpqbpr22Yx6xSs+lOo8eZWBHbaUAyLOkVT9JQSRRD4Wfxae4q8ANA4yn0uNJ890qoJd0rVmhyO3xnmmBCbEg0guqmSUXj/zYbeomLICumRSKyb/dgJWMoS9Nfa1CiskMkmrIuhGAML70u/jK4F4ecnU6XkYdIZXXmSZLp1AVzJYxMRXiWMk+u+rT7XAr8T5kB1LkaJ0dWuQL/ZS57V39V5AZBkTSL2AvdQbfiaN5ATh53W3ssJa7U5OmhcWyYZYibaydcgyK/qp/0k2qNctm5yoSK5ytjeRn/HNZ3woh6w8zMHAMmPS/IpdpeuV07uz8/T9TaPpAQI+Juu4l/qsMxQ+iZnEhiO9Vvl6arcRcBNrtxKEEMqfyzw/TaaxLGWYZp4npAJBONr6rt38zmvMRLo/Jl4+Ohx5ys/YZo7a36A0keXfmcgrkuaSlhV1u2ylYOA1Slr256Fq9Mk2VlXDL9PGBTt3oVcBmKGIucjvcZlTrSZ7V4GEVJPieIpWYYJQjmukiJfvYp2dfUeywTcX5HN2r0mEav8xTzZ9dXT09jeKNOhJQrIFOKN2vpqAC93URo42/8iMK2YdzOuE6rXg1FLEGixzihAZ8WRhXzzvsiwVF/N3mXmRoqzbQfwNJHGdHzN534Oq8MJuJ4isjGLAJ1J0GjN/Ni/6oqUq4Q0MWjk5NuNXQhetEs3Vq++Wnrd1a1sAxnImswugmbZFXBxTX2DQehN084Zo/7N+kv4427XUM+O0AIxlnM4DfaBt97p5Zx7qP4Jxs5/ol17WOFZEy1dwAjMuAqg7o1ZKgfEiDPzKZP7JaDTyWFl9qgF+YoxaWrYdDcISyRhZIRSIjZEUzJuSO/YdVbGsyY5NGiNrrl0K0GH5qjfv/MQK35RVpEyCEPPibwBFGfjlZCsPqVjCqcpWRjma0933gSDz2Uw43lKPMvbL1GepXEjcHxBWj7fwL/zKnL5TIpHe95MO9aHrAmw55aVSnwHRSUg7qmbVqOKE/OedDnUPMGjNrINSj9rv5CQm3EXN0tMOgOblYJwEsjJi5k6hKqG4hZ9ToCnVx+A+gP7R1seMuoqfAIxbWoLmfV6n4DLGz/erdUMQviXfg1+opbnmHgyz7rpGYI9HQBNEA04BTRANOGhaZI1AAyKgCaIBB00TRAMOmhZZI9CACGiCaMBB0wTRgIOmRdYINCACmiAacNA0QTTgoGmRNQINiIAmiAYcNE0QDThoWmSNQAMioAmiAQdNE0QDDpoWWSPQgAhogmjAQdME0YCDpkXWCDQgApogGnDQNEE04KBpkTUCDYiAJogGHDRNEA04aFpkjUADIqAJogEHTYusEdAIaARqgYAmiFqgrNvQCGgENAINiIAmiAYcNC2yRkAjoBGoBQKaIGqBsm5DI6AR0Ag0IAKaIBpw0LTIGgGNgEagFghogqgFyroNjYBGQCPQgAhogmjAQdMiawQ0AhqBWiCgCaIWKOs2NAIaAY1AAyKgCaIBB02LrBHQCGgEaoGAJohaoKzb0AhoBDQCDYiAJogGHDQtskZAI6ARqAUCmiBqgbJuQyOgEdAINCACmiAacNC0yBoBjYBGoBYIaIKoBcq6DY2ARkAj0IAIaIJowEHTImsENAIagVogoAmiFijv4W10hHpvBePsasDAhMsTwwP/WY26dZ0agT0dAU0Qe/oMqEH/NUG4gxwOhwMvbqJFIByWiA58tQbD4auJepfPV2d0Yd8IaILwDZl+wS8CmiCkiFF79+I5gPkDAtpBWBUfHjjHL7ZVLF/v8lWx67rqLAKaIPRcqDoCmiAKIW4P9f0rMX8fQMD6tc4Iot7lq/qk1Q1kpqUGQiNQZQQKCILxcEsTnzo4GPl7lZuu2+rbQ71fIsZ3cgLWHUHUt3x1O7BTTDB9gphiA1qP3dEEITtB1PcCXO8EVo/zfCrKpAliKo5qnfVJE4QmiDqbklocRQQ0QSgCpYuVjoAmCE0Qpc8e/ebuREATxO5Efw9pWxOEJog9ZKpPuW5qgqjDIZ09+7ymlpZtR7PBwuxxLoADc9YuwGYCngBh9XgL/2LjQ5FtKl3o6Qm/bXyS7gHhk5nyfwxSMjQ8fNfLbaHwoYZ1YUrzAEwD4TUwb2DQysmxGb/auPHGSZU23Mo0MkEIP4CXNuNQmMZygI8DcIiFUfrZDvALgPEQg9bMOij1aCQSSSnj4AWqwkW+mCetrdtmMyHM4KNAOASM/WzVjgH8ErExAsaa8fHpv/EaS1/WZgryCTmseZc05gPmuQB9AsABGfmSAP5CTMOmwT+ZdSAnvLArZ/7pd0tHQBNE6dhV/M2OjvA0GFgOon8D8E6FBsYY+DFPNn11dPT2N7zKywgCKT4KAToNwHdti15+NUwXxEf6b1CQxbVIoxJERyh8FJiuA/BRtf7z02TgothQ5DcA2PlOpRbgEuZJVpRXAVwy8yAekC3GlZJPNGbJGMQKMAnnv7cXx88bu+Lv6xLVQEATRDVQLaHOI3vCBzYlaTUDx5bw+pNsUG9iqP/3bu/KCIKAaxn4tis5AH9KGTxvw1DkTyXIlHulAQmC2rvCvUR0iwc2bpCkCLhqfGzG1c7deiUW4LaehTONZOBOgNpKHJMUgW4cH5t+STXkEzK19yw+hJKptSXImALh+xNvzbii3FNridjo1xwIaIKogynR2bnwPWwE7wVwaIE4lroHQwC9yuB9COgGMFMi9iaDjVNHRtY+KuuShCAmAIzbdndbGHiYQG8CZjtA/wSilfHh/otlu2E/sDUaQXSEwnPA9AsA+zv6uRlATIyF+DuB38vCC7pwhzzBzMsSI5F++/sdXeHlIGNO+m/8cQCZf1v//yxgiJNH5md+tqVpr+sGB2/dkf3T7GPC01t2UESyiRBqrVcAbMjKBrA4gXbaVDp2USaYaUlipP+uSson6vKYyykwXmRCgkBveM9lunFibPoKTRJ+vrLqlNUEUR1clWtN62jpTgDHO156kgkXJIYHYs4FWtwZENNNVoiG/OdJM5icPzp410tOASQEkS0idpTXcMq8Mh6PjGX/eOTcxbNgpCYfGYyIhaesp5EIoqfnnNbx5Ju3A7Qwt1YDz5BhLI4PrX3cORbiHqC5desyEAmvaJsqhWMTrTjR7Y6oFD+DgnfSAt6dMvhSt1NeV1f4QymilQQcnT+IfB9SWGQfc/vvpcjnQmApZtxtBvgyiYzUFlpymMHJmwA6wtZ+isFfSkQj3ytr4umXy0ZAE0TZEJZXQXtX7wVEEHrudMiF9DMw0crneV1AWwvTXq9/E4zP571L+EF8eOBS50LmRhAM3DI5NuP8au7WGokg2uf2fZhM/jWAgzJjsYVMmheL9T/pNdLtXeE+IloNoDlTboKBkxLRgYdk7/ldgNvalh
xgNKVEXbn7ECLqH39r+tJiY2fdBwRoFYCwjfReDCLVE42ue74S8ok6JASWAvOVMw/Gd70uoDMX2T8EeKlNlheQ4nnxeOS58r4w/XY5CGiCKAe9Mt8VO67mHbgXIKEKsB4GEubE5MkbNvzstWLVW9ZOe227jZn7bGU3GZw6bmRk3f/a33chiDfI5BNisUi8WFvl/O5L967WUM4CS624eqn2rr75RCzUS2nCVrTWSS/CWAfQ/GxrxPhmbGTgK5VYgNu7e48h4Jc2AlIirmzbbV3hboPoVzsNEvbO/O11Mum4WKz/kUrIJyMwQF1VNGfO6fsZzU3r7adiAr4eiw58TX30dMlKI6AJotKI+qhP8tFPAHxqPBoRH7LS09nZ91E2rB1vTl8uy5EgJwgeDaBpfjR6x1alxkos1FAE0d27gICf7+qqOkbtod4riCHubF4C6HEG35OIDgiyKXj8niA6O8MfNw06l8BHAPQeIoo2BzafNTg4KMxFiz6h0MKDkhwcBvCP2cIMnFIx+UJ9i5n5p7bTrO8TQGdBHfzERDA4b+Pgmr8V7aAuUBUENEFUBVa1Sju7eq9mwhWlLEbZd3p6eoLjyf3vsKsPAF7fEtw7bL/glBEEM36aGBlYVu4ldLHeNhJBdHb2HckGPwBgRqZfrlZJxfrt9btfgiinLfFulQmC2rt7xZ3YuTbyuSURHVjuZ251dy96XxKBQQJmZep502Q+YXQkEi23//r90hDQBFEabmW/dfhxS/d+21vjP7NbpDBwbSI6cInfyju6+s4H8Y9s7xWoYKQEUaNsbI1EEDK1XxpXfhqEmzkQiCQG1/7Fz8InG8+pRBDd3Wfum8LkfXazViZcmhgeEBf3yk9X1ylvN6lVnLiOyr1UAT8cZQF0wQIENEHspkkxu2fJO5qTyV8DdHi5H4NEv7zVBB87Go1s3HXSKPCkBjOdlRjpv73aEDTSJbXAojPUexYzbnUYDuyCKW16/AADayZbeUjVm92Ocy0IIu1p/doHTQROAfGnLNNlmzFEpVRMkp2/6Or9AAkiVX4Y3ERpa76Dsy953eMoV6wLloyAJoiSoSvvRb9Hfq/W2rrDsw3QgwD2zZRTIwgPHXR5vct/u1YEIcNUoR8Fpy3r8n/atmsYfJ4rSeyqOJU5XaxKUmDtI0NrX1Ros9Dip/R8EDS7Z8l+zank+9jEew2ywln8MwNi45ENayEVqVIEIZl/KhColSkdF7X6dSlPBDRB7KYJUmWCENZQeReQUhWTJggx+lKLKCsG08vUu5N0r1EMe5KdSU8S0ZXvOtBc72XaWc4JQhBY07TXTyDGF0E4sgRvb0tWTRC76eNvoGY1QeymwaoyQRRc7mmCcB1oT5PZ3GIMXA5AhLcIKkyZFAOrW4N8sVvWvBIJgjrnho9mE9dn1EUKolhFLC9mkHWiyAYa1AShit4eXE4TxG4a/CoThFYxqY+rsk+F8LLekXyrmwiLwDjZJeRJrmUCrWwObr5YZopaAkFQR3fvhZ6BFdMtC7PXTQA2glj4t/xPS2DvJ1KpN/atlpmriopTfTh0yXpCQBPEbhqNSrsncxgAAAbTSURBVF5SO/0pCPgbTD42FouI0BDWsyecIGo9lOkxNI/eGfL70xnLm9zuPCOLqzObX4Jo717cRjDvs90zpU8G4N8SjFtgYqi52fyz24nF74bEj3zCRwMGPcjAOzL9TjHTgsRIv5BXPw2MgCaI3TR4mZg/EYDETtR6iPGT2MjAv/gVqT3U+3li5OLWMPAMTwbmjo6uEcHlNEH4BbSE8m1tn9qHgpPXE+Es++tu5p5+FuC0r8sBqwA+01b3doCWx6P9Io5XQWhxZxeqSRDCi5qaUkMEfDDbrsxZswRY9Su7GQFNELtxAHa7o9wUu6Qudygz4TJOyCROamdgf2Y+U9VRyzpRpFL3gq2L4wzrY1V8eEAkfsp7/BCEzIyUgasS0QGRN6QoOYiGJQ6AFbuDkG12AO9ggLKxyhDN/QTsS8AfTOD3hsm32E/C5Y6xft8fApog/OFV0dKyUBuyMMxejcpMDFVDbXhZsVSyo7Uycy1X5ko4Exb01cVM0w9BSPxcPAMBynDo6O69CLCCQuaeSlkxiQol9b9hsHHyyMhaEd5D6ZH4n/jup1JDupAyApoglKGqfMEqBeuT6r31HYTa+DlPdX6CJ7oQzFcSwwPfLOcEIdlI+NLxd3SE348AiXhd764WQXR1LfqISQERomSXkxvw4Hgrh1UcCdOJkILizsKeve+x1MTkcSqBK9VGV5fyi4AmCL+IVbi8LNw3E+6dbOEzSwn37WY5owlCbeA6O8MdbFhRT/fZ9YZaVFJJyG/XWEKS0NiRluCWM2UWT5JLYKEeUgrTLsiBA3QnAR9zIsBMi5xJg7Jl/MiXeYc6Qr3fA+Nz+e3QbS1B80K3y3Pr9NERnkYGXcuEz9jeFXkkViRGBuwhZNQGUZeqGAKaICoGZWkVuScMcs/R654EBk+RmTwxFrurIMSBJgi18XEJoe6V9Ca9wAXoYgb+Pc9pjfkemFgiS8ojiZ+1BYZxenxorUgQlfe4xIcSvhY38GTTl2X5yEX47EBL8CIwfdEtJ7TXRbIf+bLCupwCxDXJo4zAikR07QbV5Ffk4/ShNrK6VCkIaIIoBbUKv1OhlKNbQLwgPhwRH2HBowlCfdBk6pLM23lpM60wHGx+AmTFOCowcfUaD4naSDSRSR1K4yDzudR48sysesUjPtQYWAQSNB4T71upPBntICsiqj0J1XYQNoPx/iwSTJCqv8TvfuXL1umRrlXkaM2lzwVxC0wcBcIhhSNDz8MwT4sPRX6nPmq6ZDUQ0ARRDVRLqDOTjP4OgEL+X+enYQTOjA+t/a3bu5og/KGaWSBFIENnXmqVijaRwUtjQ5GH3Qp3dp6+v2k0PSBT/Yh3GMjL+OYzPpSjWX6aTGOZGeDuPHNoj3DvfuWzN9gxd/E/w+Q7AX6fClj5ZQQ50Blec9l/nfqNUhHQBFEqclV4zzKzNOgiEEQWsmw+Aq+WthPwn81B/oGXjldUoAnC/4C19yw+hJKpWzKkbd+Nu1WWJKJ1k0SXqQTtK0JCBfcX6fzXr18CgjBvteW/du3bq2D+D5i4Sai5Ck4FhOeQ5J54PPJXWQ1+5bPXYfmFNE1+gwDh1+M8XcmaSxJjZSrZdKVMZeZ/9PQblUBAE0QlUKxwHZkcvfMBFjmERTrSAzPqgowKAjGAIi1B875ixJAVTRNE6YN05NzFswJmKkzAIoBERjZ7lNTNO3Mp/5FA/anJQL/dOVGlRVF30DQvB+Mkp1rILRy7uF8INjd9isF9IPoQGPtl2hJhNv5CTMNgrBkfn/4be75qSVrQFBHOiQ0PiExw0qcU+ewVifuTlnFaAMYyBg61zWVRzMIOjJtTk8m7tbWSyoypbRlNELXFW7emEdAIaAQaBgFNEA0zVFpQjYBGQCNQWwQ0QdQWb92aRkAjoBFoGAQ0QTTMUGlBNQIaAY1AbRHQBFFbvHVrGgGNgEagYRDQBNEwQ6UF1QhoBDQCtUVAE0Rt8data
QQ0AhqBhkFAE0TDDJUWVCOgEdAI1BYBTRC1xVu3phHQCGgEGgYBTRANM1RaUI2ARkAjUFsENEHUFm/dmkZAI6ARaBgENEE0zFBpQTUCGgGNQG0R0ARRW7x1axoBjYBGoGEQ0ATRMEOlBdUIaAQ0ArVFQBNEbfHWrWkENAIagYZBQBNEwwyVFlQjoBHQCNQWAU0QtcVbt6YR0AhoBBoGAU0QDTNUWlCNgEZAI1BbBDRB1BZv3ZpGQCOgEWgYBDRBNMxQaUE1AhoBjUBtEdAEUVu8dWsaAY2ARqBhENAE0TBDpQXVCGgENAK1RUATRG3x1q1pBDQCGoGGQUATRMMMlRZUI6AR0AjUFoH/Bzc2K9R2TznNAAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-29"><g><path d="M 211 451 L 261 501 L 211 551 L 161 501 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 501px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Failures?</div></div></div></foreignObject><image x="162" y="494.5" width="98" height="17" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAABECAYAAACbHqJdAAAAAXNSR0IArs4c6QAAEYRJREFUeF7tnX90HNV1x793dmSJkGJoSAyEH/lVTpJSmoONtSvt6ojfhNgxAXYlEwxOSUKaUtOkpTmltE0bk4SQk5xwCBRSBwwES7sYDHVLiF2ieH9o5dQpJYSmSSAtuBhMXWMIyJJ255a32hWj3ZndmZFmdq298+fO+/l5b+c789699xHkEgJCQAgIASFgQYCEihAQAkJACAgBKwIiEDIvhIAQEAJCwJKACIRMDCEgBISAEBCBkDkgBISAEBACzgnIF4RzVpJSCAgBIdBWBEQg2mq4pbNCQAgIAecERCCcs5KUQkAICIG2IiAC0VbDLZ0VAkJACDgnIALhnFVgKSOxxF1gXOFHhUz4Qj6d/JofZTspMxxL/DkxbpxJS9g4mk6urc4bi110bIH1NID3Vu4x8NF8JvmPTuqRNE0hQMujA7+jM1/BhPMAnAzgt8otKQJ4AUAOoFSnbjwyMpL6TVNaKZU6JiAC4RhVcAlFIAARiODm23zU1B2Lf5CYNhAQdljeOAPf4amOvxob+94rDvNIsoAJiEAEDNxJdSIQIhBO5kmLpKFINPFZADcBOMxDm55kjRL5HcP/4SGvZPGZgAiEz4C9FC8CIQLhZd40IQ9FYvE/A9NXAISq6i+C8GsAWTBNAPx2EPrAeFttO+kZaHTJ6I6hf2tCH6TKOgREIFpwetQIBOOxzg5etRDWbGUPogUnnMcmhaOD3QTjEQBHmYooArRBp6kvptMP7DEXHY/HQ88+jzM1ws0AvX92tTwy2YULd21PHfDYHMnmAwERCB+gzrXIhSwQTtnIHoRTUs1JF4nED4OGTSBaNUsciP9iNJ36OgC2a9np/fFj9AI2AdRvzsuMq/PZ5N83p0dSqxUBEYgWnBciELLE1ILTclaTenriEdbo+wCOqNxg4M6p8SOv2rXrjqlG7Q/3D74LBeNRmrZ0Kl+cm+zCBfIV0YhecPdFIIJj7bgmEQgRCMeTpUkJa5YKgb1k0Fm53PCTTpsUiSb+CMAtpvR7WKOzZMPaKUH/04lA+M/YdQ0iECIQridNgBn6+/v1icI77gMQN739P4IiLh4dTY07bUpPz8DprPEPABxZzjPJwEfymeR2p2VIOn8JiED4y9dT6a0gEN3dq5doemEFiD4CYCmA4wDopg69CPDTDGwuaqHUj3cMPeeks35sUteU6XJT303+6rQV573u7o8fQR1T1xHwCQDvADAO5p8z4V5jsrBx584H99nxUZu3z+0JLSMy1oJxDoCTTKxfJOAJEO5eFOItXg0Vlp4dX9w5QSvBuJyBUwEsKben5MBGwFPMPFycKmyp11aVp79/bdfE1OtXQ8MpYF4G0NHE2JDLJv/SyRyopJF9Jje0mpNWBKI53OvW2kyBUA5PGtM3AJxtYbpo1+4iM7YUQ9rnGgnFQhQI0rTdMPh+gN9jDYi3duqHx0dG7jpYdZ96+uJnsIFv11r1WJb0Koi/hAJucfqm3t8ff+tEkb4ExlUO/RQKxLitWOi43m8Htp6e+Ieg0TYGji73dr8BPmcsk9rVgn/LtmySCEQLDnuTBGKuDk+K5PMaa6uy2aF/tcO60ASCgGsYpYfvB236XCSiy3Lp4SHzfWUFRCFtPYOvcSHE5SJ4zNCLl4yNPLC73vSNROK/jRANl8Xe1Uxn4BfQ6EI/9wNq9yD4iUldP2vXyKb/ddVYSewbAREI39B6L7gZAtETS1zMjHss3jJfhXpYgB4HoJYjQmDjNFDJjr3Wc7bB8s5CEwgQ9pmcv9T6exagZ8pfE70AnjamQmePjW16sTIjli79dMeiww7cAvCna2YJYR8xfsIg5WQWYvAyAn63anlPZXuKjMIFudwD/2010+rUUVr6Amk/KY0ncSeAXjDeXStU/vkm9PRcdBJr+j/PElaiW0bTw+vqmch6/1dJTi8ERCC8UPM5T9ACcXrf4Am6YTwK4AOmru0nwrqJ148ctjJbLL8Bf4LBXzUFZFPZJwFeNZpJKRPImmvBCcRMD3mM9dBgfmTovyo/qeWd8SJOHEunnjKBsPE+5p8bjHUnHofHUq
mUEuKZa/nyj71NW9TxdzT9pWL2WH60U+dLrPYlwtHBfoKhHsAVES8y8JUunW+0Sr+8L/5u3aDbGaU9kMpl+fUz1+lv82XzvMbFc7PZzT+ba/mSf/4IiEDMH8t5KylogbAwN3xFY21FNjukoqnWvXr64meyQQ+a7eGJ8WW7DcuFKBCl5RhdO88sDnbQuqPxpRpoW5X38XYUeWB0NPV/dWBTOBa/gphuNT/0ibA2l07eW50vHE18iwD1Nj59OXg7VxvZiw5iy2wHNtv9k0ZTw/J+d/9Fx2uF0P0AdZsSjBNhTS6d3OypUMnkGwERCN/Qei/Yh1hMT+tUiFWHPlAtLFmkFF5LAbTCzcOkktbK5JEZ9+azycutlgoWokAQ8Le5TPKLDkacIrGBm8F8tSlt3aWiqjIpHE2oL4nr3/y91rns1HPXHP7W1yceNH8NOA2VHonGzwfoIQCLVB0MPKej2J/JbH7GQf/qJgn3DXwABm+Z7RxXWuZq6H0917olvzcCIhDeuPmaK0iBKIVMCNEfMBAFcBoBiw3mS8ayqYzTTs7FTBTzcB6Em/qt+uQmv4WD2GsG8/lOeKllnJBB/wKU1vvVVbT7ArBjb1FGje9AyXJpih4C4cxKOcx0WT47/L1GY1p+w98GkNqbeJxBT3WQ8a10OvVSo7z17pc9r+8vm0tXkhbBfP3xx+Gm6mW1udQleeePgAjE/LGct5KCFIj5aPScHrCHvkD82tALfY0sihTncG/8o0T0wMw+AuFXKHD/6Gjqf5yOg9UXm8WSHoV7E3cT4bIZgQjAKsmuD5FYfDmY1EFPyj+kcqkN/WtHM0m1ZGYbt8kpF0nnDwERCH+4zqlUEQh3ntRuBGrevyBcOOX19CZuYMJ1lTYQsO03b+n82BM/uOc1NxMmHEtcR4wb3sxTu0/QExsYZGa1N2He1FaH9DxE4I2d+uEjFn4ZbprhKK2ltRLwKkBXjmaG1ReFiIMjks1JJALRHO51aw16k9oLgqX9q4/uKBQjBKwGoPYvKkdLqr+8bXjyBbgHkerU9146MjJSqMfRJjzFLwHth+7584cALDcJxFgIHR/OZO7bX/mtbPm0tc4JbwUGfkaMYQ7RlhOWGL+Y72UeZWrb+ZYD9zDzgKmP+4l4TS6d+if3/ZYcQRMQgQiauIP6WkUg1Ab25OQrSwxNP1UDncDEy8GlB9O76nrltpNA2CyRVQ+z1b6Ag6ngNImlEUJv7+Aygwy14azCpDS6lL/LVjLoromJxT90EpG1UYEWEV8nmfnyfDalnPfkOgQIiEC04CA1UyCUT0TIMK4hQL31He8JjwhEDbZmCIRqRGk8i8Y3iXChC49ttT/wXZ0KN1hZvjmdE9VLagC7DujntC5J5w8BEQh/uM6p1GYIhAo2F9Kn1jPhDy28duv1R715qqWNE2cSiUC0jEBUGmIS/jVVm8X1xvZ50nhNbkfqMbcT2tJ8mukzo9nh292WJembR0AEonnsbWsOWiBsnJdq2zcdVkJ5CucB+pHGlM1mh/aEY4lriXGjCIT9ZLI0PSV8IZ9Ofi3gKUjh/sGTaIrPg8ZXgKH2M2pDprzZqL3QtPPdnhcdjV56VBFTj5gc4orMtDKfHVZHlMp1iBAQgWjBgQpSINRGYsdhL99eDlNtpvEyCENs0MMd2tTjxxwT2mu3ienGimjBbVI73IOwcl4jxj/ksslPNXMKqvHv6tp3cpFCa4gxACrtL82+HHhhV2exCOUtkVqbOdAe6xaB8AjOz2xBCoTV0ZFvmOx/H0X+eIPQDzMImi0Qkd6Bq0D85lnGLkxPVSd6ehPfYcIn5/MLyGp+uKnHz/lVp2yKxBLngnG3eRlKhRLhqVCfOeBgo/aJQDQidGjcF4FowXEKVCCqbPMBPIsinzU6mvqVQzQ1TlmBm7lGEysJeNjU3h9pfHBlNvuw2h+pe1m92c9H+60qrREyD8d0qqhK5XAd6jS33SrKLrOxravjpc0VU1sV0oLYWAGmiDrsiYFcl753TSNT3Eqbw7H4WmK609QHeftvNJEW6H0RiBYc2KAEwsY23+5wG0tSpZPnOorqiMhT5vMN3M1pY+FagXDu3awepgar8BfHzmf7Lb8gegZOYa1U14xHMQPr85nkXzt1GItE4u9DqBSuY8YogIGb85mkOleidM3VY9sioKAIRAs+J4JokghEEJRd1hGcQNQG6nPp3WsRPK70qAvUUa67Nx7ViFR48cPLqOuGHDcNh2/tt/yCUHGvNGwC0SrT/b0gXjmaTu1sNE2UoE8WltzMYGVpVrnGGdoF+czQSOWHaPTi9xQQGiHghPJvrmI+1QgM4FhwG/VB7h9aBEQgWnC8ghII1fWadXFgP0P7cD4zNFYPjTpHefcL+DyYvmxhFmu7xOPHJnUkEn8ndBoB432VNiuhm+ji+K7tqQM2/aBwbzxBVFpKmW3FMw8CZ8euuzd+jkalaKkzdTo8vc26vcwPwcBq8xGk01+GSzYCfKlbIbIO+Q1H3uIt+FeSJs2RgAjEHAH6kT1QgbCM2UPPkGZ8KrcjpcJAzIqVo4ThuRcoDMbX64RxsA0v7odAqHX5cDSxwcISa0tB09ZVn5OtwlDoHR1/Y+vz4aNA2HwFqGm0H8zXwsB91edN1/FRsf366O0djBlkbDWf0wHgJWb63NTBxUkrT+np88ixEaBl5i8Ug3nVWDalzrBwfFmee+HQ4stxJZLQdwIiEL4jdl9BkALRIGaPOiYzB1A51LNxCkC/V3WCHKCiknJpXf2Icm9tlyR8EgjYHMSjmlMAeBeg/bvNEZ5FYtzKVIonNR2G20eBUMWrt/TOg5SqOr2tMlHGQXgcTD+d/sH4fYCWWnyljTPxn+bTqdvsvpAiscQ3wPgTi/uvAvxTQHty+h6/XX1MAlhSm5bumBxffLXb0BsiEO7/962YQwSiBUclSIFQ3XcZs8dMrACmb3IH3YqC8ajpIJiXyaBzc7nhH1fj9UsgStY90cRn39iHuKmB45e5SUUC1sMo3MmarjZ+3xuEQFREYtFBuuONs6UTHqago1DZJee8gnYrwMp72u2ljii9u0vndVZHlDYqTASiEaFD474IRAuOU9ACoRAsjw6crIE31lk2mvVgBbDdIP68Om/ZKqwC23gJ+ygQqn3U0xc/gw18G6D3Nxjal5j5j/PZVDIWu+iYAuvqeNXABEK1rbSPs4eUQChRe6eTqchAnomvrDrn2jZruY7PvPEFsh7AkU7qAKDOp7j2+GM56TXCqwiEQ9ItnkwEogUHqBkCUXlgqf0FzaBPMnEMwEkzSxulMBv8nwxsLmqhVPW6fvW51upBZkxOrdi588F9ZsQ+C0SpqrJ3+PlUsvah02aWTsp9IKLbFoV4S+XNuMak1uclpuopp9qrd+3vDkG7ksnoBUgFSaxsYqvloGcZtNUAfXdnZviXTk1izfWUQm93HjgDhNUWdYwDvJtYyxZhbCgcPGrM7ZJSdZ9EIFrwweKhSSIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIA
SHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCAgAuEBmmQRAkJACLQDARGIdhhl6aMQEAJCwAMBEQgP0CSLEBACQqAdCIhAtMMoSx+FgBAQAh4IiEB4gCZZhIAQEALtQEAEoh1GWfooBISAEPBAQATCAzTJIgSEgBBoBwIiEO0wytJHISAEhIAHAiIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCDw/9PUXuoYdJv9AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-31"><g><path d="M 211 551 L 211 576 L 51.5 576 L 51.13 594.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51.02 599.88 L 47.66 592.81 L 51.13 594.63 L 54.66 592.95 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-64"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 592px; margin-left: 139px;"><div data-drawio-colors="color: #393C56; background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; background-color: rgb(255, 255, 255); white-space: nowrap;">no</div></div></div></foreignObject><image x="132.5" y="586" width="13" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADQAAAA/CAYAAACvv+soAAAAAXNSR0IArs4c6QAABKNJREFUaEPtWH1MU1cU/wFmWAoraJawyFacgIIoLszMBHCZm07M5A+D4seyaMQvVDY+WoFCakVWSoFaUReNcTP6h44lOpdpNsVghpqp6BgwjNvYMqe2dmUw5pCvdrtNSlree7nvMdYH5N2/mt5zzzm/8zvn3POun9PpdGICLT8J0BhnU2JojBMEiSGJIR9HQEo5HwdcsDmJIcEh8/EBiSEfB1ywOYkhwSHz8QHBDFkf2/FJ7XnUX/kGVqsdDocDQUEyzIyZjjWr38aCV+dh0qSAEcEYGBjE7dutuFh3Fd82tcFi+R2Dg4MuXVOmKBA1Q4nUpQuRlJSIYHkQq40hQHb7H9i6XYsHD6xegstSX0NxURZ6e/tw9FgtTp3+YsgIm0alchrU+Zl4eV4cb1AkKHWXr8Nk/hidnX9SzwUGPoO05W9gc2YG5HKZlzwvQO+/tx67dftx7fodqjEiEBIiR0W5GglzZ1Hl7R2dKN17EDdufkeVHS5AgqfTZiMmOnJoiwpo8ZtJrpT67NwlQQbjZ0fDaNgFhSKE89xjmx2a4mq0fv+jIN2ewmFhChgNasTFRrn+pgIi9eBwOF21ImT5+/tDX5aHlORXWI+Retmz9wAu1V0TopZVdnpkBKqMBQgPf44OyK2BOJiclIhNmatAFJDV3HwPxuqjaG+/z2poZXoqcv5NV7ZVf+UGtDoz+vsHGNuhoc9i25Y1SEmZj1BFCPr6+tHW9hMOfngSLa0/sOpLW74IqrxMfoAImC2bMrBubRrIb8/1yGJDTm4Zfr3/iGFo0esLXDkeEODd9f568jfy8vVobrnHOEOCZdCrEBERztgjjclk/gjnPr/M2CPNwVRZxA/QnPgYVFUWcrbKw0dO4fiJMwwjiYnxqNCrIJNN9tq7easZ+epyBjvEqUpDARISuJtJV1c3VLsMrEyRjKDWEPEka9s6vLM2jTPXv/yqAbrSGt6AuAKwZHEySjRZDEaHKyZ1t3tPDaOuZ8dF8QNEWjCpH67VcLUR6oIKXoB6nvaisKiStU1rS3birSXJ1Cbx8KEV23fqQC55zzV1ahgdUHCwHOZqDWJjZ4wKoI6OLmTt0DJqjo8dtwPd3U+Qm/8Bo93LJgfSAZFL0mwqxqyZL40KIK6JhI8dtwM9PU+hLjSisbGF4RO1hvgYEpJyEw4QV7qM25QTvSmMdsqRpCdTde2nFxj575O2/X8A+rrhFgo1VYx75L9erBs3pPu+yxFaSOvOzillnQFHOvqQqbtmX4k4gAioM2cvosp0jHWKH8lwmrFqGbJ3vCseIDKgFpeYRvRhN7z4oqOU2FetAWHJ5/eQpzMWiw15qnL8/Mtv1HGHS+DFF55HuV6FSOU0l4iogIgD5KtVq9uPpqa7gkERZspKc70+NUQHRFC4H0kOHDoJm62DCow8CWxYvwLpK5aCPJh4rjEByO0QAdZ2tx3nL9S7aovrGWthynzGN5Zbh+B3OWr4RBaQAIlMANW8xBA1RCILSAyJTADVvMQQNUQiC0gMiUwA1fzEY4gKeZwJ+I0zf6nuSoCoIRJZQGJIZAKo5iWGqCESWUBiSGQCqOYnHEP/ADZNm1jj/+tzAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-34"><g><path d="M 261 501 L 356 501 L 356 87.37" fill="none" stroke="#e07a5f" stroke-miterlimit="10" 
pointer-events="stroke"/><path d="M 356 82.12 L 359.5 89.12 L 356 87.37 L 352.5 89.12 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g><g data-cell-id="8N6JJebqrzA787TgpwUj-36"><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 512px; margin-left: 307px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; font-weight: bold; white-space: nowrap;">yes</div></div></div></foreignObject><image x="298" y="506" width="18" height="15.75" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEgAAAA/CAYAAABDyo4+AAAAAXNSR0IArs4c6QAACIdJREFUeF7tWXuMHHUd/3xnd++uFAQC6bUUMUAkJmIDOdvbZ11oqS1IgTaz2yttqY80VatEYzRSFZpIExSDNJAWYgSDxe4OtEV5BUqz3D5m75rTEBrARgFTtekZLMfJ9fZ2dr7mt7d73Z3Xzt1Wc01m/tz9Pj/zfQ/BexwRIA8fZwQ8gFpEiAeQB1B7RcSLIC+CvAhqDwEvgtrDz6tBXgR5EdQeAl4EtYefV4O8CPIiqD0EZnMEybLsO36CbiDgKwBuBNBds/c0gPcBekUn/fGBrPI2AJ4OEpOyfZ8ncB+gLwfoCgAXNMgYZeCExFRg0lOd/rmZTObJcaMOCkXlFwFaZVJO9IiaTX3bjWHhaOI+Bu41CQd2FHLp+6wcC8XkL4CxB6DPtHKcgaIOumswlzrWilb8Hw4nF7PEvwZwrRv6Gs2/ANx9+QJOK4pSqfNRMJK8k4h/ayHoqF72LR8Y+N1JJyU9y+ULO8YhQA4b6D4inVcWCopq+J1C0cQ3APwcwJxpOHCKiDcWssoLTjzBSHINET9piBa3aiogPDQxdtE9Q0OPlwUTLVkqX+nT6TUAVxqkTAB8m5pTXnaSHg7LIZZI0HyimY5fQgVrVVUR6TL1hKLJTQDvmSY4df5RZtpczKf2W9kUiaz9rE6+VwBc5hYRC7oKM7YV82lhI0Sbp1AsuQvM24zEDDxRzKW/6pRmdukFYJuaSz/aBM7SdddD1wWY89pw4Kju11YNZPb/3RSZNn7MQNfbmiR98Uj/vuPVOag3IkclqkbBXIOw9yoSLxvsV96zUhKNrr+4gvJLAPU2/s/AcT8q8Vzu2Xfrv/f0bAl0njfyFDMnLWSNEnB/qYv3DB1SRkTado7TVga2W6UKWdS2nnjfpR2a9hpAiwzyTwH4scTSgXx+3wnxskUBf/+k7zI/V74Mpu9Z6WCmDcV8am8VIIc6UiGiDYVsap8VQPbA0tOd/pN3ZTIZrc5nn4oYhiTdofbvKxh11OT/HsDFhv9M9TEclq+DRK8ycGnTyyJsL2bTO+2iKBxLbADjIQB/YmaVWMr7fOU358/3DYtiPTVJh6KJbwJ4xCzI7OyU05HE/Uy4x8BTYeY1xbwiHJt6grHE94nxgEUaP1DMpX9ok8Z26T/BwC3FXPpQwwuwBAjAIVQ4qarKv2eQatUaVH2i0bVXafBlCPhkq3SZpLdOLwCmtxuPb+4qaR8rAH3JYKTJUaMTwUhyFRH/AYCv8T9i7Czk0yIFq49Diom/RaN4nQh7SZcO11PNDWBTAMXjcX9J6/4NwOtNjExb1Xzqscbf7dKLgV3FXPruRlqn+qCDbxrIKUN2xvZG5R4J9KpFmimd/uH1DWls22wsZI8C/CYIByVdf3F8/JJj9bZupG1aVkNReSVAzwHoaCbk5zv9c+XGSTNsnV6Ws09vfM3lkubvtxgl3LxEaxrG4c4A35bJKP+pE7TR5sVU/QwTP2ic2psAckibYdJpWaGQOuqcXlyY6MLNohM1RZt9FMwcIOCvftJi2ex+0ZmmnlozeGaGs5CYgQ5WfNJ3RIsXQk3nDjdzjcNYYJp9hBKHNDnrAAmBsdiaBRr7RX0SO950pvW6PW+Rrt1cKOz/mxmgcPJallhM1s3DXENI26RXU5Q1em7XANpBBzYR1Ciz1hziAG0CIBpE47LqqJ6IUqWxCzeaAJos1vOeBiAbJJwSBTWAwLtWwyEAY9GcYq+90SyAqw0yLdOkTeDs2CkSWbeAJf1GZqwGsLThemDFc4IlWmZ5UQzHkuuYqwtsc2sFdlSYD1lM3Y4D5aIVG+eeP1Y6wMBNBks+JJ1WFAqpI/8jUBzFigE5UJI2EbOYz0ypyMBqS4B6e/u6pUBFDGGGcwEXCNIbDP56k2bCX6BxXFWVf9hZFIwmHiZAnE+aHgZ+Wsylf+LmrOLkrUinMe30FX7SF4FpMYOjBLoGRN+y2wSEvHhcPr9UpudA1XuU0TZrgASVTZ2xttHF7SgYTSwnQJwqDCMEhhm4s3EqriuprUDPAvRpAEMgVpl9RyS99NbChf4P6ncbh0FUiBrS/drtFsttVY19MKDCTLfaHu0ddicjSB/rzCsH8krO+Q3bvykAoyB+UJ/wPybuT2KxDZw38jkwP0pA0EKuKaXtysIkL7/DLG3vCpQOZzIHP5yKHA1RBt1ro6O6qNsCFArJc+CDeHvma2NzIFrOPlZgBSPyaiISi+9MWu8ZkRZD4pIld1widQSet3F2+iWO8Es1m/6u42cfh2vjlEIm/KCYTf/MjQWiQ05o3btMNcwN8xmaYRDfqmaVQSNbKCYvAZPY29q5Nwmx9nNQo1KHa2OdzHb2sfO5eheaM/Iwg7cYu6QLnGzrVZ23zUlaiDnKEiWK/SnxocA8SRuMbLEAmnc0F05W9QYjcoKIfgFgoQse0wrgxLM4Ls/3abSTgI0A/C7kCxKNGLsrWuBHAw
N7P6rztPyy6rBWOM4+boyqRlPnyA0g9DFxDMCnGhwaBXAMzC9oPt+v6ruRG7l1GlGXfAH/7SC6RVxEavtZHTBxzPsnAX9m5lSlrB0cHDzwgVF+S4AcupnjOXY6jsxmWkeAqkW1PG83E75mdMLNQX82O+7WNkeAwrHEWmY8ZdGWXX0ScmvEbKabPNr3bAl0dJy6QFUV8QWAq18VSpTgyRuy8WAuSFzPPrPZeTe2TX72mf5By/Lu40bhuUZTBcjhHGHyh4E3JL28olA4MHyuOTsTeycjqLevmwKVfgKuaSHEdoqdifJzgacKkNPKf8YJfgeErWpWef1ccOxs2VgDaHPXhDa2Q5wdAMxvWAFOAvxHBu0un77oZbtPI2fLmNkop+WgOBuN/n/a5AHUAm0PIA+g9hLSiyAvgrwIag8BL4Law8+rQV4EeRHUHgJeBLWHn1eDvAhqL4L+C38I2WDElOGjAAAAAElFTkSuQmCC"/></switch></g></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-46"><g><rect x="1" y="601" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 651px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">GPIO driver suspends non-wake GPIOs</div></div></div></foreignObject><image x="2" y="630" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfQuUXFWV9rdvVT8CIwk/ComAgKM44uigkfS7bQ2vICGQUN0dIAFlBIRMGFBEAX9foIOOIgyIoEgCQtJdvEGRh9j0o7o6EEVEjP4KCAgkyIQQoV917/5zblV137517r3n1qO7uvvctViLdJ3ndx7f2fvsvQ9BfxoBjYBGQCOgEZAgQBoVjYBGQCOgEdAIyBDQBKHnhUZAI6AR0AhIEdAEoSeGRkAjoBHQCGiC0HNAI6AR0AhoBNQR0BKEOlY6pUZAI6ARmFUIaIKYVcOtO6sR0AhoBNQR0AShjpVOqRHQCGgEZhUCmiBm1XDrzmoENAIaAXUENEGoY+WbcuHCMyqqq9/4NwvmUhAOB+gAAPMBRDIZTQCvAPxXMB62CPe8awGeiMfj4u+hvpaW2D8Nj9LdIHwiVMaJibcysAOMTYaBB1LDo/dv2nTna6rlNTUtX5DiaA+Af87mYcKFyZ7Ob6uWIUsXi8UiL2w1DibTas/gKMrfx5F2JwMvG0wJi/iO0Wru3vxwfEchdU523tqm1i8Q4/Kxegnr+3s6TytFOyazrlK0X5c5tQhogigQ/0XNsYMMi75EQDuAt4Us7nUGrrNGRr8TZnMuEkG4m5oiottHiS54rHvjC0H9KDZB1NScvIdROXImmM4FsG9Q/Y7fU0x4kBH50kDPht8B4BB5pyTpZG7ak1nXlICpKy0pApog8oRXbGiR6OilTPgsgGiexWSz7WSii/efb/1ARaIoEUFk27KdiFcleuI/8+tTsQhCSAwvvkytAK4E8I4CcDSZcZdZwWse64q/UkA5Jc86mZv2ZNZVcuB0BZOOgCaIPCCvr287jA1sBPjdPtkzKiUaTqfhKpfKyZ3VBOFHSPH5/f3xQb9mlZggRNXbQLy0vye+yasdxSAI0Y+hFF1FwGqHKi4XF1s1N4ajIBE/Se0lMnhVojv+SB5DOylZJnPTnsy6JgU8XcmkIqAJIiTc9U2xTzLTzbtOvHvmZuUtYOP7Bujevr6NL7vVHeK0/Le/GQdaBj5F4DPkJ2a6fmRw7prNm68f9WqalCAYj1RV8LKurvg/VLrU0nL8vJGRqv0sg9cS8Cm3FMRA0hoZPdZL9VUoQSw8PDa3YphuJcYxkvYK1dsNhkXX7ruv9ZxbqhL9HzGNxRbzFwk4TEIuO5nptGRfxx0qWEx2msnctCezrsnGUddXegQ0QYTAuK4ptghM9wLYe2I23mIx1r7rnXhERUUk8tbVxeZQhNYy8BUAcxzlDTJze7Ivfk8pCcJZdl1zez0s605Xv4TKZk2yr/OHsnYUQhDiQr9yzo6rkSZJ5zcI5q9XVeBqRaKjuub2Q2FZNwH4V1dZgVJQiKEvalK9aRcVTl1YCRHQBKEI7qJFJ+xlVFbcR0CtK8ttMPnM/v74/yoWNSFZfVPrCmYIiWSMJIJO78WQINxtlbUDPlJJAQRBdU2xz4PpWxNP/vQMWWhPJDoeC4ujjUfK+AHAq1x5nyYrdUwiccdfw5ZZyvSaIEqJri67mAhoglBEM2dRAyDgoeFqjhVoZinbME0iOiXR07FR1rxSEISQaBDB7QAtcdT5Mhu0ONnd8Qd3O/IliLrm9g/Dsn7hlFYY+BMMOl5Wj+LwwEsqIdC1ldGta7u6ulKqZZU6nSaIUiOsyy8WApogFJAUpqwRi34J4KBscntTixpHJbs2PqdQhG+S+voT9raMigcJ+DdHwnhVdNtJso2tFAQh6q1raDsTxBNUSgwcl+ztFGq1CV8+BNHS0hIdTu2zHuCTHIUFqtRU8fXA8Q2y+OhEIt6vWk6p02mCKDXCuvxiIaAJQgHJusbWcwBc7Ujqq59XKDIniaSO58lKNcvUI6UiiNrG1qUETLj7YKZTkn0dtxSDIOrr2/6VDRZEO3aHQ4wfV1Zs+2yxTvi1DW3LiXgDgMqxNhNd3d/Tsba
UPhKHNbfvH2WzHRZWguhfMirDFAO/32XtdhOPVv54YOCWN0SbwhCEO22WsIWZNVWMXpQxMBB4ivubLUz4qTWSWp81Lgiqq74+VscGCYlujwxeb1rMRw/0xXvDzOPGxhXvTiHSRcD+mXwjDHwy2dv5sF85GfXgEsD6FEAfcThFConvr8TUYxn84/3nc1L1fi/n8DKuKn2zvrm1iS18E0BNxjBjK4BHmHBNmDrCYDOd02qCCBg9YW1TOYSfA1TvSPqUNRo5fGBgg5hcRflqm9veTxZ37vIe7mXG7dXR3Xq7utYNyQqfVILw8I7OR4Kob2z9auZSPtutop/uPUyAnzUNXrypO/5sUQbLUUgIf5hXAfp0f2/Hz2qbWi9Q9aSWEQQZxouw+DZvM2u+ryq6e0zMnyCCkM1vYnwz0dd5cRisahvaTibin47n4cRINY7xUr/aKs0o1oDpy2oOpryFDJyT6I7/KojoZQRhjo62RqoqvgjGeR4m1UqEFgaTmZBWE0TAKNY0xBoNsk9Yu2eT5rOAijlZJpUgiqRikhOt/yaSL2YSVZnvnU6+9YyReq4FlVeRQp32KYPoQAb+a1zC8Q614d7gCTh3V6iRMwEc4lHJhL4GEYQoI1d65YEIKpb09t66XQWblpbTqodTb8YBOnZsjQBfS/R2flWWv7al/UBKmRsBEqf4MJ/wFbpi5K15F/mZgeceXriLYfQR+Ive/jalmYthOleOaTVBBIyK5HI6LxG8mINfMoJwxwgCPPsaVoLIbKZCvbQgiwUDVyV7O0VojaJ+k1FXff3yA9iI/lyyUZsAbwGMpOgUgz9KwAccfibbwUiA8Ml8CAKE18DYK5NXOFT2AfRMRppoAPAXp3SrQhAS1V+o07QE7+0W+IiB3vhm98D64sZ4gQlJAr3B4D0IaASwX+7k8PcVkszNnQCEo2pa7ch4DgYetf/fwsdAOBDAmv7ezmuKOhFnQGGaIPwHkWobWm8iwimOxfxnpLilvz/+t6ka/1IQhPSET/Dsa2iCaGhbQsTisjsbvBBe9xuF4trYeNKeJkbvn3BCDelI6NcGYTFVtduOm5m5zZlOWLWlDD7TrcrKmEh/ndIn/7H+50UQY5l4gKORdqeRhJgXgybeNdATfzqbTIUgZBJAGPLOldj4fphY4Y4IIOZY1RDFGTjCgZsdIsWK8AUSFSDVNK38oMGpGwD66IQ84C8ke+Pfk42TbG5mCZWAL+67gK9x3GdQY+OKg0zTeD1fU/VC52s559cE4TM6DQ3Hvc2iarGpfWx8XYbzWC7F4JeCIGobWs8isi/iHRsY3VoV3Xqq7AI5NEE0tZ5HDOeCLqUklkvswF+ilGrq6blDeLgX9NU0xI4wiO52+q6Iy3a2eK1PmBQP/w/bXtozmqvMvDqMBZ0KQQgw6pva2pntO4TM+POTI9Ho4s1dG/7uB5aHebT0NC7piwnmS/Z7J77jdwHt4efyPExe3N8f/7O7fZ4Ewfyl/r64iKJb9gEdC5qgRcysCcIHTOlEK2FoZtVxLSZB2MHyXsJpIPoft0e3xbxsoC/+kOopzS/cd31D62VMuMhRlqcaQhUHv3S1ja1XEiAsl7Kfp09HmPpkprpBjo3Z8oXkUTHn9esylkfj1YYkCPLR77v7okoQElNuJTWTWz3FwAtRmC29vbc/42xLTc3KfYwKU1g0OTzeg8PKZMuQOap64eBBECUzVAgzf6ZbWk0QPiMmMd3zPe1N1uAXShBCpfBWavBdUeBwhvUfgG2WOeFj4MbRwXlnel0GhpUg6ppa14FxqqOSop3oZbhLTqtFISTJnAh1AS4z9Q0pQYSSvFQJQsgxtY2tNzjJS0XNlHvBLZc6cyUUeEoAXutIVcrxIAhPv6LJWrfTsR5NED6jVtMYW2iAxAl6PDBfSAmioMirHnUVVKbaLA0MUTFbCaK2IXYcEYkggLYqxuvE7AVzWgLZ+1YAsbE04SSIZ61oqnmg644XVYYyBEGgrjF2NGCrzrI+JL7m3JJ56EWWMvK5MdnbeXoYdY+EnKVk6TE3L072dAr/B/2FQEAThA9YMmsYv9OerKiCNvMpIAgGfksmnyjT7Tr7N1sJIldVNu5zoLruat33MWEIIuRlexiCWNiy8u2VqdQvAfpQpi8jAC/r740LM++cL+cA5WHUIDMaYML5yZ7OK1QxE+nkd4J0Vn9fx3WBc9PDXDtM/bMxrSYIn1Evxh3ENCKI18G4DBZfE/QehYAsLEHUN7T+iAn/7oA71Ek47OJ0b+QE/B0WH5FIxJ8IW5Yjfc7lt7icTvR1fiZMmbVui64wBAGEUpWEIQjRhxwC9PFCzy1b7rEuVdUCDwAUKogigysIOArAO7N4y3ySJHMzlFouzFjO9LSaIHxGWHKisgP0/WO3qhOefPDmN1UmR5kShAhj8BLAL4r3sQ1E7h0a2uO3fs5H7r6GJYhS3Ql4jEFJrJhkY5nPO9ySk7e6FVNIFWdogsgNvSFVM0nMoj0vtaWqWpXFo5JGgodkbhbl/kmlOTMtjSYInxGV2YcDauZ/YSeKlEjCqJhCqh7Ctq9ggsiN82Qy09JkX8f9hbbFnV+qigAeNXhoaV/fPcJpKq9vNhCEpI9SNVNuDCdv72tNEHlNt7LIpAkiYBgmyzxzxhNEOtbUBE/qUoUskd0d5aMKck+N2UAQos85lkkSNZM7rpaf6a0miLLY6/NqhCaIANhqG1sP3/Ws5c8mRActgVv+TCcIuaqtNPFvcgPHoShe27OFIHJMcV2Xz5JLZ9+gixKCKKnKR6uY8uICaSZNEAFYyh18ir+xzXSCEDDLorkabBzb17exp1hT2sOzt2hOUjn+HCHvBEQ/cwJAhrmkDllf2DsI0T6JKa7JzMuzz+DmBrCUh9bIjml9fexQGPQQA2/P/K1k6kVRviaIYq0m+1E0/QUhINnYRph5dbIv3hGUV/X3WUEQsvcgiDqG35q7KswFuR+mXmEwivXmRG1T60UkrL2yXx53PzkSTpkRhE3mrtAbwnEy67cg8VL3DXQnDllUYXYTcPAYbB5h5FXXi186TRDFQDFdhiYIBSwbGlZ8wKLIg07zOgCBzmQKRY8lmQ0EIeZbXVPbVWBe48CmaGTr8W54Ud+cyDFRBcKG8MhxGgvlST0JEoQYm5zQGxk100hVdNjlKxHYfw9jD2lAP1/yTxPNAwTsScAfLeAPhsU3uk2XNUGE2Xn802qCUMOS6hpiF4LoW87kRXqT2i5ylhAEPMh2G4iX9vfEN6kNR24qzzepi/xiXV1dbF9EqQuM9+RzGpY9X1uOBCEhc9uMlZmHXO+jKPllSF5MfCOserG+qfUUZqxzBJSUmtZqgsh3FeXm0wShiKVHqGLbL2I0yqsf64q/olhUTjLP17VmkJmrs9O1jbHzCfRtV+jrbQycHPREpQxjj2ifImlRpbxM3TIpSDWukJAeRNjvSyb0owxVTKJ97tAbwupM/N0RdFE5DpXsYBDmgFXTsnw/IxUVJtGOYH/4tTkyemT2edUsppog8t2JNEEUhNyixraDDfC9Tl1qpsBXAXwRJm
9Q8ULONiItNaANRF8DsG9O42YoQXid9sW7ysJckk2+ShXHmqbYIQZjveu9AAHlIBFWJXo6by9o0CWZ5VIQd6WiWOlzUKDahlgrEd3oipobLtz3JKmYRLfdjqJ2GBZmC0QftmHxeS9EAhvVNbV+D4z/nPgb3VwVtc7u6or/w2ucxAGKDLrK5Ynv+S68JojizXgtQYTEsq65/cM+7wG/vut1qnWGZf54aGivP8kuXltajp83nKqoZdCpBCzL2Swy7bFDSINO3dTb8Sd3EwuN5hqyy9LkYT2p3YWkPXHp+l2n/FZJBa+CcQUxde67r/Wc+62AtMQgXhsjsdkcLnmEx37aM9kX7wwTDC4ELh5vO8jfTRbtHUrR5zKSQ1T1ICDS5WOF5Cy/0PwSA43x4n3CcMiw9JACxBA9zoisSfZuFCrGCW81iAMAMYkos7XOMv2kD00QIWZyQFJNEHlgmXlT9yaAmgKybwUoE5KDxcYgYsjkbhATC9kJ4m8ghau9TtEzgSBEl+2TYcS4lMHi2dHcl9bSuJgAXgFoOH1q5bmOJzdl8L9EBq9KdMcfyWNolbP4SEGijBcZ6BVPZ4L4g2Ac6nUQSPcpxINBkyhBiKblekyPQeQbyM8LyLqm2CIwiUe49pYQpXhOtRugV0Fc5XgO1JWUnoFhHd/fHf+drB5NEMrTODChJohAiOQJ7Id2XiZx+r0SwDvyLMaZbSeYfmilopcODNzyhl95M4UgMn2k+ubYx9nCNbJ3KULgaj9daUaM8x7r3vhCiHx5J/W5+/ArcycB1zIgAvylw8iXMUF4xxLLP+RMgBQeMB6CHOjE/u6Nv/FKqAki7ykt4ezilTUrS7LVByZOJMaF+W1wvIUJl1dHcJufHtYJ7gwjCLtraQc3Wgng69L7GO/ZlWLG3RHwxX198T9O9iQMd1DgLWQZq03Dsia8M1LGBGGPTWPrOYD9HO3YF+ZVO9mY1NScvAdVjH6D0kQ5R2HcUsS41kxVXBJ0gNIEoYCmYhItQSgCpZLM9rqOpo5lIqEX/wgBCwC8zbGqXgPzK4CRJOIucyTy0MDAhq0qZc90gnDuPYsa294bIWsZGMcDdACA+Q4V1E6AnweoF+C7qqK7d3V1rRsKi2Gx0wvSHjHpeGb+LIjeN6YGI4gx38Sga0cH5/1C3EuVczRXGS6S2FZF8y2xrQOHaSkYqxk4xDXWWwH+Cxg/MUdTd7mtlbQEUexZnFueJojSY6xr0AhoBDQC0xIBTRDTcth0ozUCGgGNQOkR0ARReox1DRoBjYBGYFoioAliWg6bbrRGQCOgESg9ApogSo+xrkEjoBHQCExLBDRBTMth043WCGgENAKlR0ATROkx1jVoBDQCGoFpiYAmiGk5bLrRGgGNgEag9Ahogig9xroGjYBGQCMwLRHQBDEth003WiOgEdAIlB4BTRClx1jXoBHQCGgEpiUCmiCm5bDpRmsENAIagdIjoAmi9BjrGjQCGgGNwLREQBPEtBw23WiNgEZAI1B6BDRBlB5jXYNGQCOgEZiWCGiCmJbDphutEdAIaARKj4AmiNJjrGvQCGgENALTEgFNENNy2HSjNQIaAY1A6RHQBFF6jHUNGgGNgEZgWiKgCWJaDptutEZAI6ARKD0CmiBKj7GuQSOgEdAITEsENEFMy2HTjS5XBOqaWteBcWq2fUy4MNnT+e1yba9ul0bADwFNEHp+aASKiIAmiCKCqYuacgQ0QUz5EOgGzCQENEHMpNHUfdEEoeeARqCICGiCKCKYuqgpR0ATxJQPgW7ATEJAE8RMGk3dF00Qeg5oBIqIgCaIIoKpi5pyBDRBTPkQ6AbMJAQ0Qcyk0dR90QSh54BGoIgIaIIoIpi6qClHQBPElA+BbsBMQkATxEwaTd2XaUcQsVgs8uJWHALLOB3gIwEcCGBOZih3Avw8YDzMoA37LzAfj8fjZtAw1za1foEYl4+lYzxSVcHLurri/wjKK37PN//ChWdURKu310RgnM7ETQAOABC16yS8BsZzxHggRbR+U2/H/wPAQe1xt4WB45K9nfeKfDU1J+9BFSP/ToxTQPQvGdxMMF6AgV9Y4P8Z6In/QaUer3a0tMT+aThlLAGsTwH0EQD7ZNKmAPyVmHosg3+8/3xOqoyNjW9j61IC7snW6XQ+E/PhhVeolhjnAGgGMB9ABMAggOcYdIdp0HWPdW98IQg7j99pUWPbew3wpwm8DKD3ZMZoEIQnYPHVsHBnf39c1Id8CWLh4bG5VcO0FIzVDHzIgZuYv68Q8DQzd5ijqbs2bbrztTz7EpjNb/6I+Vox5/WjCfxZ19juBPAnBt9qjaTWF9K+RYtO2CtSET0ehE+D6H1g7OWePyasG1JDew5s3nz9aGCHADQ1LV+Q4mgPgH+207vW92HN7ftHLD6TwMsd+4mNO4BuJlwTZr6qtGm6pJlWBFHXFPsYmK4G8K9qAPMWMnBOojv+K79NL98NPtuGPPJTbUOslYi+C2Bflb4wkGTi0wd64k/7pfdY4PfVNbZ9EuCfAHiHX35RjwU6dVNvx59U2pVNU1cXm4Mo1oDpywDeFpxXbWz8CGJRY9vBBng9AbUB9ZlEdBunrLP7++P/G9y2dAp74zCtK4hwfIZ0PLLyFrKM1YlEx2NhCcImVJO+AcaZjoOOXxNTxLjWTFVcMjBwyxuqfVFN50UQ6bWHHwL2wcK3fWC6Apb1lSxpqtSd3qStrxGwauyQ5J/xbwAu2G8BdwYdNLwIQhQ/lKLLCDg7uM7xMVbpz0xJM10IIruh3qi4iJzjYxJw6fDgvMu8Thx5bPATxj9MflsCegkXgOhS/01HOsUGmfjsZE98vRfh5SxwFqdezCGiMNhtJ+JViZ74z1Qmem1L+4GUMjcCVKOS3pHGBOGKkbfmXeR3GpRJEAb498x0M4A9Vetk4LeGlVqWSNzx16A89c2xT7Bll//OoLSZ37fBME4AW2eohtqoq4v9H0SoA8DhinWMJWPgTzDo+GR3h5D4ivbJ5g8R7Q/gO2HWHhN+PlrFJ21+OL4jqHF5YJ0t0mTgpuoor/WT9mUEAcs8BxFjXcg5u5OZTkv2ddwR1KeZ8vu0IIi6ptgiMAk1yd4u4LcCSAD0qvg7gQ/i9GnSfYIdYebVyb64WIw5X5gNvtD89U2tK5ghNp6sWkwUaat5mJAkUPpUSPxBMA6VLMrtDGNJsnfjgEpbGLicgDMmbKSM52DgUTANA9a/AbRQcoLaBuKl/T3xTX6Tvb5++QFsRH8O4BBXugl9YvAeBDQC2C+3PLp+ZHDuGk8Cd6mYiHAPsz3OzvnwIgO9afz4HSA0O9QTY1US48eVFds+29XVJVRe0s9nvqUA3gwYvwVxFYAGMA7KEr3YtCmtGqzLFuwVi0moayrn7LgaYDE2zm8QzFtAxq/teSGpZzwxd41U43iVTVh1w8pZC8AGwJagsvNVkPqzAPrE/PFZc1CJQ+WxHtJrQr0eXzJyE4Q4KBAwDGCRA5edJKR00LP+mOMPKcM4qgCVpepQlEW6sieIlpbTqodTb94CkNAP2p+9EA2jvb974
xPuk7S98Kq3rwbRFROJghMj1ThGtpgmiyBs/WplxYMAhG4+25su08CnN3XHxaKb8An1w1CKPkfAJRM3cLq1Krr1VNkmJ1ngjjLlah27XVUV34W4m0jr77PfU1Y0tWSg644XZbPV1psPUZyBIxy/m8y4y4rwBZI+UU3Tyg8anLoBoI9OyAP+QrI3/j1ZPW4JwpkmrXqLnDnQs+F3zrlgS2ovkwia933XgWEbWbQ4keh4SlaXwMKorLjPpbYSm9VPzeHRz7n164uaYwdFLBLtFptozue1SdY2trcQLEGsYxsvA9+qjvLlstOwqCdq0XVurInolERPx8Zi7SY+88dzXDPqxQsy6sX0HVr6e8oajRw+MLBBHORyvrrm9g/Dsn7hInqTCBsjSF3Q03PHy85MmTFt3XXYudKtKiXQtZXRrWtlayJHgpjYkleZ6bzRobmd7gNKTdPKDxls3iJRaa/p7+28pliYl3M5ZU8Qtc1t7yeLfwlgQQZI3wWeBbu2IdZGRDcBqMz8bYSBTyZ7Ox92D8hkEURtY+vhBAi1TbZNvgso006qa4hdCKJvOdr9Mhu0WKZe8F7g3JWKYuVjXXFx8Zbzeam+CPhaorfzq7I8krpMMF+y3zvxHT+9cOYi+wcAC31z9nseJi/u74//OWd8XBJE9nchDbDFa/103bITqt/Jtrah9SwiiHuuLFEG9slHGvA8Rdc2tl5JwNqxvhJd3d/TIf7taYggCLlyCHcB1DKOEd9XFd091tW1bqgYG43H/AnEQMi8dU2xz4PteTqGHTMvT/bFxwwMsm20ScXABhAJFWj2Exf9F/T3dv7AD4ealuX7GanIbS710KDFvGygL/6QGwcfgngaJi+TzblsGVIJOaQRSzHGZarKKH+CaGhbQsRCvZSedIqDY0/ACG4HaIljQ/lmoq/z4ikjiKbW84gxfkomrO/v6TwtaPBralbuY1SYgtjGLueZ6ZRkX4c43Uz4ZAvc1ldHjaOSXRuf86srY6VyHQGfcqSTkpisTYC/qshZt+yk7kVGMgkifZ8wemQicec2vz7J5gEzfprs61ydI32mN+CfA1Q/NmeIOobfmrsqyGJGvnlDShAfOnLV7v/01vCdTmnAaW3m25/G2NEA3Z09ZDDwQhRmS2/v7c8EzSOV36UEwXw3LKwMunSWzlPCxcmezm+6666vj9WxQUJ62CP7GwOXJns7/6+KFV1dXew9iJA4OL5rrGyPdnoQhCehuNta19gqLOTEoSH7PWtFU81ekrUKztMlTfkTRM7pkQciqFjS23vr9iCQa5taLyK2T2kvAvQEg+/Omnw6806aBOE2pwXiVdFtJ/npwzPtpLrG1nUAFgP8VwY9ARg3ye4hPE6AyiJxTWNsoQESp7Ds5a8pOwXWN7W1M/NPHadFTwnAa5xyy+AnR6LRxZu7Nvx9wvhIJAgV/Xa2jPqG1suYcNH4RiI3Y65piDUaZG9au2fSvkEWH51IxPuD5pr4vc61eYu/ydppS1CjdDcInxjbHD0I311v5vT8EEDiDuQJBj1dQdaVPT1x+x6u0E8mFXpJAe66WlpaosOpvW8FEBv7zeMQlCNBBaijZP2SbNzbLfARA73xzc70UoJQPGiKciTzQlpPodiXY/6yJ4j6+rbD2GCht5+XATDQKiks0JNGEA1ty4n4dkf7Aq2SCu4L8Kxp8GLZHYes7MydTxygY8c2L+CqZG/nuY70VNvYeoNT0mDgxmRv5+kqp79sOY2NK96dQqSLAGElI743LeajB/rivc62SSQIaTovrGob2k4mssks/XlsDrmbo/phRBTb2HjSniZG73eqPjyIjGobWm8isu98Mk0qjVVSofMnrISSQ8YSgpDhRAypdO/Xfsn8EYy2A3DAAAAgAElEQVR8Vn9fx3VBBBGmPkk9miDCTqxSpc+I7hPE/syS2gLCTzgSiSe7NgqzxUAnMs8NZJIc5TIXmkIsFpYv2c8E+DfCxtxKRe/zutBTxVdyAlSVUsaqyEhel2X/QMBD/9it6oQnH7z5TZ+N8PxkT6cwDFD+GhqOe5tF1UJ9+LHxzTt3gUsIIpSIn5NfQhCy0y/nEmNg3+obWn/EhH8f2/g9XpSTSGAiyyADd++yDFpfFd29q1j3CoGNdiSQzJ9HDR5a2td3j3CGC/xy8ksIor4+digMeoiBt2cK9Lwf9KtQdpiBpD6ZBMFMK1TNVSX5NUEEzoRJTFDf1HoKM4SKxWlhM96CtGnhgwxsGK3m7rBmf5MlQYiLvNrG1q9nrJK8EHwRhPuYcXt1dLfesJtEjh27hw7Yb/gkG/JfopRqylqVSE9uwAMABfoXOOtlcAUBRzl9DWQnuxw/COG1OxppViVTFYKQkRUTQpNereueyUsV5mEt5YQnxcDvidHBEbpr/32sPwU5hBVjSbrnj/twEFSHCkFIjDU8jS6C6lNRH0o2eJOZlib7Ou4PKl/8rglCBaUpTCMuT6vm7LiS0zbjcpIYb584kQvpYn2KIhtV7JUnkSAgJKKKYbqVGMcoQJq2u2f8SDXEQq6jk/pJKdset87VrWaQ3FModEUxieQEqLLBhyI8iQQhPWU6wpQott43LIi7jIaG9o9aZIkLZxVnPHGCv48sWjc8PPdXQZfmqu11p1PZ4H2xdkvjKuMJTDiAhGm7ytotdINXyR9gSuvVpbz7HQajQtKW/R1EtnN+NtABADxFRJfsO9+6z+sEpjLJQi2KgAuw8GEp7NpFbJgkw/hcsnejcF6TqtRyCCKPTU5CABNEak0Q3rPBL26ULJd6OI8JuYU56E+ilLrM7StQyGYg8k57gpCQjcoG74ebSn5NEIXOvCLlHw8YhguFgUFwDJX05urnkj/ZBJGFwnaEM3EiMT6fiXETJB2JrClhDjo8OO9y2SlSE0TuRFORQKZCgnC2NBOL6NxMLCJ3xACv1fMSGbwq0R1/pEjLawYQRK5hgcoGrwlCjsC0kSBkzReXVEOptxqJsAIMYXUjCeMwntPL23KqCMLZJ1v1NETNBKwE4UhZmAhHepM9PI+LQRASHfEEUThIwijWZpUtR2WD95Xw3GayZaJi8mgz1ba0H0CjfBQMPtUj3Iozq4gBdXR/98bfFAP3aS9BqI1tqEtmFYLREkQxZl+Jy1jYsvLtlSnr47tCfn86YxnjjHckapd6YZcDQbigoYaG9gUWzCUgfMYjVpLUgU1ySR3+ojXHHHeif4LECiXUpV/YaTAZBCH1TfCwQPIlI5dJbRh/DVm5QmKurn7tYJMiq4jRBrLD20/8FLywVTGfJIJwRxTI+5La7U8hu1RX2eALlSBU8Z1u6aa1BOEHtv32QXT0GqetuUgvs0ypa2g7E8Q/HCsvhBONyOM2bVT19ladLEL9ELXMmyaGWIB0U5ZIEG4fhsBqcyxDMDGcg/CYpQqzm4CDs4UVuhEWKgEUIb/MNyG0b4cbuyLjQnVNrUK6FCFkxtRQwlM+jFWXL1YKl8yF5peEz8nrgCHzSBfhVxJ9nZ9xtlETROCS90xQ1gSRCZMgQgssFvdnDOzNzCe5Ham8emdLFKb5czAO
G0ujZlWhbPstm6QygkiHdjaEGqwZsEQk0kgE1idVQyTUNK94r2FFxLsWY+9HyDYfibOXZ5BCGW4qJ2mp/Tn4fphYERSOwVlnhmgeIGDPXaq1P1rAHwyLb0wk4iII49g3GRKEqMxtogrIPbu95psKdnY9Ir4YW8eCSUR9XchAojq6bZWCR71ddW1T7DRiO3x79gulMil0gy80fxk4yoXCq1CCyX97nvqcZU0QqgvOD0b3Ay4yR5pCHLEkpyGpp25dXWxfRKkLDPEimfikISwK3nxyw3mEChchiZEjzS8Jc/CGwcaxfX0bxctdSp/Ev0XqMDVZBCG5WxlhppWqDlXS+EISNVVtQ+w4IhJvCqSNEgh/Ropb+vvj4hGcwK+Ud0CToWKySc4drBDhw2hLnPpUQ21oggicZekEZU0QooE5Irt48Wxk9FiVZw09CCYneJgk1srILu/mZf29cRGXx++TO74peuqSYiA40QAJwUDmDVpIsDV5hE25ZNDQsOIDFkVECJQxG36h/x2u5piKo2I6plBUOCo5Xwf8tTkyeqR7bCeLIKSB/RTnm+2rs9uOm5m5zTlhZFKexNFQhLg+LdHTOR4OxGfW5RAMEMqzvFAJoBj5SxKsz0OKLVQCKDS/4l5clsnKnyAkUR9Vo4ZKQn5LY/jINl+Fzc77lTuPOwxJeAXxIMyX+nvi/x0QKiQn5LdXjByPYH2B8avskNW7vf5NMM5zOCP6naCFPvx7YPznxJlNN1dFrbP9XvgSGzEZdJUzJEVaosKaZF/n+F1QpuDJIghRXU1D7AiD7GipDgMH/z75vRIoD9Yngtrtsx7gkxzYKT3Q5BE1NnQ4Fa/daLIkCI9w3ykGX5jsjYuQLZ6hc7zCfTNzuyy0eKEbfKH5y3LnV2xU2ROEx8nM71EacdqeQxFay8BXJix077DFOcHnMvjdlTKMtW5vbBEmIVpR8RUmfFbqh+FBEB7hFew3hlOjo1+TSUX2ZXvF6LcI9pvF434ShO/393Se715IHgQhumNjZkaM89z9SV+CW1e5H70JknA8pADRpMcZkTUyh76aptghxCQC/U14R9qPkCeTILzedvB6E9znsSV7CnldUjc0tDdZZN3nDHcNwPPxGpu8mmKHGIz1rseWlMNWq+wJk0UQtlTs8WCQ1zwVRPzCS8YyIv6fXO9z71DzhW7wheZXwb1c05Q9QQjgZOqMDKDupzojYOsjIPth9RwTV78nNH28g8efmQQiDP4oAR9wEINJjB8w2X4Y6SB8PlZQ8hOqnUvU82cQDdhPgYqnJplrABJ3Fs5XukTap8lKHSN7W1lCEKMADAe5pGP8gB5PYyguzG283E56nnU4J7PP85xCgSliZHXbT8KK/lj4mNRME/QMDOv4/u64eBUu55tMghCVe7yUZ5Os4xlMcdp1z4WctvtYMXlIYHYROwH+HWBkXr3jdwhtK4B9ctFRf4NDZROaTIIQ7SnGk6NB0n6hG3yh+VVwL9c004IgBHgZ5y3xQI6ql6kTcxWPU/HmwtkhH2e3VTewUjeyERVRWv85iCDsgH1NsVOJSbya5SYxlXnyFBvU6vVYvYQgNhDodcU4Vpn6ecCKmieqPoiSPgnybQC/W6UDE9MIcqAT/Ry9JpsgRPsOa4nNj6bEe8zO19sCe/cSMdY5357wM3P1eFkvsJJMAt/oAKqFuNNNNkHYazv9KNh69zOiCn0Q4Wc6R6r5s373XoVu8IXmV+hH2SaZNgRhT6SW9gMpZd4IUJNC0D6RJUVEt48SXaAStE9s3vXNsY+zhWsyp2q/gXuVmf8j2RfvbGpaPj/FUWG9o0IQdpmZdy5+Innv1qtOEQr6Rzxa8eWBgVve8EokW+D7zefTX3jFOJuYRQjvt/l0aieIv4EUrg5jrirKy6jCvkEQjn1KxGer1sxUxSV+/ckcDpYSMP5sZUg/lXwJJlzMLO7haGQ1UtYHnW0N8oPIxBg7a5eUeKnjzZOgDUNYO12w3wLuLHaE16kgCJuQm9v3r2D+DjOvUAufw1uI8PlET1y86+0b6r/QDb7Q/EGDWc6/TyuCyAKZjltjxghYAZDYlJ2i99Zdbx3/hUAd5mikQzUktHOQxuM98WcB+shY+bbKhP9IRNdWRviu7EVszgRS38BoUWPbe6PMpzLhKFv9wtgre0IE8AoBT4Nw03AV36tiHeS3wEU7R7niHCJuA9vqMKFWSqszGD+xUpXxoM06aDLb6plhWgrGagYOATDfQeb22Ii6VKPTTiVBZPuavjuKnkogcaksnAOzJLt1l1/LI0y4Zv/5nBSbddhgfdk67Lu2qh0fB2Elk9UAkAgbk5UwBwF+kdjoM2HdkBrac2A6R3P1m0MT1jbR+xzrQQQofG7XGe5BBm3Yf4H5uCo5FrrBF5o/aM2U8+/TkiDKGdCpbluhJ8Cpbr+uXyOgESgfBDRBlM9YFKUlmiCKAqMuRCOgEZgOjnJ6lMIhoAkiHF46tUZAI+CNgJYgZtjs0AQxwwZUd0cjMIUIaIKYQvBLUbUmiFKgqsvUCMxOBDRBzLBx1wQxwwZUd0cjMIUIaIKYQvBLUbUmiFKgqsvUCMxOBDRBzLBx1wQxwwZUd0cjMIUIaIKYQvBLUbUmiFKgqsvUCMxOBDRBzLBx1wQxwwZUd0cjMIUIaIKYQvB11RoBjYBGoJwR0ARRzqOj26YR0AhoBKYQAU0QUwi+rlojoBHQCJQzApogynl0dNs0AhoBjcAUIqAJYgrB11VrBDQCGoFyRkATRDmPjm6bRkAjoBGYQgQ0QUwh+LpqjYBGQCNQzghogijn0dFt0whoBDQCU4iAJogpBF9XrRHQCGgEyhkBTRDlPDq6bRoBjYBGYAoR0AQxheDrqjUCGgGNQDkjoAminEdHt00joBHQCEwhApogphB8XbVGQCOgEShnBDRBlPPo6LZpBDQCGoEpREATxBSCr6vWCGgENALljIAmiHIenRK1rbaxdSkB94wVz3ikqoKXdXXF/1GiKqdtsTU1K/eJVJpHWBaOAmERAQsAvM3RoZ0MvAzGJsPAA6nh0fs3bbrztXw6nDMu4QsZBPAKwKI9D1uEe961AE/E43FTtaic90SAv0Qp1dTTc8fLqmXI0gkcjWjqWBB9EsBCAO8EEM2kTQF4iYA/MtPtXEEPJLs2/hUAF1Knzls4ApogCsdw2pWgCcJ/yBYuPKOionr7ciJ8GaB/ARAJMcgmA48ZBl+87z54NNTm7CbuEJX6JP0bgAv2W8CdKm0pJkHEYrHI317B0RbTJQQcFg5H3sKMr44O7XnH5s3XjxYHCl1KWAQ0QYRFbAak1wThOYhU3xz7OFu4JkMMBY4293A0sjrZtfE5lYKKIEH4VdM5Us1nbH44vsMvUbEIoqYpdggx3UBArUrfvdPwFhDO6u+JP1pYOTp3PghogsgHtWmeRxNE7gDW1cXmUMS4lMHnBpx0dwL06ngJ/A6Xysld+Etk8YmJRLw/aNqUmCAA0PUjg3PX+J3Ii0AQVNfYtgrgq31xIbwGpgxZ8e4A9vHBJ0XA14YH512upYmgWVTc3zV
BFBfPaVGaJoiJw7Tw8NjcyiG6HkCrZAAHQegEGddXGdW/7upaN+RO09IS+6cR01jMbP0HQC0SgtkG4qX9PfFNvqd3iYqJgeOSvZ33qkwsoRqrrNy+N0WNJmbrKxIpyCTCaYmezp96lVcgQVBtY+w8Al3uuF/IVpViwoMRy/iviorqx9w4irbPmbPjEJP4PLA9DnNcbTQJdP3w4NxzNUmozIbipNEEURwcp1UpmiDGh8veVOfsuBrgM1yDmCLGtanR0a+FuXQWqhWDsR6gj7rKe6Aqyif6GQLIJIgwBOGsz5aIDLqKCf/u/DsDvzWs0SMTiTu3ySZtIQRR39S6ghk3Szb3u0yDz9/UHX9WZaEsWnTCXpGqiu+CcYqLbE0Qf6m/J/7f+gJbBcnC02iCKBzDaVeCJojxIatvaj2FGetcG9F2Zj4t2RcXJ/fQljRCIqkaojgDRzgmR/DpvUAJwj0RvdrBzMuTffFxKzZHxnwJoq4ptghMAq+9HcUNEvDFfRfwNSoX5K72U21T7FRi+oGLcAaJsCrR03n7tFt407DBmiCm4aAV2mRNEGkED2tu3z9qWQ8AeP+ETa0IG1B9/fID2Ij+HMAh42VzYqQax3hdFBdTgsjWWdcYOxqguwFUZv/GwFXJ3k5x15Lz5UMQQsU2nKLbABzlxJGJz072xNfnQ7LZcjykkqesaGrJQNcdLxa6FnR+fwQ0QczCGaIJIj3o9Y2tX2XgKxOmAPOX+vviQoceWnJwT6XahtaziCAua7Nmsm9azEcP9MV7pZtzkSUIUcfClpVvr0ylfgnQh8bq9PF7yYcg6pva2plZ3GuMmQMT6NrK6Na1XV1dwsehkI/qmlq/B8Z/OgsRl9aJ3s6vFlKwzhuMwKwkiNqGtpOJ7Amd+fjJkWh08eauDX8Pgqy2sfVKAtaOpSP8GSlu6e+PC3tz38+dlwkXJns6v+2VSejHq6t3LGRCjMEfA+FAMPZypB8E+EViow+MDcPDc3+lcoGXD0GItlTNef1iBi5xqWMehslt/f3x/w3oPi1qbHuvAf40AccBOHBMdWBbtPAfCdSRGhm9JYzOPwhzr99tx60K82EA/zo2CwL082HrktXht7GVQoKwT/ejdDcIn3DM94EIKpb09t66PYfUmlq/QAxBkNnP11FO3HUggtsBWuLI84eUYRz1WPfGF8JiJkvvIek9ZY1GDh8Y2LA1qA7b2bHCbGNwG4je51hDtoMegAGA4lVR637tLDoRzdlJEM1t7yeLf4m0V6z4fE92Wcjki00tb2PjSXuaGL0foJpMeW+QxUfLzB/tRWfgdBD9XwDCjFL1E+aX5wY5ReVBEFTXFPs8mL6VDzmEtIm3L4fNVMUlAwO3vKHa8bDpZKdeAGv6ezuvCVuWX/r6xrbvMqwGsHFjkIfw5BGEt3d0WAmivj5Wxwb9AsAeWRxKcbqXtMskolMSPR0bvfA/rCU2v8I0vs/MKyRWVbJsO0H8DaRwdX9/XHilz/pvVhKEbKMPOs2LmVKbSyz2BGLC+cmeziv8ZlNNY2yhAXoIwJ7pdCw9xdW0LN/PSEVucxBJ2EkaaA4YkiA8yIG7UlGsfKwr/opXA21P2pfpHAb+S2LZEtAvegYGndjfvfE3YQEISt/S0hIdTu19K4BYNi0DL0RhtvT23v5MUP5S/T5pBFFEFZNETed58CkEN/nao1uroltPlamx6upi70HEvntx3AGptiB4bquWNN3TzUqCEIMmmdjxqui2k/x0prUNbcuJWGY9EZzXJbrLLgo9rE5Ec0UsHbERbxp30rIdtOo9HIxGmGllsq/jDtkEDUEQHpYkSgvIS+oQTdoJ8O8A4ym7fcQfBONQCYko+Q+EXYR1dbF9EaUuMN7jyBs4hmHrCZu+FAQRtq9hJIiWltOqh1NvxgE6dryv6uraMPiEqctnHU2Ydwzeg4BGAPu528LAjaOD885UUdmG6cd0SztrCaKmIdZokC0aCy9OQOEuIef+YWy0/ReFZHKP7IrX88lkb6fQgY99ksUpfvO1IW9oiL3PJLqWgI9PnHx8P0yskInKqgQhtyDhAStqnhhkQeJhffIqM503OjS3073w0lId1mTUak4nqaJbrNTXtx3GBj8IYN6YBBFwHzQZC7sUBCFTpflJy2EIQuj2qcLsJuDgySDa+obWy5hwkaOu7Rb4iIHe+Gbn+NQ1xIR69jqHOlSYxp47/Na8dZINn+qa2w+FZd3kvI8CUBJJaDLmUTHrmLUEIbHukG7aWbAldwjOcfC9w2hsXPHuFCJdBOyfyZRzwSa90CTqGH5r7qqgU0z6opCEOaGSykSFIAohB49Lxadh8rL+/vif/SZwbWPr4QTc4rSnZ+DSZG+nuI8p2LJI1C2TBPN1SCvmYiw2QdTXn7C3ZVQ8SMC/Odrpu/GFIYj6+tihMOghBt6eLZ8Y30z0dV5cTFyyZamMm1zSCL5bqm1pPxAp6wEn2fmZA5eif+VY5qwlCCEz1Da2imBin1KZ3DmLgdENQvO4EEFn9fd1iFNLzlfbEDuOiIS6xzYDFOJrsrfzdOeGl9kYf+awV99GFi1OJDrSapiAL0ciAl4ni45MJDoec2cNIgjZJi3uTFQkB1FXXWPrOYBt3pn9QqmKJM5rz5oGL1b1xA3CSrIJSk+iQeUU+/diEkSapM2bMqE/xpvKfDcsrPS6hA1DEJI5J+7jfC3zCsFMskbEBeCEdSeRapTHNnfeyu8JC+nDdMs7mwlCnCTd5q73VUV3j8ni7bjSbgfoPwEWl6+2JRQzfprs61wtOeVSXVPbVWBek5kcUusLQUCWQZ8i8EcBOoCIeisjW09RtSNvalq+IMXRHgD/nJ2EXqdiP4KQecSmwzOkliUSd4gY/b5fOq4Rfg6QuB+xv7ASgKwM90YQ1A6/31VVFYXUkU/eAgmCFras3KsiZR5KhFM84hltZxhLkr0bB7zaF4YgCmxvaIhyDT1sA5EJhCRZB0oWiqIxGdXjnQy8CeDXBvAEm3zVbLZomtUE4Vb9+FiyuKQNftKEEYtA6C2zZqvyewi3KqtU1jLFIIhhE4e4wyWEIQexyCSLWPkE59wxJJt40S6R65pa14FxqqO+0G0sIPKqZ10FlKmy2SrFMZruBNHQcNzbLKoWIT8+5hCbAi3uVACcjWlmNUFI9JWmLE6N+/5BqIj2X8CfeeElWmef1tKfdOHniuHsKaUUMgELJQgY/CVmutl14fiXCMwjw5h+uqUyBv7Eo5FmFYcmZ/9z9c3Fs46RGBvMdIJIgfii/ebje0ExkaY7QdiSgMxDHngdhI2GReuHhuZuDrrXK2QtzqS8s5ogxEC6F4TsYirnVJzRe9Y2tZ5HjO+NTQiXPlRWfjFVJWlP69cOthA5DsQnu18/U1YxAc8DMCTmfoEB5tyLQXLyf4mBBwgU8lUwPsAZ26eYklcx7iAKOO1PpgQhzKOTFvEZAz3xp1U2rlAE0dC2hIjFaX0sxAYzrfAyr1ap3y+Nx51Hjg9SxgdCOMK+y6M8EcL9CVi0Psh5sdA2T/f8s54gciadxImorqHtTBD/0C
0puPO6L58lEkrok6q4TBe65Uoz9W62cJBB9BEAH2ZAxNbxe2RF6P6lbwmE2dyEBICocZTqq2gS9U2x1kg+2Enrdo2nSGMy09JkX8f9qo0Ng6GrzNIRBOE1ZhG2JP0+tjkSeSi05BYi1IbKnYAqnirpVKyYsuVk0ooovc73w72q2QbCHczGuv0XmI8HSVkqbZ0paWY9QUjMXV9mgxYnuzv+kBnknPuHbNymXIuJiVYPOeatPh6szgllv4k85/WjifF5kP2Wr/vxFKX5lydBiAdxxGPy2QflbasrVaeh6UAQKtYwSgArJJIQSSiCmGzz2zAShEytSYwfJ/o6P6MATegktU2tFxHjMkdGT0s9kSZkiJdssa8T8F1ztOKqUoZ6Cd35Kcow6wnCVgO5AvAx0ynJvg5hi58TDdMpJbhDNhDwd1h8RCIRf8Iu12UlpWAjnu+byCYYL4BsiWKMTMITBD0jAgMSeJUreuYgM7d7vSHgnLvTgSAkfilS0+NirMmZTBAe3s2egQALwVNWl+L9FtU0xd5PTJ8n4ERFiUI09Sk2qNVxUCyk+dM2ryYIeyP39lPI8bp13TO47yEc5OL2swgyt6O6xtazd3l2fydAYshGoNwMYvHO8aNVkd2fNM039szbzNWevuOxj2SObkEvkWVXQA5BENb393SeVk4rxMOZSjk6aJi+zGSCsA9BuSqpknggy0gdQCjLNhEb7IWtxsFk8vFMaCPgAwFB/IruxR9m7pRDWk0QAHIn37iqyOv+ITt47nuIrIidq7ryt8KpbWyvIVhCB54J5mfXYAL8G4JxIyx0V1Zaz3mFIy7Iigl4nolOSPZ0/DrbL+lLa4Tv9/d0nu/n0Vzf0Poj5zOXBDz0j92qTnjywZuFbXnZfBJnvsDooPk0fqYThCyaa1i/FxVcSzFe9kHBGvoI2BIS83LXa3jZZhU9wq9Kf8sljSYIAF6XyXOir/52YtTP3E3e6x5ilEc/MDHWE13d39Mh3pHICReRVlXtsx7gkxwTYydAp/f3doiXugJDTBREELKL+XTI8Q0gWuZo0xsGG8f29W0UDnnSL8eySyHG1VQshkXNsYMiFglLl4PG6+eukWoc7/XiWz7tnOkE4fEexPMweXFQWBVVPD1ChhT1zQkhXbz4Ek4D0f84JfhyPeCoYldoOk0QGQRzTihMZ41UGLc7X+OShciQhI62L7kNi9scr5VJ/Suyg+ehEw8Vf0gagE7Visnj8ryuuf3DsCwR0NDxzrD/Jiq5AB7ZJQUt6++Ni3KUP3s8CF8D8ysgehyMx2DyT4ro1Sp7qUzJmUy5E+n7raUEON9/njGX1FkcPJ41VTZsCMBTqGq/TumHqsY+rzcnbEmczWOIqI6BQzJr9suKYybq+hYBFzrS+z6YpFjutE2mCSIzdBKTvbjBuNYiCDvvdMRXiZ+D+HPuPQROI2C14xUv31hCEvtu38CBstkmEcHVzVy9rauoriF2Ich+KMhx0PZ+lrOQoIPZChYtOmEvo7LiPgJqHZUW3cFQFqANgIj8uSrR0ykL6x56oc8KgpBLm0UhW4+owJ7SQ86diKLlYHZgJeOlCSL0rJ+BGXKitRL+zEwdBM5GpvQ8+Uk2+EeRDoGcfbHO9zJNcuoOZZfv5RikbMXks4jkmzW2wTCO9njMR3YyD7Xp1jbGzieQeIo164BlgvnM/r74DcWeeh4bkFDvrenv7bhZRb3n1SYPM8sZJ0GI/sulTZgEXDo8OO+yPDyXPd4i8SdwyVoMVIs6x09y6f6owUNL+/ru2VnsuTcdytMShGOUXOau4lJV/JdRr3hfMnvExXccfr0jvYpEsrDJqr4Hghw4Qre5QjqnBR4Pr9agaK7uiVvTEDvCIPt1Lqc/xgNVUT5Rdmne0LDiAxZFxHsL73SUtY2Bk91vYLjrqmtur4dl3ekK9/1bwxo9MpG4c1uxF5VQEY6k9v7irkuer7qeUxVeyPcazF/s64v/MUS9abNKi75OBHF/M+ZPkiljRhKEPY+bWlcwQ5Cq22/nrpRhrFV9o7qm5uQ9qGJUqHrOdI2JsLa7fmRw7hovwpEGegSeJit1TFCwSZlEqWCaHmJqTL+kmiCcBOEyd52gVZGE6M7+LnvC0pHX7XiXM0s8JrXJwHU8WvElmcOOOJzNChgAAAVOSURBVNlHqqLngOnzXrbdXqGXwxJExnHvOmdo9LT3MdYk+zqzHuYT+iWRAsTvgwxczqMVV7j7ZF92RrEGTEJf7PR+DSV95LMERf8qd3v9m2Ccl7shQYzDYwT63kjU+NXmrg1/d9ch8kd3f+1Ag43jwfh0JuSJrCk7wfj6yNC8K2Ub3GRHR5U1MIyjnCS/n6m2MM++FYZxXZVR/Wt3xORs2BiTImdk5lmOB7SwEGSL1wbdQ0kt8MBbLMbad70Tj0g8pTP+R8aPAH63o18vGWwe2dd3++/zmVczIY8mCMcoejzPmE7hcf+QzZ5jvZP9QVEHKp/UdiGDYN4CMoQJqmk/k8ioBdmPD43FwLGf8SRsdT6jyYSLkz2d33RP1LAEIfJ7qLE8rVXsTXfOjqsBPkOyUFIM/J5Aj2dMecWibJCcPMXm/N/V0W2XqIY9z3dR2jbyrxhnE7Pw1PULzyAes39FvJNq10U8F4y9guplIMnEp/vFRJoBBGEjUtsQW0pEIsyF02R7IkQkQoLQjsziqgIwX0LO2TwpcSk9PDjvchVVlX3YyLXAS5dFeI0Yv2bQs+l/8kFM+IhkDItyhxI0L8r9d00QjhHykQQC4wDJAomleUXtARWxoVbN2XElpzdU58avMId4C1nGaivCjc7ggV5vVORDEKIRtQ2tZxHZDwGNtY98Xr0LOJkH9csE4YqRt+ZdpLIpBBWm+rtwEoyY1hVEOD78OMhq4cfJoM8lujuFabCvufIMIQgbhOLhyFvIwDmJ7vivgvBzon9YS2x+NIUNOQ8mqU2EUISkVuT0TKUJwjVukkBuYl4+mY2/5DXMHvcQQd7TE4qzN9Tq188FQTyvqRJk7FUwfx0WbhBid85lt4cPQr4EYb8bnSLhl3GUo+EjzLw62Rfv8MAmI77Tla43f31WTH6bQhGXoH2PYIC+4PHwTlBVIlpopwX+9kBPXMT0CvRjsQk41yTW0xItqAH5/l6gisldLdU2ti8iWN8V3QtJuE8R0SX7zrfuyzd4npAkOEJfIUD4H6nGM3uKDD43LCHli3e559ME4RohaYRKn/uHbHa59JHfk4XifiFaWXEyg9tA9D6H+Cv0uH8lph4wNgwPz/2V83QtMTGVhuvOlyDSm5jU4zswJIHtiLQVh8AyTgeswwESoZizJDgI8Isg+mW5RdRM68Z3LGTiExjcKF77c6lD7NAnBPyRmfsNRO4dGtrjt/lIPTOQIMZWlz2nqyqWMOM4AB/NYJjdtMfDxzD/zEpF7wsbhdZvo00fbIwlAIs322syxhNZ44GdAD8PGA/DsG7Ybx88nS8hlftmn0/7NEHkg5rOoxHQCGgEZgECmiBmwSDrLmoENAIagXwQ0ASRD2o6j0ZAI6ARmAUIaIKYBYOsu
6gR0AhoBPJBQBNEPqjpPBoBjYBGYBYgoAliFgyy7qJGQCOgEcgHAU0Q+aCm82gENAIagVmAgCaIWTDIuosaAY2ARiAfBDRB5IOazqMR0AhoBGYBApogZsEg6y5qBDQCGoF8ENAEkQ9qOo9GQCOgEZgFCGiCmAWDrLuoEdAIaATyQUATRD6o6TwaAY2ARmAWIKAJYhYMsu6iRkAjoBHIBwFNEPmgpvNoBDQCGoFZgIAmiFkwyLqLGgGNgEYgHwQ0QeSDms6jEdAIaARmAQKaIGbBIOsuagQ0AhqBfBDQBJEPajqPRkAjoBGYBQhogpgFg6y7qBHQCGgE8kFAE0Q+qOk8GgGNgEZgFiCgCWIWDLLuokZAI6ARyAcBTRD5oKbzaAQ0AhqBWYCAJohZMMi6ixoBjYBGIB8E/j80OxKJiN+5EAAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-47"><g><path d="M 51 101 L 51 144.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 149.88 L 47.5 142.88 L 51 144.63 L 54.5 142.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-48"><g><ellipse cx="51" cy="51" rx="50" ry="50" fill="#0cf232" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 51px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Suspend initiated from userspace</div></div></div></foreignObject><image x="2" y="37" width="98" height="32" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAACACAYAAAARZ/7/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmAHFW1/neqe5aAj0WURUDBBRUVl0hm7TgSQHZCkp5J2AUUEQRRkB+LT1FAffjkgWAUZV+SmYawGAEBcch0T/cE8uQhDyMisoQlIC8BhMlMd9X55VYvqa6+VXWruyfpmdz6K5muust3z73fveeehaAfjYBGQCOgEdAISBAgjYpGQCOgEdAIaARkCGiC0HKhEdAIaAQ0AlIENEFowdAIaAQ0AhoBTRBaBjQCGgGNgEZAHQF9glDHSr+pEdAIaAQ2KwQ0QWxWw607qxHQCGgE1BHQBKGOlX5TI6AR0AhsVghogtishlt3ViOgEdAIqCMwIQQxffpXm1q2XBNjyzgMsPYF6P0A/q3ULMLrYH4FoCQDv8u28rIVDybeUG+2flMjUD8E2mO93yHGTxzyeUN6aOD4etUQi83ZKcfRIQAfKpS5xgLvN5JMrKhXHWHL6emJv2ssS3eBsE/xWwYOyyQHfhu2LNn7E11+Pdo4FcuotyzXlSDa2o7ayojmLgDx18oIIXgkcsy4KwI+P5VK/DX4df2GRqB+CNR7UrlbNtUIoi0W3zNi0ZlmLnLByMii1ZOVIDo64tNg4EQQxtPJxNX1k6hNV1K9ZbleBEEd3X0HA3wtgPfWAE8OxD9EDpem04nRGsrRn2oElBGo96SaqgQhiM7kpu8y+CsAnotSLjY0tOTlyUYQ8Xg88uIrxiHM1o8B+hgTzskMDfyHssA08Iv1luV6EAS1x+LHEdMvAEyrB3bE+A1bfLomiXqgqcsIQqDek2oqEkR3d9+nTfDvAexQ6N/fJytBdMR6F4IhtBz2ownCe4bUTBAdsfgMMAm95faual4k4Jew6M7m5rFVg4N3ri3+LvST4+PGbjB4NsMeqJ0rmsh8bjqVEHphDprg+neNQC0IaIIoLJQ+dxBt3fHpBugBANtOAYK4HozjNEEEz5qaCKKgw1sEosMdVZlgvhgWfqxyArCPey/TqQz82HUC2eQXecHw6TemAgITTRBTAaOwBNHIfe6I9WqCUBygmgiiszPewQbdB2CrEhsDF2WSA/8ecudfVFP9BkCk1HaiK9ND/aeHLEux6/o1jUAeAU0QwZKgCSIYo0Z4o96yXBNBtMd6zyTGzxzAPGFlI/t6WTb4ASg9jRCeRo570unEi40Avm7D1ESg3pNqKqKkCWJyjGq9ZbkmgnAf1QAkWqKvHjk4OJirBs7OWN98Zr7ZcYpYSxbtPzzc/0g15elvNAIqCNR7UqnUOdne0QQxOUas3rJcV4Jgxs2Z1MCx1aqE2mf2fZws/gMAA6C3GZyLsHFSKrVYOBmVPRKBDXVnUe334s5k1WrsCcs4EeD9AezmuDt5C+DnAeNBBi3adSfz0UQiYQaJVnt376EE3F1S0znM7gonqyNAOBWgTzn8S4T9+UNMuGrXHTmjUo9XO0SfXng58nki63gw9gPwAQDRwvurCXgchBubI3zn4GDiX0H9Eb9LHKXKrF5mzDhiO6M5ehyBjgSwR3m/+L8ZtDA7us19K1ZcnVWpz/3O3jPn7xplcz4sLADRxwpjlGPgfwG+kbPNvxkZueXNjaFiCuMH4Z7gTuc14YDaNG3tAQQ+BaDPOSyK3gLwFINvtcZzNyxffsfrQZipOLK55TKgzLL5p1K+X3k9Pce3juXe7gFoNsDdFc62gPJck7XFty8EJUdJIcORpuhsEE4A0Uf
B2K5QrjDRfxag+y2yrh4ZSvyl2jVRlLcpZbkmgujs6v01E05ygF21iilIoBuBIDpi8S+A6UoAn1RrL68kA6cOL0v80U9AvAiis7NvbzasG4Wttn99vBKEr6WHEg+rtav0FnXOjH+RLVwVXIf9zVsFP5UrgwwQvAjinXfe88/m1rVngCDuqTZ418sb/hpAJ6ST/b9TnWDCWTMSzV7EhFMcJCcrvVR2e6z37EbxpPYiiLzs4ZcK45QD02WwrO/5jZHKAr4pCKJ6Z1vvuVZvggghY7bcMZBh4hNHhhJPhpmfIeqZMFmuiSDau/qOIrJVQsXHBPG56aHET1UndBjAnO9WewIolhHye2rvivcS0XVV+HqYBFw0NrrNxV67YRlBEBlJWNYdEvNhL8jEruXsdHJA+KMEmgaLkwlFjIsYfEaZYYDSgPCIFTXnjQwuWeX1uowgYJqHIBK5EECvUjX5l3IMPieTTFwW1K/CCXRAncAxysxfNoh2K1jR5WtU3EGq9qGmEwTz4US0K4BLw8geE+7JtvCRXiFsGpEgOmbO/ywsvm39Ce+Dqti63pPKSj0JIr9pw+Iq2hhqfjaKLNdEEDNmxnePWCRUQrs7BipHjIW5bPZClaNulYKAkAt8RTVhvvfx9RBqnmGAXsuvK7w7A+2SnfE4Mx+bSSX6Zf2tIAjg1wC+QHnVS/F5lYGHCPRmQTi7JAvGKBGOGR4auN0PV6GqaJ72xpUAf7XiPcLrxBAqnn8I4mDw5wn4hGQ3/iRZuYOGh5c8J6tLMilfBPhxgA50vD8KwmNg+nNAXYHqw87OOR9gI3oPgD1d7TEBXgkYGfF3SX/WgDEMwsGl7xqIIAAsAjDbMdYmCGJsUmAa85E5XwcwFYIoLNgFhzLrPQAdAqC5gNObzLibiN4p/H+Uo/RfmcHFz4r/q5TvHCef8RP3mc8x8Ehe9gvzjPA5h0rHWVSFrNjqquw7p4HoI/kXrS8CxX/bf1i+XpX1WKkQtpanU4lr3HLtsw7kAH4aRCNiTAB+LwgzJe1T2kA3kizXRBBirDq64ueA6EeSRUKEzXgIbPxmvNW6v97B+MIs8LIFTPX7gi70FoDmFMth4CkyjPnpZYuFUJXt1u3Ft3XNsSASO16HCoWHx1txkAyHgKO8IJ8zdtmJB5z3DGICrsvRtwm4wLV4vwrDOCC9bPGfPEiCOmLxs8D2mG0wKQavtBinv/99eMh9n5G/L2j6AQEnu04bv2+J8jzZvUTAru0tAi5ujvJV7m+FvrXJsq5h2HchGx4fk2c7OOQWb9zEzH1lnwAP5Aw+efmyhFhQS49Pf/LvNBZBFNttMuNOK8Jnu/tj31NFcTaYvuuSBU+Vb9gFPOwldZjye3p6ouPZ7Re61NUmA7+yxrP/7rHRpPZYbyfZajeXyjfAPL4aPwiPRdtzMyzu9Z5/CfsYZPzSddoQp9b5mVSidOfolM1Gk+VaCQLT941v3TRGtxLjII8FqfjnVcT0oEW8JBuNpFcMLvpnwPu+P6su8F6FqH7vuDjfqVDWq2TRrOHh/if8GtjeFe8johsdO67x9buggzPJgQfd33kRhCAiGDQ7s6xfXHLJHg/VF93aEl19nMyaTNJvUe6DMLkvnU78n0+fZCFVTCIcPzw04FQz2kX4EIQgsCPSyxYPe9UlZKplHSXKSMLH5LmtK76fQXSX80SlEK7FgygbkiCE8+kFu7wPl/oYI8j6YzLzHNliFGYBF+M0kQTR2dn3STZs45RSNAZW9KeKxeLvzTF+C1DbBnnikQiaDkwmb10jk7GwBOFBYLZ6MpNKCJWmp0p37574jtGcOAVST7Et4k7CGs8eIiO+RpPlmgmiuBisy9EVBAgLJseu1G+5ESG/8RBAiWpOGKoLfM0E0dV3IBGLUCL5fjEeamniw4OseexdXQS3O1UqxLhkODVwviJBrGEYB2aSi0cCiFR2ivMiMeqI9V0B5tMcZfqqilx1U3t3rzhJiFNL4ZGfjDwIQuyCT8ukBsSuz/fp6I4fANiLflGlITV5FpN3LLfDDQALa6jCEHlPQGelBaugXxHw5bLGNNoJgvkuYY0VZBjQ1rZgB6PJFBuQkhEFE87PDA1c4ga7kQiio6vvZBA7ZSKUsYvkLvQfVjQ30+uOLCxBSAhMSVVUxLyjI/5hRGxVvEh7IB6TiI4eHupf7ByXRpTluhBEoZMFixi6PMQlYREfcZx8xCC+aOcdcZ+KyeZGIwiXCSrgvztxDnh7rPc8YghP8FVCx8ngu2Tx9qUniBBe5LYZnGWJQGofLy2SkgiVkjsjzxOA1+otKUN6MpISRAjHx+7uuR/MITJIgLigLS78FfkKJO9JJ59Xf2S71wZTMXmeAioXfUGW298KIF76zYPsGokgOjv79meDhQr38wB2IcY1so2U1xiGXQtCE0R37/cZ+F7YDYizvZ2uMgBe2hLdMj44eP264nuNKMv1JAi7n0WbesA6mYB5CqaMrnFXM9kMKxRu4VL9Pm+1wPcD2KbE/gFWSV6C7PV3CUG8SRYfMDycSKuW1d7deznBJqPiclohgO1d8cOIaEnpNBRiwS6Wmt/llC9CspORxwlC2ZGyu/vIbU1k73WqDmQJbdx9YuCFKMyeZPL2Z1Swk/WnkQgibH86u3ovZsJ5k4kgVMbJ7x3VuVwsIwxBSIm0ivDgbV3xboPssERbinbIxrURZbnuBOEcSDsQ34vGbpZhCVO9eWB8RtFUT1xwn7fLjviZ12kirFBUSxBCH968Dvesd3rpLC/DJrJrORJJZAYXC0ueQNNSdYJQP6UUy2zv6ptDxCXrJXF/wdnITGfYE/fiQcAD/9qi5YjH77/p7TCTtHAyutiPjDwmllTdIatbdYdbsSBKdmZBfasIGdNYKqaHDV53aCp1t3CGC3xUPWlV8S1WOJF3EIGdCngh7FoQhiBku3pmOjST6r83TLvbeubsYuSiyxwWnxUn70aU5QklCDeARcJgwr4w+LgAwvDV84UVimoJQnzXGes9mhnXe96v2ClUcT8Di6pJn1ph5lqFR3oQHtKdMvhvgCGc+EI+LIh+hoMgKi4FPQjiW5mhAWHdFfgoLmDU3tV7IxGOLhYoLqeHUwMioY3y0+6+Z2oggghL4psLQdjWhebbezHTHGL0geyIBsXH1yw6DEG4d/4AxoV6CDBCGdkw8xZEOKwssCnT0ZlU/y2FRjekLG9UgnDPWHFJGG1d00YGnUsMEbaiGN6hNNBeF7VBC2LQ6hDme9v0bNobl3PebyDoEj5vd0+4IUeRxY8sW/xCUFskjnLKO+1i2ZKdztsW8wEjqURSvBPaWSio0eW/VySPUVzgPWtR+b5ux393roMGIoiw6q6pRBD5DWVuO8to2RPMO4N4BoE/7wprIZOhuhFESG/yULPGmaioUWV5kxKEE8222IK9DDYFm7rCWMhNNsMs8LJRC/u9HYPpZRIewOISPkxa1SeI6IKdd7SWeqnL/GIxqUqcxFtX6LxKl7qaILyRrJAFTRAVYG0sFZPt32NiHpi+6eGgqTIlNEEUEzvVKMsNQxBi1GXOKF
6XdGEXeLdUVfv9hoBpOEeYhwfE+ylWK6y0bmyN8uky81hNEJVzXp8giooHtcBxRQQn6wmihg1YDoxVILzPYRKtCWJTEkRPz+xt1uWaxSXlvus9YpsA7Mh5fZqwkKnpkYT8lppRVrvAFxtX6/d5tc3xrety73QTYS4YIgzBLn6dJ9DC5ujq090ObPUgCIlDX3B0zSqsMVQHV2WB9ytL5ftGPZa7+1VLLKbNQcVkRx/YYu0lYJwZoMIdBXgVg1YYoCHi3MPr1m33VHTamr1c6VAnkiB8c3Grzg/3e40qy1WdIKb3LHhPcy73B4D2KnbUywksLGCSha5MVVKvBb4eBOHuWx4XEeeFTxCxlCQWW1IHNjdBVHPRKjHHfZkNmlX0wt5r/2O2fNc7Y3c4vZOrqUd1PFUW+FoJQnxfkZOkiiN1xUVkFWX49UUTRB4dmZmy+Lsk6oD4sx2qhy261SJKbxGd9rzTZ8CJd9i5HOaS2m3AQMA/YfF+w8OJDbGbVCdFwHuNKMtVEUQhPlGiELyrOPpKHsZBWErMwaSCFbRjDqqnwnIFCAwIF1RmmdC2HbUVRbNXOS1s7ElCqLDkqbwIq3SiCaq70puUHx+PRmc5Q5pUhGdX9AoPqlv2+8YiiAqT2yr6VIGdJoiKIZ2oOwgPM/KXyOJ5qn5AE0kQnZ3xz8CgBxh4TwEUZcfFsPOmEWW5KoIQHa/0DMSoxXz4SCrxQFhgyhZWl0MJAGmIBcmuLFT2OUm61AqCKITLECEfZomNDgPbM/ORRcugoH7aJwrTvAeMvUvvShYfiaXEP0yDZ7mDsvnUJ0JgXOMMGcHAdZnkwIlO/wxJSAOluFKueovhOoS3bt5DnK0HWpteu92pOttoBOE2UQXKTk5BYyQiL7mxC6vWCapDnyC8TxASM9LQ3v1uH6D1wS3rpmKSaUsQIspBUTaEx75l8D3rw9SI6LSPEfA3Yr4qlUo8X3xHsmnd5LJcPUF0xjvYsD0Dt9owQXhwvBWza4jcSh2x3p+B8U1HmRU7YfGb1GqHaa7KPYj0BCQRqnroBVWOjRKCCBUuQh5CozLWSy1B0YrjIYkrI054V2SSAyKvROnZWATR0RHfGVEaBOPDxcqd5oNBi7c0ZL0+QWy0E4QkjlLoRVESX6xuBGFHrK6MX/Y8TJ6VTieeDpKvwu+SGGaVG5lGlOWqCcIrLO16M9XbYPLJAZFBpbh2xnrnMuOmsqicwIXDyYHvuz+QhWJQZfaurvkxi6yl5eQm33W4vRv9IjG626jqTSyztWbgfwwru//w8B2vBgihhFQhDXZWSF+6CESHO8p8FcSHpocSy4OE3Y5qmdvhCrZTXpaeUYZxUCa5eHBTEESNE1g2cRsr3HdIsppsVkzuU21YHX+YuVza5MR6rwfjONUNhSzCKkBXj49ufZpKStx8Xg1LbKZL0WoBXtIS3fIo171KLWQ0IbJcNUEIcOUdF7/wSiLj//nZ/jsXk3x2Mzq9EBBrmuM3P6auVA0AgYtdYQcsooS6E8tIdx2d0pOSmnBILt/KnNdKR8uKgIDFX+imlqj1dZ/IsdTR3ft1d7Yxvx20TNgVwoqLBslDi3tEGt1YJwjRsK6uuZ+wKCLiZQlTx8LDg7koFjwymHjFg/i8swSGXJSDiHUqqpiCYkSpjr9ErQIwn5tOJX4SFL6mMDdvKx93ezR81c3uU32QgY3HxiowU6S9RnbE340IiURh+zo3VV7q+EaT5ZoIQnS4M9b7FWaINJduL2jx84sM3EzES1oi2acGB+9cWwCJpvcs2K7Z4j3A1jFgzHcEwyviGJgdrSL4nP0lPUOG9RV3Hmjbf6H1jV4iFqEeZI5uUoLwOCl5Jm8pCIWc8DwWUz9vTa98tiJfLTVlf+RO4hN0wvE4BYhmrwHz2bBwqzustE9uXE9CVl0gvBbXkN97JkGS5QS3nbHkyZbyzdEEUTEsEqMQ3xwVquPnkZVylIAL2eQrZCHO80ml+FwGi5AqsnXH02JKdKwysCWehMmH+6mMfDbDSw3GWalU4q8u0HzyvftuMBtKlmsmiPwlX/xMAgnGlw5W0A5L8rtSHmJf72DGszDwsFdaRhHjBiRyEtvxUewF0gLvN5JMrHC3R87q9lsmGC8wIVNIhxgBW58D0cdkJq5eqhwJQWQBGA6b8LI0k4D1aYCmS/AOPEGJRksT8mzotDMNqEjP6FXXKBN/OzOUWCgbX9UFok4EAd80qsAqBpL2GBF/KjBopCaIimGRXtbmyfR1ML3B4FyEjZNSqcVD4s8hxl+uGsm34C2A/wwYheRcLDZ2ImjmDq4GvsbAurLQ8D73kRJjDVGcuDx+CSCRJW5Zc9O0U9xmtTIVuKMdJRlj8FYE7CPNJ09IR5GbOzS05GUv2W8kWa4HQdhi0tYV33d98ncR0M5xzK+CGoDXmPkbQZmaiiVXl+g8r36ImvRjhy7S92KrvbtXOAWKUCAOPaJy/14ig48ZXpZ4SPaFxMx1kEAPMyCSCymSLj0Dg+b5pBotqzpvXkhXAxDhQ8I+gQnYQywQ0rqr+d7+Jmf8AuBjQnRIpD9dyIDYjW5rf6cJQgaf7K6r7L2Cs6wdfC7M+IXISilpFw9xNHKskbXOd6Ys9UqUJArw2fAVyveOptwZix/MTOKeNC8roR4esaLmPK9ERs6iGkWW60UQdt9Ep8ZzdAID3wGwcyjsgLUiBy1nmy4ZGbnFTk6u+ogjZ8S0LiOyk7v7BdPLrTdV/UVrlM8Xen2XLjLQD6K9Z/5ulDOvAyimELRPND9HRLdnic72C9pXQRCMh6xc0xFGU24mwNcGxH6y8+KauaYLwuLmCG9wqep4eam83GMVZoGQjXO134cL2cAryTKONQ3LKvPE1QQhnXqy9JnOF526/LDjV1D7XUyAuFNT2RT9nRnn7fo+vl3EOKs4Ffj7w8jS55a6EnS/Yqu4mC9l5rmKbX0LxD9EDlcGZQV04tkIslxXgih2zk4atNrYg9g6BEwdgJ0HYkeX2kUcH58HjAfJsO4ee3vbIRWLAB/SoLZY/OMG6BtgO+H9BwqDJ3a7zzJoiWnQr5wLdViCKNZtE5JlxgmYC9CHXEfe1et3sH8nUL+ZjfQ78zF4tV1GEMW0pvm7hvGT1t+tHOsIXjYK5pUwsCgK82a/46oKyRaj6kZgnMhkdQEkQoYUjQXscWLQUgt07fJk/9+CLg/D7iDrSRDFsuzNikmzmfmUsuiftkqElzNoYXZ0m/uEzOlgfXnUvDydneOTNygxvsxsnSRRpZYSQoUlCOfcipqmKPtgAHs4Eo7Z83h9Xpb7GbRo153MR53BLyUm3G8abBxSVHnJZKwtFt/TYBLpc4U6yKm2khqTuMsQKV4jTWYfg/tc64AJ4BWAHweM61qi1r1BKYr95ummlOUJIQiVRUm/swEBP4LQOGkENAIagU2FgCaITYW8o15NEA0wCLoJGgGNQAUCmiAaQCg0QTTAIOgmaAQ0ApogGlEGNEE04qjoNmkENAL6BNEAMqAJogEGQTdBI6AR0CeIRpQBTRCNOCq6TRoBjYA+QTSADGiCa
IBB0E3QCGgE9AmiEWVAE0Qjjopuk0ZAI6BPEA0gA5ogGmAQdBM0AhoBfYJoRBnQBNGIo6LbpBHQCOgThJYBjYBGQCOgEZAioAlCC4ZGQCOgEdAIaILQMqAR0AhoBDQC6gjoE4Q6VvpNjYBGQCOwWSGgCWKzGm7dWY2ARkAjoI6AJgh1rPSbGgGNgEZgs0JAE8RmNdy6sxoBjYBGQB0BTRDqWOk3NQIaAY3AZoWAJojNarh1ZzUCGgGNgDoCmiDUsdJvagQ0AhqBzQoBTRCb1XDrzmoENAIaAXUEGpIgurriH7UM+iYsHADCrgAihS6tBrAKoO+nk/1L1bup39QIaAQ0AhqBsAg0FEFMn/7VppZpa89h4HsAoh6dMZnp0Eyq/96wndXvawQ0AhoBjYA6Ag1FEO1dvV8jwpWOE0NFTwj4Jyzeb3g48Zh6N/WbGgGNgEZAIxAWgYYhiOk9C97TnMv9AaC9XJ3IAXgJoBzA4lTxLysb2WdkZJFQN+lHI6AR0AhoBCYIgYYhiLbu+HQD9ACAbYt9ZcbNnGs6dWTkljcnqP+6WI2ARkAjoBHwQKBhCMKdNIeBF6Iwe5LJ25/Ro6cR0AhoBDQCGx+BhiUIAA8bvO7QVOrutzY+LLpGjYBGQCOgEWhcgmA81NLEhw8OJv6lh0kjoBHQCGgENj4CmiA2Pua6Ro2ARkAjMCkQ2GQEEYvN2SnH0SEAH1JGinBDemjgePF+T0/8XWNZuguEfQrf/z1KudjQ0JKX22LxPQ3GTwCaBWAaCK+DeTmDFmZHt7lvxYqrs151zphxxHaRpuhsEE4A0UfB2K7wrrCmeo6YhkxY1+TWbTviV46z/Ir7FcI5maGB/xDvxOPxyIuv4ABmnAqiGaX6FNosMBg3aTYznwLQpwD8GwATwCsM3M/EPx0ZSvwFACtjrPiibPwYOCyTHPitShHVfi/wWrUae8IyTgR4fwC72WOcf94C+HnAeJBBi3bdyXw0kUgIPKp5aEZ330cM8AkEHFZWT35s/kqg/tx49pbly+94XbWC9ljvd8iWzfxTxKyt7aitqCl7HgFfBrA9gFEwr2TCzdZ47gZZHcJvKNq6pi0C40QmjgH4QMl/yG4jniXG73NENyxP9v9NRQ682ifamm/j+EnEOBpEHyvgboLxAgzcZ4F/Xou8if60tr4xnQlxBn8BhN0c8080YRTgVcRGCoxFY2Nb/1F1DsrGZ++Z83eNWGacgLmuuS7U2k+B+Xe5SOQ3jyxb/ILq+Drf81lLRgE8C9D9FllX14JZNe0K882UIgiY/AVEaDaASx2LRjkeTF9Lp/p/5QYpLyzWhQQc4+Ok5/zsRQBn77ITDwQtQl4E0dnZtzcb1o2APdn8nifYoN7Msn6x2IuHOrr7Dgb4WgDv9fnQZODG1iifXm9VXbULfLGt1XzfEYt/AUzCT+aTakLOK8nAqcPLEn9UWRyLZYoNBjFdQ0C7Qj05Yiw0c00XqFjbyRZgMoxVsPg2gD8or4+XtkS3jA8OXr+uOP7tXfFeIvpPADsrtFEQUYaJTxwZSjzp974HQSxVlDe7Hgt03PJk/1Mq7RLvdHTEp8HAiSD69wB5dhf5GoAzVOag80Mx15uYL2XmuQpzPQfQAEzrG+l04v9U+iSINBLNXsSEUxTKVx4blbrr/c6UIggCrmDgx57kAPzDNHjW8mWJfziB7JwZ34ctugnA+0ICrLQAywgCFj1NxNcXdv0q1T5JVu6g4eElz7d3x88kkNiFenmbu8u7DSYfm04nxM6lLk81C7yz4pDfU2FBvM5nbL36ZRJw0djoNhcH7Tbt09zLdGqADHnUQ8/AoHnpZYv/FGYBJuAMBk4GsKfHdyYRHT081L9Y/G6foF7C2SC6yM+h1KOsUSb+emYocYMXYVYQBPPhAnMiCoP9GiI+Zngo8bsgYWvrmbOLkYvcBlBb0Lue+ICuHhvd+oyg8S1srOYBfE2IeWdXy+JEYdBsxyZN2pz8pg+Lvcnes5dibp6dTg78IsxmpkrMlD/bZATR0zN7m7FcyzkA3p1vLYvj8ZccLX+Jgd8TaIM6iK3l6VRCDK5MxTQOYMwx8K8y8BCB3gasdnuXTrQwPdR/unMAOmOmjklYAAAU70lEQVS9c5khyKGopig2wQRBEEkKTGME3p3zO0qhxil7mHBPtoWPXPFg4g0Z8m6CIMLdzHZZQpUgHhPglYCRyQsjf56AT7gJgBi/scAPuiarEKwUQM8weCuCrXIrllvqi3ORUZYOnxdDLvAVJYX5viMWnwEmobpy90s4Sw4DJHaS8BmjcWY+NpNK9Pt0iTpi8bPA9CPJwitUV38GjCfs74k/BcZnJDLzKogPTQ8llnvV416A8+rPkhqzNJaFBaYLwN+tbGTfomOoh7zaah4mZAiU9xnybuMahnFgJrl4RCqrlSqwnxDwVad/klBdwcDDYl4A1qcBmi7ZrARiMX3f+NYt6yjBwH6utthqUgDLi2MLsDgpdwLYQdLucWZakEn1L/GTbZ+5nmPgfwn0qD0XvbF7wormDhwZXLJKVo+PnAon36dBNJLHjN8LwkyX+iy/DhCfmx5K/LRRSGKTEYQbYPciigArJskdxIbFEHQ5m9YFzh2zOFbCMLOPDCaE4NlPx8z5n4Vl3edaeEwiLI4gd7a4z3C20969vUy96yfL5e6jMIEWNkdXnz44OCjuKsqeir6V/3ynafC33KeattiCvYjNBAF7OF4XE8cC0CT0sQRcyCZf4exnYYd5PIh+Xr6A8b0wMbdep4gwC7xsMql+39NzfOtY7u1bAJpTLEfs5sgw5qeXLRbhVsruV4Qeu7l1zbEguqyczHl4vBUHeZG4x+LxGjOdmV239YB7d5qXP5xWUIs4Nxe+i0gFQZTA4RGORuZnBhc/W/yTqGPUxPuLaiFbp93cdD+Az23AlAdNAye45Uf8Lr5fl6NvE3BB+QJOt7ZEVx8nlVUXQZSPnVxlZ7erpek/Ie4mNgTWFJ9Wg4V0PhTbIQJ5mkQLCfiiq22+8u0x1231YC6bvdB9x9PZecT2iDRfwczzyvpE+K/00MC33HLX2TnnA2xE73GdBD3LF/P0+Zewj0HGL12njVFmnp9JJe72I7uN9duUIwgGrsuObnNy0HGzoPdcBCJxhC4+Ssc8j2PxqMV8+EgqIbzBFQmCrh4f3fo0r7Z2dMcPAOguAM2uIkeJcMzw0MDtHoJCHV3xc0D2brj4vMwGzQo6IqsKnuoC71We6vftM/s+Thb/AcBOhbJeJYtmDQ/353fzHk97V7yPiG50YDfOwMGZ5MCD7k/E5iFqWb8H8HHHb0/C5MPT6cTTvvV09+5LwC3OTQYDF2WSA0KfXmEcICMIW30RNb7kJAdZne35uoTapigPTzhPF/WQBR8CG8xFscC5wXLW56X6EpuY4eTA991ta2tbsIPRZIqxKN0nEVH/2DtbH6M0dyMk1GTxYrl+jrU9PT3RsdwONwB8pHOuM/OXM6nEgNdu3d5sTHvjSoDFCar4VMifKH88u/1CJpwUpnzx7t498R2jOSwCqMfRl4w1nj0k
jPGD6rwN+95UI4g3yeIDhocT6SAgOjvjHWyQOD1s5RgYz4ntLq+jI/5hREgsXO8v/cZ8FywscO/SPU4QgZO7u/vIbU1k763Qz3rsYpxtlCysngtkEFay31UXeK+yVb9v7+o7kIiFeikf8l3RP8beAERwO0AHFttAjEuGUwPnV4xld++pgB0ksrQIBKmKnGV0xnqPZoa4TyqGpZfedYlvZAuw1yLqbmd7rPdMYvys9HeHVZ/fGMoWY2Y6OpPqF8RW9tRCYGJBbZq29lcFS6xiuVI5l5CdEvEXC23rincbZM/fLQt/W0sW7T883P+Iu0+dnX2fZMPeZGxQUTKfm04lxD2er4WfbPPgHi9J+aFURZK1pOzeqZr5Wa9vphhB8EgETQcmk7euCQKovbv3cgLEfYSvIPuV01G5sKyxwPuNJBMrnN/JCEJ1UeiI9V4PxnGO8qR1uNspC34Yxgw1CD/VBd6rHNXvK7ELMcax3vOI7TEWOUQeY/BdbjNcoQdvXod71pscCv22/fidAGT9kZUBD2s5yQL8tsV8wEgqkQzCXPJtoiX66pEyVZGrLOro7hUENgvg5xj0GGDcKLuH8DhBnJZODlwV1D7xuySmmsnMc9wqk87O+Gcsg75M4M8D9AEiSjZHVh+t0Be7GaryI96V9OkvOcP4kqr5amd37/cLKQjsugl44F9btBzx+P03vS3+7/7dtuQKeQJwlwFUWK6pwF/3d6YUQYjgfpnUwLFBuwLZztxrd+mHeHf33A/mEBkk2EmNCqtLpRmthCCUd/OdXb0XM+G8De1QWyBldzSTkSDyViEs9O7bFDBQtkpSmS2SBU2JgN1lV44TpIu3ZLH6hxXNzfS6+CzbaHT1zSFip1ox0CpJBYOyOirvIDxPQ7KyC3dGCYAOcRDuFZnkwBlh2+L3vipByNoDoivdxip+dYnTChHdSsCrAB4lokHOWXcJTYF0njn8nFT77D4RNUosuqlFEIoDI3YvMOgBBt5TGEDlBds54HLh2+DMV3y3lkCElQuK2s5iqhCEdHduA8srQbiWI5FEZnDxc0GbAq+J2t7VdxQR3+xYzJ7ibGRm2HDy7RWLNz8+Ho3OWjG46J++C7CiykyUMWNmfPeIZas1d3eUKSzg/gTGL61cdGnYdrtxqeGUUiqqPX9yu7j4B/eOW3XRrAdBCPUaNZnLnMYezDQ3yOJJtY2STWJVCc3y95rRZY6xrWpNUm236ntTiyA89KoVk6Dysq/qC9yKnaNkwktOECWv76CBkphFlrzJ/b6dKgQh+ijR8Zd3PW8qej8Di7KtvMzLUkmGl2TnX2leHTRIecIqM9P22gHWuABTe3fvDwpWSV6tWgXCUmbc3hrdIulwrlPqRYUfBOH8zNDAJUofF16qRd796sl7Wr++h4XIYSA+quBgWrz3KXmlO8uQnBA97yrC9LH4ruQuZFyohwCjbGMQVDYzb0Fke+xvuBNVXM+Cyq7l96lFEIqhHuopwBUTXokg1NREYmA1QQD5VLRvXM55a5LSguAh+HmfEsINOYosDtIzS+54aplPzm/l91FuFY7iRXOxYHGiahqjW4lxkEJDhf39CjB+bWZzd6pYxVQ6yoXfbdeoLqHpPQu2azZzH2QLuxtEwqT3swyIRGIyH4gSDDIVqltFWe+MlAEm7ApD5P0KK2pEaqok4GNNEHmAlHf0bjwlO8KKssL6eDjr0ASRR8PPByVAxp8gogt23tFaKguJMtkIQvTVttCK4jQwfTeER7DwockwjG9nkouFI5/UescvFpPqQhT2Xqdg/XQAMc4CYe8qvOXtpskIop6bQVn/NUGoSkWN74VdRGtRodRTaCR3BBWWVGH7pgnCW5hKiwkgvPBFeAaVcCOeIVEmI0EU0bEd4UzMsxfWfDyvoNOV+DQnLOjGRrf5iczfYCMTBHXOjH+RLVylEI/MKRT5AIFknyhKToqaIGpchCWf6xNEvU8QKiqmEBeT+gThLfTCSGBd7p1uIswFQ1jN7OI3RWTe7hUEEVLlE3ZKVjueQfXYqqd1NJOABSDsLwnjULbAMvg7mWRig09F4dd6EITEx0F2Qhemt1/3DayZb1MhJz1WgFj4Nz3cEtnycdN8c1t3NGgZQbjVXRtBxVS1NiJojDfF75srQbg9Uqu+pHb7U8gsNvQJolK0Vc0Uw0yKvO+H9cX1Ib9PAPAFWawktxd2Z1fvr50esBNhcVOPE2EYHISpflfX/J0smAeC8BWPWElyBzZ3LCbCtzJDAyJsifKjYtHV3j2/jWDdWxbjKR+T7E8E4zpYWNbcbD3rFYVYVX4m+pLa7chZbwJSBn2CXtw8CaIyfENVpml77X/Mlu96Z+wOZ7AxEVBvODXwlbJFobv3UAI2xFaZAieIWh3xJKbGUh1ytXJv5y6IZq8isuMDlR52LXgS7+SnkeOedDohwrnX/ZmoE4RfQ/PewKYIK18K5yACwzHToZlUv1ikS4/kBBHah0Hiu1MWrtwj9MVbAJ2YTvbfpmKyrEwQleajCGvmauddydGS9VZyH11/QvurBfwlCvPyZPL2ZyRyLHUMrLsgbaQCN0uC2OSOclOAIKR3QCHsyytCaEguGQvhMkQ8KpH4qZ2B7Zn5SBWvYzF/bBIzzXvA9sVn/nGpkCTqEGGmeHg6mRBhHJQf26uecCGYXwHRo2A8ApOvrQi7UqUVU0dH/N2IGEKNNjMfnRiRCKyDxSKl0si2mXM/YlgRkRejlD9CZiUjuVPzDXLorlvFcUzmYBrWe13iQCndYMg2cQyEIj1Je0ve77KNUlhHPIGhCNdhGXzPehNmoVJ7jIC/EfNVqVTieZXxnah3NkuCEGBKQm2Ecr+3y6j0OlULtTFVCSKEzbwE/4oJrrLYBE2MoDuGWoLGFesW0UyN5qal5QmG5A6N1Z4gOjriOyNKg2B8uFBvqJ2qKpYSmVaOb5Zf6CpinFV8L/Md8Aqk6DW+kjA3nifQyjAWCIyD5qy3M9Y3n9l2piwYAZQ5QVJHrO8KMJ/m+OZ5mDwrKNCj432Zf0vVau+gORHm982WICYkWB/kIbWn4h1EXk2w/a3OiJrw6L9bIKWBDj3MFN3qijBxbjwWRbfjF3XEen8Gxjcd7QyKllvWpfbu+LcIJFLIFq2ITDCfXMxd4ny5WoKQ4a0a/VTULyEYqapFGovJIwilZFxFZjhXhOTKOSE5tYVS8YaRHw/SAhSD9UnTCrhCdbR1xfczyI667Aj77h+p2YmdPBQ5L2mJbnlUWEfHMIu/yrubLUF4hPvOMficTDIhLuU8ozx6hfv2iuM+FQnC4wQVuLDmVSUkEvfs6xZQuaNTZdRdQG3ySUJ+SwPjdXXN/YRFERHvyZlRUCScOkoWHtw1uTthWXe4wn3/j2Fl9x8evkPE7iknkypVTPZCV7GTVU4wUxH+PYSnt6g6MP6VHRp7i7WXgHGmgyiliXw87p9Uw/R/mCN0GwGfrpAfDxWnV7jvgJD5Xtn7KrQEHmtJIGYF4pbNB8/UASqLej3f2WwJwh4cj4RBzLjTjBhnur1whbPWCy8ZhxOxSMbjSk/
qvWhNVYKQWIgIWNeI4/Yu70O/0zFtQ4IUXOFl8y4jCNuLeos3bmLmPofgm2KMrAifLUuUIyYsRej0QgTODbs6n52w5BQgqhtl4CecbbrMnW/ax1nNlySrPUGIxshVWfBMSiO+sS/rm7I/onxa0w1+Eh4h4z2iudok4TUvCiGxr1ivOxf54EuP1wnHI76W8FX5FWebzpXl9s4nJYqeCqazvJwD/TyPPTYBo2D+QUsTrnRbS3kmQfLAzWMtEfvMpQbjrFQq8VfXwu3jA6K2AaonEXiVtVkTRH5XVnvKUWEaOdbKcdWUo6o5DaS7dEU7/VocCVUFzyNRSvHzYsrXNz1SLD4JUNKZjMUr2qzH5LYXLVeqzQjY+hzIdhpzp5D1TYHpkRym2JfylJTgDwIQ6UAr0tQy8NPW6KsXeIWtroUg7AVfqs6wm1me1pK4BcxtAIk7C7czYTG/uQhyWPZICEKk/DUc5OLEQiQ4zKfzrXTS86yjMO/cOTSK7RCL9kqQ8d82KYk0uiI9L9kRk52OgG+BsNpxJwMOuAPzmeujIDwGpj+LOjxT/gbMc5/yRd9WMZAUKWF9UgMLI4p0FLm57myWqnOy3u9t9gRhL8L5pDQiQ5XIexvmEeELBsZb+RS/AHFT9QRhL1hVJJ0vJoAH86HEEElb7McvHHlBb12WuS3EQL1EBh8zvCzxkN83HmoS1WpEDvPLxt/Z5jy/jGi1EoSww2qPxY8jJpHc3k1QKm19gg3q9cosKCGIRQRaqxgHqziSI1bUnOcXwjxkfC1Xv3glWcaxVoS7nQmUVML9d8biBzOTyEG/rQpYG95h34x6xfeqL9+eAYG4hWtz7W9rgihgKI7JTcyXMvNctfANvJIIZw0PJUQeWt+sVFOZIGySaDtqq0g0exETTgnALq+maOLTROrKsF677T3zd6OceR1AMeWwEkS3Z4nODgra55hKhaM/ibzjpXSY/lNNnqtZ9k0dCMIutmDmea16G2112a852/RdmQqn2FZZ+3bZkU984RXj68QsQnj/mw8Wb4H4h8jhSpXc5/n84WvPAEGkZ/Urt1jla0IlBAvXiPIrLrsJSj4sYq5HTOsyIlslFhSeJFSfREPDryUIXUftS79aCZogXDjZwmOZcQLmguijjpAFIl/1s+szj93PoEW77mQ+Kgv+Jl0UpqCjnKyfeez4ZALPAbBbYYcr7LqfA+EBC/zzkaHEX4qEGpYginWWjRHoQ64on6sB/juB+s1spL/a/Ah2cMDV2BOWcSJg7QuQSC1bXMRGAV4Foj8wG9eHkoUaLqklmNOM7r6PRJmPY8KXQNjNIa/idPsKAU+CcONYC/9WJQy6H4EJ57QsN51KxH1gOyeFWFzfAvjPYFxr5ZoTfuTjtSQJfX+0uekoBve55pwtO8Q0BMaisbGt/+g8nUlMlE0iHD88NFDK7+G3DNp3J6Z5EogOBrCHY3ztPgkZyo1nb1GJgiurR7Qv0mT22f0ql1N7bAB+HDCua4la93p5jKst4xP3VsMQxMR1UZesEdAIqCJQrxOOan36vcZGQBNEY4+Pbp1GYKMioAlio8Ld8JVpgmj4IdIN1AhsPAQ0QWw8rCdDTZogJsMo6TZqBDYSApogNhLQk6QaTRCTZKB0MzUCGwMBTRAbA+XJU4cmiMkzVrqlGoEJR0ATxIRDPKkq0AQxqYZLN1YjMLEIaIKYWHwnW+maICbbiOn2agQmEAFNEBMI7iQsWhPEJBw03WSNwEQhoAliopCdnOVqgpic46ZbrRGYEAQ0QUwIrJO2UE0Qk3bodMM1AhoBjcDEIqAJYmLx1aVrBDQCGoFJi4AmiEk7dLrhGgGNgEZgYhHQBDGx+OrSNQIaAY3ApEVAE8SkHTrdcI2ARkAjMLEIaIKYWHx16RoBjYBGYNIioAli0g6dbrhGQCOgEZhYBDRBTCy+unSNgEZAIzBpEdAEMWmHTjdcI6AR0AhMLAKaICYWX126RkAjoBGYtAhogpi0Q6cbrhHQCGgEJhYBTRATi68uXSOgEdAITFoENEFM2qHTDdcIaAQ0AhOLgCaIicVXl64R0AhoBCYtApogJu3Q6YZrBDQCGoGJRUATxMTiq0vXCGgENAKTFgFNEJN26HTDNQIaAY3AxCKgCWJi8dWlawQ0AhqBSYvA/wcO/kj32m27bwAAAABJRU5ErkJggg=="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-49"><g><path d="M 101 201 L 154.63 201" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 159.88 201 L 152.88 204.5 L 154.63 201 L 152.88 197.5 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-50"><g><rect x="1" y="151" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 201px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">GPU driver shuts down clocks and sends SMU messages</div></div></div></foreignObject><image x="2" y="172.5" width="98" height="61" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAD0CAYAAACB1LEoAAAAAXNSR0IArs4c6QAAIABJREFUeF7sfXmAXFWV/ndeVacbkG1UNkHAUVRcRg2kq7u62lbWCCGQUNUJEIKiLBJhYEBGYFRGkFFHEQZEEISwJd0lIQiyKvaku7qrA1FEjMgoqCBLkB+b0Eu9984v91VV59Wr+7ba6K6+9Rek7/qd++5377lnIaifQkAhoBBQCCgEJAiQQkUhoBBQCCgEFAIyBBRBqHWhEFAIKAQUAlIEFEGohaEQUAgoBBQCiiDUGlAIKAQUAgqB4AioG0RwrFRJhYBCQCEwqxBQBDGrxK0mqxBQCCgEgiOgCCI4VqqkQkAhoBCYVQgogphV4laTVQgoBBQCwRFoGEHMnXtSS1vba/9iwlgAwoEA7QlgFwCRwnANAM8D/Bcwfm4SfvruXfFIOp0W/x7q19OTfNtEju4A4dOhKpYWfoGBV8FYr2m4T5/I3bN+/e0vBW2vI5G6AYzlU+UZD7a28MKBgfQ/grYhKxfrSi0g4Kf2vzFwRHao/85q2q1l3Vgi9WVifGuqTcLKkcH+E2rZR7GtRvZVj/GrNvMIyL7Z6bauZ6Os6k4Q87qTe2smfYWAJQC2DQnyKwxcbU7mvhNmc64RQTiHqhPRbTmicx5at/ppv3koglAE4bdG1N+3IDAbCKKjI7kVNJwIwuTIUPqamSD/uhFEe/ux20WiuYuYcCqAaJVgvM5E5++xi/mDIDeKOhFEcQovE/Gy4cH0z7zmpAhCEUSVa35WVW9mgkgmk5G/Pa8dzmz+F0AfYMK52cH+b88EAdeFIDo7e/dnDasBfo8HCAWVEk3ky3CrQ+XkrGqA8CPofNbISHrMC9w6E4ToehOIF4wMpte7jUMRhCKImbABTJcxNjNBdCRSV4FxShHrWU0QnYnkYcx0E4AdyxcfPw7Wvq+B7sxkVj8nWMFexmLav2l7mRo+S+CTALyzvA26ZnJs+xUbNlyTc1vcUoII+QbQ03PkDpOTrbubGp9OwGedtyAGsuZk7nA31ZciCEUQ02XznQnjaHKCKHmPnLUE0ZFIzgOTeCzdqXRR8uMm4/R374YHg6iIRF2hr6MInc7A1wBsZWtvjJmXZDPpkodae3+1IAh7ex3dSzphmrc75mUwY0U20/9D2QeoCKIxBDETNj81Rn8EFEH4Y/RWlKiZimnevKPers1puYuAmGMiP4HBJ4+MpP9fJRPsTKQWM0PcSKZIwu/0XmuCEOOWjQMetxJFEIogKlnvs7WOIojpKfmaEUSZuSEAAh6YaOPkhp+nX61i+tSRSJ4NpkvsJrFEdNzwYN9qWbv1IAjLAiGC2wCab+vzOdbogOy6vt87x6EIQhFEFWt+1lVVBDE9RV4TghCmrBGTfgFg76mHGOAJRLVDsgOr/1zt1Ds7j9rJ1FruJ+BfbG2lW6ObjhkYGNCd7deDICy1V7z3ZBCXqJTcbLUVQSiCqHbdz6b6iiCmp7RrQhAdXanTAFxhm6Knfr4SKCR9/JVMvXt4eM1fGkUQUic1puOymb5bmv0GsX/3kj2ibCyBiaUg+kBB5acz8LvN1mo3cm7OtaOjt7wmcAjjvOYsWyRcYSZNLbnzCgYC4k1rDMyPM+Fmc1JfWTQO8OurszPZwRrdC2C7gozeMJkPHc2kh8Ksw66uxe/RERkgYI9CvUkGDssO9f/cqx1r49O1+YD5WYA+AWDnQnlxsPkLMQ2aGl+7xy6cDfo+l0gs2lXn6CCAf7ba2qLqfKOzO5VgE98E0F4wrHgBwINMuDJMH37YzD0wuX3rBC0A43gGPmqbl2WdSMBGZu4zcvraID5MfgQh1oMWnUyC8DmAPjLlU0V4CczrifBj1vEzPwtHMa/2ruRcDfSAzZDmZRN80OhQeoPfvIPUD21F6eNI2t6+dOdIi9HL4F4QvR+Mt9vW0LMARgFKt0bNe6p1xHXOv2qCEAtlzjjuBqjT1vhjZi5y4OjoKrE4a/KLdfd+kEzu3+yFPcSM29qiWw8NDNwwLmu8XjcIKUG42DQ3yw0ihD/LiwB9bmSo72exROqcoJ7UMoIgTXsGJv/E3Uya72qNbpMU8vcjCNn6JMY3hzP954dZmLF477FEfPOWOjw82YbPuKlPLZVkFCvA9B/BHET5cdJw2vC69C+d1n3OccoIwsjlUpHWln8H40ybKtZeNRCh+WFifVsGfQOMkx3GI25VdWJcZegtFxQPEEG/WXFYyI3tcO+ctlfOAOGrAXCcWoNeGM4Ugti/J7lLi6F9n5kXB/Qlex3E34COK4IQpZ+sxd+rJoj2eLJLI+uEtk2xw0o+wCCDDVqmoQThEuaiGQhiipSBDwfEXliYfVYj2ouB/5qq43FCcm7wBJzBsDaffV36NOzvT34EIdoov33yaAQt84eGbn05yLx6ek5om9DfSAN0+NQaBy4cHur/uqx+rGfJXqQbqwESp/gwP+Hrc+nkmzuc52XGXUYQ4AGGliHwv7uQg9gvPQktyCA7OpL/hAj1ASJUTrgfC5WzRkfK3utES7JvlsDHcx7zVIjedAafmx1KX+pGEjOBIDo6ku9FhO7w+A48IOEBPYqlDw2knw+Bm7Ro1QQheZyu6Apf7UTs9etGEM4YQ4DrXGc6QXR2LtqTtejdkgVqAPw4oGUF5gzej4AP2U44L4MxDMJhlRAELJXB1BVaOERmAHqycJuIA/iT/XYahCA6O3s/zBqLN7Ki+XWo03SBKEX9XQtzclVJeOLGeJoJWQK9xuDtCOgCsHv52vf29SknCLwOQDiazrHaYvwZGv7X+m8TnwRhLwArRob6r6z0OxOx1OZs9eoVyPsn2X+W6g+k/QqAAbIcXuNg6z2yGGetUJ4HJttwpOzW5aKWEb5SRcxFG5ZKk0APu6y94rheZmjzs0OrR2XzrT9BnNA2kXtzBYjel+/f/BRQ/G/rH9YD9MjU2NhcP5JJX1f8f0t9N05pBg5yjP91gH8LaI8V5u+6hhi4Pje2w8leB40ga6FagqBYPHUjEY6zbQZ/hM49IyPpvwUZQD3K1IMgpKo0gutcZzJBiM2gdetXb2LmXrt8hFWarvHJ69eln7L/e8HE+T8pf/J3bArWPdU1WJ/M+i3fNo9yNLLEbuQg5Dpm4N2jg+mNxf6DEITsBsDA5dmh/jOCrL9y4wS+BwYWO6/xLh+2eI9ba0b4HCduApn2xNKPaKxfB9B+trEYDP5ydij9Pdn4JARRLDZGwL+/a1e+0vaeQV1di/c2DO2VSk3NReOxriU9BFMcGIrm5gYDl7RF+VsyvbcwXImadLVjkyu5/dnn5qO3d1VTWW9jpnk5gCNLsaJbW6MvLJcZsdSbIJwyc+4Ffo5yHfHkiSC62vYtjRHhjIk3d7hBsuFTR/eSj8E0b0TpTf81MvnQ4eH0SJA17lamKoKIx4/Y1qQ24Rj3yS1sWJuopdVMqh4EEYunTiGyHuJtG6D7IpzJBNEeTx6kkXW9nfI9Ica1bPLpHrpNmTlyXowhC
cJSRwS0gAtCEGIInYneJczWG0JBfvzoZDR6wIaBVX/3Wmsu5s3S07iE7AwwX7D7bviO1wN04SH7BwAvs43lrzD4gJGR9B+d43MlCOavjGTSIopuSYSCar6lKSLuSl1GwOlbDoJ0xchgn/h/174Kh6q1APVsGcOW96OABCE2x2XDg/23uc1DSsweh7fpTBCyw0yQ259Qa0I37yNgnyJOYQ5BdSEI6UKtY2jnoAu9lgQhwn888yxOANH/OD26TeaFo5m0sIYo+81Ugujp6YlO6DuvBPgY20LzDCtSLCduHi1bvXJ1wfJoCyYhCYI89PtOoIMShMQUO5CayameYuDpKIyeoaHbnrSPRViaaC2GsGiyvdf4h4UptiFzNHXDwYUgnjI0PkBySwn62biW++jBy7Z525sTt9tvA0FDcXd0JQ8FrMOGpf5yw8/1BhGQ9Jz9AHiFTDp4eLjvIefEpjNBiHVELcY620Yf2MKq2rc22QKo6gYhMf3zPC1WvVIDNlAtQQgWf1Mfe3cUOJBhfklEYHR27afjm6kEIZGpq1pAJg6Jvj/sDSLUG1ZQghD3mFhX6jo7eQU5YZV/dPJbY/kNBa43ALdlHPSW40IQrn5BAT8b12JSE1QX8+6yzbhn0e6aHnkAIPE28QiDNraQedngYPrFADeIwKTXbvUTXefwxZLmSZnOBCGRbeDvIR8klQWRvwHgV5rA2+DLq7FoqoogJECHJojQNsP2VeVyMq2qzWBf00Yy9c/IfDCK1WcqQcTiySOIaE1RFeN24nODKX8D2elWAMmpMuFuEE+ZUb17dGDNM0FEEYIgIDlleppjS9aRG1nKyOf67FD/iWHUPRJylm4OMoJgwvnZwX7h/1CPX9lbo59VUthBuHyzgUnPhcQWZzN9Yi2X/KYzQUjV9qidVVJYuVRFEBLrjqYnCAZ+QwYfLdMN28GfqQTRGU9dzITztsxFrjP2WmixROpMYmx5YA1DECGj7oYhiLk9S98xR9d/AZBw7BK/SYAXjgylhZl22a9sI3HRa3d1HbOjgdw9drNWJpyVHewXppaBf/I3PTplJNMnHiynflKCqHNWQckNSYxnjIE7CLyyNbrNgJtfUhAApBt8CNJzqS/NuzCdCUJg1dmV+nohSKkduldAWK2ZtHJ8fPsN1VonBZGJKFMVQdTiDaKq035jbxCvgHExTL4yyJVthhJE2UlRPE4PZ/q/EHRBiXKxeO98IhbGC/kH4TAEAQQ+NVp9hUxvWkaA5P7YWt62vKxU1QrcB1CZl78XjgxuIeAQALsVy8l8iqpRQ4SRo72sRzDOYrG8CSqjjyO0do+dzSeCeoaLBsJs8LI5hKk/3Qmi4AMhzKrf7SKvMRAegUkruYXuyw6sFuus5oYJVROE5ERmBej7x9atRz16/01CD+b7m6YEIcIgPAvwMyI/tobInePj2/0mDGvPRIII85F5CVZy8g5u5hrSyCE0QZSH3pCqmSRmza6P2lJVq+/KD1hAgoeEIAI/ZAbsVVosHl+yn0mmeHCeIjCP9oRvxl1k0g0TE9v/0u/b8Qu14TfuMGt3uhNE4ZC1iIhvCOA9LopvAmENs3bDHrsaD4chZj9cq7pByE2ygpkP+g3M+XcpkYS5QYRUXYQdn7O8IghbrJswN4g6E4RkHUnVTOUxnNy9r2cLQYg1LvwOIoZ5KZHld1Du8yL/cITD44+jpF88OLhGOL+V/RRBlGPSnkjuS0zCsMKZQsFre3qFgO8auZbLvUKbBN3fqiII0Um5zhp1Oc3MeIJAuPAObgKUxIOqGd5hTmEz9QYhxl1mmSRRMzn1wF6mt7OJIIpyt4jCNM8gQPhuOBKEua6OZ0njZcPr0g8GOQAGNaUVbYVZuzPhBmHDh9oTyQ8S09kEHB3wRiGqP8YapdxCmzSMIGJdqQMJ+NmUm3++56rc+oOeMNx029WauQYFz6tcLJE6j8SbxZbfn6KkJ9xOUEH7LHsARu0IOcxHNpMJoswU1/H4LHl09vRKrXbDCSr7Yrm3SsXkMk6K9SzZk3J8CDReDsbHfIL4bYKmHTqybvWv7e2pG4T/KhA+WU+/oO1DBh/JhF5HiBtZA4+ZUX1+UItAWQNV3yDkDkLVBwYLcsKYzgThDM9AwN9h8kHDw+ktMVj810RZiZjToxWoCfEUOypTjYVU+Yh2ygI4TiMVU/60WWaKazDzomIa2/IAlPLQGkXMOjuTH4NGDzDwjsK/Gcy0IJvpu6cCEftWmWYEUTJe4SzZ1vbSPgZFlhGjtxAHqnROkhubIghfsZcVsFT85vgnwOYyMBa53OSqOqxXTRBi1BKzrElmPj6bSYvIjzX5zTQVk+RmVbIJVQKKy4P+/2o8viCT+al4FKz6V3bzqeDtpiw09jQjCGvNOkJvCMfHot+ChIQ9PzKJ9yv84u1UI6jpTBCOeVFHInUwGCJO0JQaSvhQcC7SbU8H0EiCCBN8USanMis9n1t82FhMlawNt4gPYY2GnH3XhCDi8cUfMilyv8O6wdeZLAwQM40g5F7m/vFrvDCReSkH8QYOg7Nk8bumVXVpt8xpLJSZa8gbS1grpuKYy0JvFNRMk63RCYevhO/8XYw1pAH9vGRRIBoRT2dHAv5gAr/XTL7eeetsNEFYGyqbh4OpA8BcBobbopuWyQLhSTfURPIEYrre9reyd7NGEoQEP9ewHPL5OPx8akgQsa4l7cTGZ4iog4F9CwcXkVMkyE98e5cQcK6tcFUahpoQhLB074gnzwVZeaOnfjXKSW21N9MIwiXo1rMaGwdnMrf9Loi0naexWFdKREy9wPbvVd9KnOPo6Ei+C1EaAOO9xb+FOQ3L0s9OR4Kw1myi93IwryjM0zJjZeZxR36TQH4ZkoyHr2msHZ7JrBaZ3wL9OhOp45ghTBuL1kFS09qGE4TDux4egfBkEw3yRvMWEwSYSep17ZyPy3ftaSgS5gZRduAJeYOXGLFMC4KAWwxzQRK5KB9fTfIK1+xc09jMVSwsmfdppaTZkUjOA5NwPrNbjNQ8c59k4xRTCRpXSJxgnCQWzlGuQTcIMSln6A3hlCb+3eZJHjgOlewWHUbWhVhC4s3CnpzpV8Zk7mBnys5GE4Q8PhdOGB7st2XYc+dAZ/gWAGXhVBpJEDLPd3g4TNpnFo8vSZhk3mVLYSv+XDOCkCRgC3XQkEQUrkoFXasbhIXhvK7efTTwnfaQswVwRWCuf4fBq4J4IRcFkl804qGLLgTwrrIlON0JovOonUyt5X4C/qV07PywSVhuz2vg9nkVdIsCAxFqfMfS2wNWZDP9Pwx0PA1RSK4y9I0HQ7F4MkVkqRKmwoRb3U7DNwgxLKejpxVGhdkE0ccL4w6T20To278Hxr+WQk03tUbNL3rlChYHINLociZ8Poh8G00Qsgi/eecsXjAymF7vtbTkIb/LveUbSRCywI1B5uOR5S0UQXhl3HRJ4RxIXS8L+V1tds+aEoR1Kute8nGPfMIinsgNmmlcOz7+9idk3pU9PUfuMKG3xBi0nICFbiZzDGRN0PL1Q31PlF8DBbHQHSB8eupvIa9q
IfZTz6KF09NqyTx0JtxPzP8zGY0+vGFg1UtFd3nxsUxOansx8QIQHy+LJhvmdFrBXFxyO8jzJovxjuv0bwX1VzQokYtylb4hFPuotr5L3Jt88wFPlcWxuNwChFgfZkRWZIdWi820JCSCmzOUl3wbTRBifi4n5xeZ6czc+Pb9sm9ZzE1jrHQkQxqThclvMEFAcqsRAn+SNPMLzrzgVhj7tldTRCxia71T8j15EoTE6GEjDF7oFs9NomoUy+Zxk3H6u3fDgxJPaersTn6KTe1Hjjzu1ai0C2ekCnYPvyqFnLw3ApTwKfsCQIWQHCw2FuHCX77BlDbim5h7OvhB2IZMsa7kmQQSiVz85uYHbeHvPGpGjaOrsW/268gjxaSo+gwDQyJ1Jog/4mv7Pk1vEGIi5R7TU8h4BvJzw89FFVggHCud6jqAXrRSc25JB+pojp6EZh45si79W1k/bwVB5FWPshuSNcKSVJgAi020E8DO5eOX58hoNEF4hvgppmxlmtgciHBvznsyb1uciyBvEMaYcUTh37xvEPHek0HsvOkXwvmQyJa3bk7LVqcWgx1aKnUNq0AkDsilP8JLxPjV5gO0ldXRGh/hE7Y0vcXyIv3rV0YG0/9dTZymmt8giqOzVCPPkUg2fpkL6/rtUc6/vw6mH5p69CI/F/JpRhCWHAvql6scaqKwGAid+E9YN79YTfrIoJ1aOOqaM9OZX/XXCbiKARHgL68Sm8YE4b5RVB4yxucW7YOfIAc62ulIZq/0FhFE3lAk/HqY2rAYuLEtyqfL1G2NJggxqMrklFe1Rg36LzCWByEIFytPm0jLoyzs35PcJapjVWk2Pr9Pb+rvuvD8nxjb4Vt+MbD8WqwbQRQ7ttQPBo4mFqZX5Yl3/AYorlZM+FZbBD/x0uPa25mGBGENr/Bhn7/50fdzPt6mMlgeI43PcF5//fGrrkQ4oufHydSONzTT1DA9YzHJ0JBYIImgkxcOD/V/vVL02tuP3Y5act+gPFGWvsnIG3XNu+ws/lYRhBhHYT2csvk2fBGAHQLiI/LTn7P7rtzvFkjurSAIMfYQsaVEtNoftEX5fLEPOSyT/MLdUCyRXE5MP5CtBbecK+ImwRH6WiHVa5A1JKZU032i7gRhX0CW13VUP5yJDgTwCQJ2LYktQuIKzs8DWpaIB4zJyAN2Z5qAi1FuEvsWvUHIxixM5cb1N7s00HwGi3zeuzuu49b1M28Lz/ciGllTz5C+QXC13kUMOpKZTwXR+6eutHmZrWfQVbmxHe4VJ5bpHM1VNleJ41RNEr6LvizrvglaAMbxwq4dwC42M9YXNueh/hMYPzZy+lqntZKbXN5KgiiOSaggW1tf/RQIS5nMOEBiDRc3sTERCZlYyxgwr9PHdxz1O8m+VQRRmI8V70gDfQmMgwDsWVAHiyCDf2bQGkOjqx9at/rp4vxDEoRVLf8mQ8JMXbyN2tVvnlnjCje3+QCLJFztDlW8UO/9FdB+Ds28bvedsXHaRHMNsrGoMgoBhYBCQCEwMxFo6A1iZkKkRq0QUAgoBGYnAoogZqfc1awVAgoBhYAvAoogfCFSBRQCCgGFwOxEQBHE7JS7mrVCQCGgEPBFQBGEL0SqgEJAIaAQmJ0IKIKYnXJXs1YIKAQUAr4IKILwhUgVUAgoBBQCsxMBRRCzU+5q1goBhYBCwBcBRRC+EKkCCgGFgEJgdiKgCGJ2yl3NWiGgEFAI+CKgCMIXIlVAIaAQUAjMTgQUQcxOuatZKwQUAgoBXwQUQfhCpAooBBQCCoHZiYAiiNkpdzVrhYBCQCHgi4AiCF+IVAGFgEJAITA7EVAEMTvlrmatEFAIKAR8EVAE4QuRKqAQUAgoBGYnAoogZqfc1awVAgoBhYAvAoogfCFSBRQCCgGFwOxEQBHE7JS7mrVCQCGgEPBFYFoSRCyR+jIxvjU1esLKkcH+E3xnowrMCgTau5JzNdADAHYsTPhlE3zQ6FB6Q7MC0NOTfNtEju4A4dPFOTJwRHao/85mnbOalxwByfr/U5T0xODgmudqjZkiiFojqtqrOwKKIPIQK4Ko+1Kblh0oglA3iGm5MKfLoBRBKIKYLmvxrRiHIghFEG/FupsxfSqCUAQxYxZrHQaqCEIRRB2WVfM0qQhCEUTzrObwM1EEoQgi/KqZRTUUQSiCmEXLvWyqiiAUQczm9e87d0UQiiB8F0kTF1AEoQiiiZd39VNTBKEIovpVNHNbUAShCGLmrt4GjFwRhCKIBiyzadtFUxLE/t1L9oiysQQmloLoAwC2AqAz8DuAb+TcnGtHR295TUilWke5uXNPamltffVTrLFwrusGsAuASEHiLxDwKAg3TrTynRt+nn7VbSX09PREJ/SdbgWQLJZh4PLsUP8Zfqtnbs/Sd8zR9V8A9FFb3euzQ/0n5k3Y3X+SuiWOYLGu1AICfjrVLuHc7GD/t8X/J5PJyNPPU4wYpznmPgbgzwxaY2h09UPrVj/tN4da/12sgYhpJAlYDKL3g/H2Qh+vA3gCzD/TI5Fr/cZWLUHUan0EwWfevKPeHmmJHklEvQyItbBzoZ4lD4DuN8m8ZnQw/XuvdVGpo1ysu/eDMHktAfvYxjvGzJ/NZtL9bn3OPTC5fesELQDjeMe4DQDPE7CRmfuMnL52/frbXwqCRY3KUKxnyZ7QjUUa6FAG9nV83xWNT4JvifOZkKM2J7qcQMcgj+W2xf0E4F8x6Krc2A73bthwTa6SeVr969p8wPwsiObZvo1nQLhLM/n7mUz6D6LtpiKI9vZjt4tEcxcx4VQAUQ/wXgTocyNDfT+LJVLnVOJJ3dGR3AoaTgTRVwG8M4Cgxhj4Eeda/qNITs46HfHek0H8wy3/zqMRtMwfGrr1Za/2JUIU32KwuvFkl0Z0L4BtCmfF4ck2fKZIZm4EMa+rdx8NvJKAmM/cDSL6CevmF0dG0v8vAE5VFRHE0ML8HWZe7LMGRD86QP0wzC+5ja1SgqjH+nADRqx7asl9g4AvFA5DXhgaAA9yNPLZ7MDqP8sKVkIQnZ2L9mQtejfym2jx50kOVj8GfQOMkwOM25IXMa4y9JYL3L6hqhbPlsrU2Z1KsMnfBWi/EG0GGp8bQbz55jv+PqftlTNAEHtKkRTcup/aw/wOgcUGxIHumecotTkqwGU+e5YOpkthml8zI9jXEUlgZnpSi9MLmSxOKR8OKFBr8WpEezHwX1N1AoTa2L8nuUuLTjcycFDAvuzFHmONUtl1feIUV/KreDMqIxaAgL/D5IOGh9OPeI3ReYMixjeHM/3nF+vICEID/46ZbrKFn/CFgYHfaKa+cHh4zV98C1dWgDq6eo8G+LoAH1dJDyxuFBodWSuZ1Gt9yGDp7Ex2sEY/AbBbSNg2MXBsdqj/5856YQnClRyIv5gdTK+UbWAdHcl/QoT6ABwYctziSuwqr7BtOcuLG9+crV/5Jhhn2jQBoZr1W+sygoBhHI5I5EIAYgMP+tMZfG52KH2pH0mIPsd1upyA44POixjXGuCVGpHQIBRDzcw8gnBZoAJkcVp6HNCy4n8YvB8BH7K
dLF8GYxiEw4IShEdfYld+CYx1AL3I4O0I6AKwu0Taz2qsLcxkVj9s/5u4as8Zx92bVQGdxX9npuOymb5bPFYMxeKpG4lwnLOMX13JQp1k4DD7puEkCCL8lNm6Nexk6+8ZBoYI9BrA7wSh23Zt3QIt49o5LZtOHRgY0IN+AUHLdSZSi5khSEuoE+0/S7VIIIG1AeKPgPExSbnHzKg+f3RgzTP2ymFJu57rw4lFRyI5D0wiPpJdFvl1T3gKQAZMEwTem/M3PeepdBM07dCRdat/bW87DEEIMozqWAVQj62NMfYgB2sT3urVKwA+yTGnMTA/DtJ+VZBVK4A4GHuXb2o8MNmGI73UtkHXjq0cdSSSZ4NhmZgIAAAgAElEQVTpkrL+rG+b/wBoj1nliVvB3A7Qe2U3VbG5uq11Cb5/A/hRgObbMQThETD9VoxFsncVi/rGBvPAW7QhDgoPim9Xtk4K37tQne9Q6HBmEYSl49361ZuYude+IAh4QNf45PXr0uJDmfrl9Xst/0mwrrXFt4ItBTxuEHndnXVaO8Sx+B5jwinZwf5hJ5O3J5L7EtN1ElWMdEPqjKcuZsJ5UwQBeL4lyN4fbHU93zAKt65fANg1X4cfnYxGD9gwsOrvxTacBGGfNwNZpsjJo4OrxCKeeusoXGWXb15U33dsSpvIpAOGh/vyH1mNfh3dSz4O0xRqMvtGaV339VzuQqfeurPzqJ0QmXM5Mx9dsgYI3x8Z7D/LPpcwBNGI9VGErL1n0e6aHr3HcWM2mLHWjPA5znVfUOd8DYx/LdnQmO8Qb3UjI2nxTmH9ghKE9XYwTmnHTVq0c87IUP8P3E61sa4lPQRTqKOKZG4wcElblL81MJD+h3NZzOtO7h016WpHP0J1edzwYN/qGi0jmb5dMMGTJpunvHs3PJhOp8WbQ8lPqPe0Fv0rAJ/tIArXtS7D19bo6wRcPCfKVzqxsNSnpnldmeaC6IqRwb7T3fDuTKSOY8YNjv3uWYBP3H1XPGCfV34/faWXGZe7aAhmFkG0x5MHaUR32E+Egr3Z5NPti94hV6+Tgms011g8dQoRrnAA3T/Zxid5nWRcr62SDSnWlTqQgJ8BmOO2advn0l72hlCyhXu+Q8TivccS8c1TNSQLzY0gAmAM2amebY/ctfiw84/7O68EWDzoFX/+D6PyU2zZRx2GIBqxPooT7OxKfZ2Br9nmbID5gt13w3dkG1mhnGzdTwK8cGQoLQg2MEFUSg6ig1hX6jICxIaW//lscKJI4Xa9tvSmwne1RrdJDgzcMF6DtUQdid7LwbzC1tZfYfABIyPpP/q0T7FEcjkxXWvfG9xu8B4EIW50R42sWy0OmtKfFHfCH6Fzz8hI+m/OSuIwZGot9xPwL7a/bYTBC73m5XE7nTkEIdscxKnWnMwd7mftIDbtlq1euZqAz5aA6nKDkKp/AvZlLXD5TedZjY2DM5nbflccQ0dH8l2I0gAY4uoqfm+YzIeOZtJDshUTS6TOJMb3Cn/bBLB4vBJqNPF7jjU6QKZbF59lrCslbjbF+RvMvCibSU9ZLBU+5hIrJvFveR1r7uDh4ds3eX041kNtBLfZr87MuDmb6Rd6UE/rqqAffGdn74dZY3EL2nJ7YP7KSCYtQrh79mFZu5nmfQA+OLVXARcOD/V/vfj/QQmiUetDjKu9fenOWosh3g6m3tsYuD43tsPJfpYt8s2Jbm2NvrC8qPrzu0EUrGB+APAym5x8bw6i7EcPXrbN296cuN1+Cg4aKbajK3koYB0GrcMTA09HYfQMDd32ZND14rrxSiwBAawYGeq/MkjbXV3H7Gggdw9A7cXyTDg/O9j/TWd9F4IQt78V2Uy/zUhF3rMTBwCvkEkHDw/3PVRGEIneJczWIbCoLXlNY+3wTGb1oN+8YvFkLxHduOWwatWYOQTR1bX4PToiAwTsUZhsqGundHNxIYjykz3KTl5+gMv6c56oZeauYDplJNN3dflCc5jGMh7cvH/cA8J3phYp0+Jspm+Ns65kk3nK0PgAp2pCdoMIcwtwqszAeLC1hRfKVAl++LkQZGk+D+D3uqYd4me+WmzLeRIXqsl/bN161KP33/SGtRkHzAfRqPVhkXY8eQQRCZkWP3pfPbQdu07HpiEefTkX6R4dXfWCKOdFEC767FCPpWW5Jvzf2azh59VqkQcAEm8TjzBoYwuZlw0Opl+sZO3Y64hHc4rQFxn4BICPgWBE2DgkDPl0JFI3gCFUq/mfy14iJQiPW4BzbpJ9TxqOXbqXoPQw4IWb7IA3owjC+aGEPVFIAXQRatlGF9CM1C4AucDKr8llqh8g3RrddIzzcdd52xB+E8yctputuvlSSDY0aR8SgvC80TgXXNlcakgQPT0ntE3ob6QBOnzLR+mtjy0jyniyi4huJUDchh4mogHWzTuK6smgBNHQ9eFU0YDvgYHFHirVkmmLDcaAJtSYBogeJiDDOt9WNPV1Iwhhey95XA5MDsVt02lUUU+rpGqJI0z9qgjC5RuX9S+9rUgSOkm0EfAzXPH9fmfSDULyUYbWSTpUNFLWd7kWB3JicwJe7utQfmUrezx2OV043h8sFRG3GL/S9Og6wLL8EPdw6Ym9/GQvv6VICOIpM6p3O6193D6ksvo1JAhxC6IWY53dMYtdbkxhPnR72SAE0cj1Ie3LRZVR6ZxlBEGEo8H4CAMXlLzBMb46kum/yE+d53WDKfxN+AndsdmSZmVrdJuBGr0rVApBRfWqIQg3dZRsIH4qwGIdyfukl8pZOufOzuTHoNEDDLyjUGDGqJjKzDvFw+lwpl84CwX+xeK984lYmArmr+uSG4TUUshF7ePXsURoZeqBIOanoh+HD4Ml/Dfb5vzVoeMtWxSS9l0XTpkfhEMd4TffuhJEufrHVRfrN063vwchiEauj0Ri0a46R4X++J+LY641KbroyNeLpzSn5V/QNz87vgVLwrs8nCzzpsmMPo7Q2j12Np/weHivVLS1qEfx+JJdWTM/zYxjAXyyxHw6hIqJCWdlB/uFP4PvLyhBxOK9i4j4tql1EvLbFfUkh7CZQRBSkCqwkCnbACRClX6UFeboDbLhCMGUmbs65lamXrGdzB11yx6fy24oHqf6ajf4aut7fS2dnb37s8b3F220gzoH+n6BtgJB5NXI9VEwb91yQwQMZlqQzfQJk9ea/HzMMMv7CGgUYK8Yjy/ZzyRTPDgHcfAT4VHuIpNumJjY/pd+D/E1AcHWiMDjTV3bLUK8L8DvY6ZPbLY03B/Anp7e+mEIIsR+EpggnHHmKri9u3l9T/uc1E1EENIHJom5a4n6TPb+UIzb5LwVlXlHO8xbvR6dq93gq63v9bFL1F81P91MA4IoWR9BxlPtBulDEMIXYALA1rZ+XmZo87NDq0fD9G3FyzLMS4lwZFDvXgDCWurHUdIvrscmVRg/tSeSHySms4lwhMzpM9A8FUEEgqlYiEKV9incRAQhffR1qi2cD/AOAim5JTitHOyWOZKHcm8zWkewvrBWSIogwq16CQGUyOctJgjhoX0pSLsVpnmX4/R/X2
uUj67EOi0fWNE8gwBhNuv0CncD8FnSeNnwurSw3KvZr0LSEv0LIwexx22Jy6YIIpRcFEGEMJv081OIJVLnEePiggRK3hCcD5l2cim3bPAO6lftBl9tfa8V5nzPmQ0qJplpd4NUTBY5TL65w3lCxVMWBdlSdQWz4/eQqRU9lXJ8CDRe7hISxV5dGiok1K5kK5z3yOefbI74/B6fNl7Y7APyJwINmUwDuRbtIRF9oKpHaqVisti1pr+gAgmzydT7kdqpOvLa1JwmokVVkdf7Q3Gu5e8QeT21s3+/kOLVbvDV1veUXRM+Uvutj7fskZpx6eT4DucW9f8uD81ljp/VfPDC56Kt7aV9DIosI0YvCHuVtRfACzvIGOSe2lbNvzFwM6Ddm4vSYxsGVolw41IHzKD7UdA3BLdxB61fCxWsxKS25mrcuqiYRKOOU7SrSafXAikPN4GyUBsye/tKLKYKY7Z7PluRKe1OSvaxuj0m67q5k91BULbJu71DOIjD19mv2g2+2vqeBJGPR2R/sBV23lLHQK+PbVKnNQy8n4A/mMDvozAuKzpIBVHpNHJ9xONHbGtSm7C6ExYz1i+MiWSxTqwr9WMCDgb4Lwx6BNBuLL4hBN2ApGFuiPom3tx+WR0ekqkjkToYDOHZO6WG8vp+ghDD1IGq3ONY3Jh+BJ3PCupfMt0IwmnEASCUQ6XARnJjnUEE4TRR9Q4tIVsvznATrt6PjXSEKg5U4sloqZJg8rts8ZqkITLKBct3mbno57UWfc1UtNgA3pvVbvDV1vf6yGvhfyD5ACrS+TdyfXTGUz9iwue3YBMuJpHU0crmzRyUIMR71qS+8+UMFvlXir9JZj4+m0mLUN5lP+vQw+bhYOoQZrMMDLdFNy0LGuE3lkieQEzX2xoOvek5B+WSrCtQyJ5iWzLiDuNJHTTciOgvqHxkfkJuURncvjOJ1/7MIQipp2AIU1cRJTJikojjk3cqE78QoTaYaaksjIUb2LLEPn5hK5y6XuEJqYH3tUV8lfowSDbPp0A4C4wfb4nS6O92X+0GX219v1OgJGjdY2YucmAxbIRv/bKTY2lE2yA3CNGHLNRGvdaHM1SGeCANEyW3kENCBOfbroBPyRoKugFZ8+5Zshd0874SZ0WRryGqHSJLSFS24QQ4pNhlGFQefnK3/93FMzmUI2yYsD1h8JXNI0T98gNwCFNXl0CYM4cgxHZeVQTGrpQI+y08Q7f8Ghusz/fDdj7EMrCaGNtO5bDwELhTnUSEe5lxRHGyQdzuq93gq63v96FLNjsgoF2+PCZOaaiOoBtSnYL1SdeH7GATPFhfT3Qyt9NVjhtISaiOEBuQJR5ZOGkCXTUn+sLpzpuBPH4aThge7N8SVdhD6JITbSjPflnTMkfHMCpkt5QDb/UNQsxVEtjPIAqGdzy+JGGSZa1WPEiIJmcUQSAeX/whkyLCWcrmcMMDehRLHxpIP++y1igWT6aIrKtqaYIZj3wQsnDOTLg718rHVBLu2+0jso9ZsniFOZ1ID2qlCPV6ZC7zEi8FI5DbfbUbfLX1/QjCLdw3EZYND/ZPeZE627FyVjyLc0AkwkS4Br0LShDWaVoSDr5e60Ma7pv4KyOD6f/2CHshW/eTzptOWIIopFddBaKFNpxFyPUlzujALvLaBOIFI4Np4a3t+nN5SJbGEPNbN/a/u6gqXW9B9rrW3KP0PbCV7rUkv4xb5OKw+DrnEqa+i2xcswkW++roSL4XEStyrj2F7MwjiPwtQpYFih8nDacNr0v/0v7BCHDHdfq3ws2hPG91RQmD5H0JNOPx5PsNoqsI+JRD0BvJ1D8TIAVn+TVxS0PS94fin2VRH7dUDaa3rnaDr7Z+kA9dfkiAyE72n60tuMJpmy8scCKtLd8FW1n4tnzUdUsYVPv14RLnXyTeudqczH3VGe7edd1XkTDILptY15J2gim8uYupKV3DwrucTF9kpjNz49v3yx64ReItjbHSkSN6zGReOJpJPxBknXiVkRCu2DYeNgnLRwfTG511CzleDgPzJQB9QNq2y+0+zAYvazdsfZfcDq+D+BvQcYX9ET6f7EukUiaRulfm5T6zbhACQJ+UelvSYbqnm9wiB5+c1DVKKRnoxFQclORaXfyT5y1AdjIqVvR7+5jqexo7ytk/Ho+Uo2NBUjcKZ8KJNk46b4JhbhBiPI1cHx5JXYKmWZUeUsJuQAU5iINMmcqWgYuyQ/1fddxqhEWSOHWL7HbO3+sA/3YqtadIYQuIFLw7lxelaybHtl9RC4upwolZvEe+29GPAcbTTMjmU+oiAjY/AbJIwa59EB7mTxRiZBWSfeF/NR5fkMn8VIQKmfpViG9V9T2+D5HBLsugp7zSBduGP/MIQgzeAl3XnElM/A4WApyrGNb1MH/y8SEIUaQQl/5WgBJ+HZT/nR+HFjnGmQfYqx3Xm0CAB6dy6xqrp8CWH9XeAKqtHwbfzkTyMGYSeamnTrHB6rurJMMSRKPXRwjnrhIovEJsV7qBudxqpAlqKvxei3MQN6Ub26J8eiWe225rohDcTqTmdObu9ltGrzPR+SIHuMZ8ny3yqfR9pFJ8i4OosH4x651IBevM2+42P/FesZqZ956yfJxpbxD2mRVyIac2bxCXlbi8S6fPj5OpHW9opqmBxBU1MEGI5vK6PToNBLEwigm9vRaSIKNvz4ny98MuamneA5/3h+JA5O8Q3t7T9klUu8FXW9/vy3T+PWSoBOk1295mJQTR6PUh8iJHormLmCDMTcvVpqUgWbm6Db3lgtHRW8SJuOxX4QZktVO47Yo80bZNiAcm23Ck83ZW+F5P2Txm8Q4U5BsSXYi0mufsviv31yPCa0ci+UkwfuiqNnJgycBNhqZ9TSSokrwXSlXA1eAruq+mvlDVEZPIJBnz+bYs0su1mjfOGaM1IHy6UH5m3iDskxUAThp0JDOfCqL3TwXbIrwE5vUMukokPxFX0yDRXL2ALJyE5gOcLFyFdynotcWVUzySD29OAZpujZr3hCWGko3aGZnRCm1QniLUOVbZ7cMZvM9rftVu8NXWD0sQxfJWOlHD+DyIDgOwj+1UaKkwCNSnT+Zu8UtNWylBlJz2dK3u60P0J95WonNajmVwb8m6FwHumB/frI29U49ErvXLtlfNBuSSytdg8JezQ+liatwSsVpWQK2vfgqEpUxmHKDdbQQzBvAzxFrGgHmdPr7jaC1USl7rqjCHQwuxoYR6q/hNi2ovEPAoM/cZOX2tY/2UWVWSxHGwGnyrJQhRXxDz089TjBinAei2zc9aJ0y42ZzUV4q5ScY68wmi0k1F1VMIKAQUAgqBtwaBmsdiemumoXpVCCgEFAIKgVojoAii1oiq9hQCCgGFQJMgoAiiSQSppqEQUAgoBGqNgCKIWiOq2lMIKAQUAk2CgCKIJhGkmoZCQCGgEKg1Aoogao2oak8hoBBQCDQJAoogmkSQahoKAYWAQqDWCCiCqDWiqj2FgEJAIdAkCCiCaBJBqmkoBBQCCoFaI6AIotaIqvYUAgoBhUCTIKAIokkEqaahEFAIKARqj
YAiiFojqtpTCCgEFAJNgoAiiCYRpJqGQkAhoBCoNQKKIGqNqGpPIaAQUAg0CQKKIJpEkGoaCgGFgEKg1ggogqg1oqo9hYBCQCHQJAgogmgSQappKAQUAgqBWiOgCKLWiKr2FAIKAYVAkyCgCKJJBKmmoRBQCCgEao2AIohaI6raUwgoBBQCTYKAIogmEaSahkJAIaAQqDUCiiBqjahqTyGgEFAINAkCiiCaRJBqGgoBhYBCoNYIKIKoNaKqPYWAQkAh0CQIKIJoEkE26zQ6EqkbwFhenB8Tzs0O9n+7Gea7f/eSPaIGf4aIFzOwL4BdAEQKc9MBPEvAH5j4ASbtrj12Np9Ip9NGmLknEot21Tk6COCfnfUYeDoKo2do6LYnw7TpVrYj3nsyiH9Y9nfGg60tvHBgIP0P5996epJvm8jRHSB8eupvhJUjg/0nVDumWCL1ZWJ8y9bOyyb4oNGh9IZq254t9RVBzBZJz9B5NiFBUGd3KsEmfxeg/UKK5W8AvgqDV42MpMeC1PUiCFGfmY7LZvpuCdKWV5menhPaJvQ30gAdrgiiWjSnT31FENNHFmokEgSaiSA6OpL/RFHtB8x8tO2mUIncHzMpcuzo4KpH/Sr7EQSAdGt00zEDAwPixlLxL9bd+0Ey+RcAdlUEUTGM066iIohpJxI1IDsCzUIQ+/ckd4nqWAVQT40kvAmadtTIutXDXu35EUSt1Eyu6iXrmqJUTDWSecObUQTRcMhVh2EQaAaCmDv3pJY5W716BcAnOeauM+F+MH5IBv9qcnLHTRs2XJMTZZLJZOT5542dchzdD4RTiHEwgKi9PgNPIKodkh1Y/Wc3TP0IAoBBRMcND/atDiMXe1lP9ZIiiEphnRb1FEFMCzGoQbgh0AwE0R5PHqQR3QFgq+I8xebOFEkGUROJOrGeJXuRbqwGqN2OFTGundOy6VQ3FZGUIAgvgfH2Le3Qra3RF5ZXqmbq7Oz9MGuWemmnQpuTAOZMta9uEDP2A1cEMWNFNzsGPtMJoqenJzqh77wS4GNsEvsrDD5gZCT9xzBSTCSS79QZdzpIYhOZdMDwcN9jsrZkBEGEW5gtq6Hie8FThsYHrF+XfirMeIplO7pSpwG4ovD/LwMYAHCUIohK0JxedRRBTC95qNE4EJjpBNHevnRnajHWEbCPbWorRob6r6xE2NLbiIfpr4wgGPg6gfezWRxVrGbq6EhuhQhuA2i+NR/Gg0w0QuDzFUFUIuHpVUcRxPSShxpNsxFEV3KuBnoAwI6Fqb1hMh86mkkPVSLsuQcmt58zjrsB6izWZ8bN2Uz/8db27PhJCYJwLpn0aqnPQmVqJol6aQUTtinxP1AqpkpEPS3qKIKoUAziQ22doAVgHM/ARwHsXGhKODI9T8BGZu4zcvra9etvf6nCbmA5EunafMD8LECfsPUjzBL/QkyDpsbX7rELZ4M6UZVtGo4PWDhwRUw+mcCLAOxV0J1b8wKwjglXhunPMXea19X7Pg38OQIvBOi9hcfXMRAegclXwMTtRTv/Sm8QjZKPn1zbywniFTLp4OHhvof86rr9PdaVuoyAL+TlQUyMdXNatjp1YOCG8aAEAaI7HWapFamZHOolS91lRvgziiAqle70qqcIIqQ8rA3boG+AcbL90dGjGZ0YVxl6ywWjo7e8FrQ76+oexQow/QeAbf3r8eOk4bThdelfyk6S9vpuBCHKjOt0MQFfdFrMlPfPj5OpHR9mo7OIxzAvJcKR3n4AW9oOSxCNko+/PPIlOjuTH4NGDzDwDludilVMQfstlnO7QbRFtr7c4dhmMPOibCb906B9lKmXwPfAwGKO0pcUQQRFcXqXUwQRQj7C0QkR6gNwYIhqVlHLJFGjI7Pr+n7vV9fNYsWvnjBZBOHSyTd3OK9oLimrIyMImMZpiGg3OK1kfPp8nZlOyGb61viNrbM7+Wk26SYAu/mVLfzdsvMHmycFDbXRKPkEHL9VrKvrmB0N5O5x4LqRTP0zw8Nr/hKmrUrKuhGECFfi9F1g4PrsUP+JfgeM4jhk6iXxtlIW4kKpmCoR3bSoowgioBg8bNnHwPw4SPtVfoPmVgBxMPYuPyXzwGQbjtzw8/Srbt12di7ak7Xo3cjH5rH/DDCeZkKWQK8xeDsCugDsXt4WXTM5tv0KN5JwbhoM/IaACQDzbG29TkCWQU95zwm/1zXtkIfWrX7abU4dieQ8MN1pM4MsFtUB3gBov5H1IUiV8iaZHcUKbrGYGiWfgMulpFhnV+rrDHyttC4/bjJOf/dueDCoarCSvr0Iosz7mfBH6NwzMpIWIT18fzL1krCmUgThC92MKaAIIqCoYl1Legim2LiLtuwGA5e0RflbsiBk87qTe0dNupqBg2xdeFqLWHrzcUo76zBjrRnhcyRmiNSeWPoRjfXrHHF9DAZ/OTuU/p5sej7OUy8y05m58e37nQTTnlj6UY0NEbfnw452XVUm8+Yd9XZtTstdBMTsOIBwszGR+zfn+4zALWKSGLdQQ5X93AiiEfIJuFTKiokbIXTzPoclU7Hc3xi4GRqtrCQYn9+YvAhC4uAWWM3kpl4Sb0eKIPykMnP+rggioKwKD4OnTxUnumJksE/8f5nlSLFMweJkbWl4Bb6rNbpNUvagKIk+aYD5gt13w3e8TpmFh+wfALzMNh1XW3sPgtgIgxd62edLbzgeKoRYPHUKkWUjX4xS6jsnj9sA3AnCeritq3wCLhVpsVi8dxER3+DznpR/qAffrpnm3ePjb3/CS1UYZDxeBCHqV6pmKlMvMZ0ykum7WrSpCCKIZGZGGUUQAeT00YOXbfO2Nydut5/sGTgiO9Qv1Caev46u5KGA5UVreZa6xb4R9vJai/Hz0tO5t6rI3rHspE7AhcND/V93DtCFIMZM5oWjmbQwyfSZU4ljlCj7lBnVu0cH1jxjrygzySSivok3t1/mt/HJyRVSgmiEfPwwCfL3Ct5hdAZ+pzF+Zkbo5kpuGH4EUamayaFeeo41OqD4vqYIIshqmBllFEEEkJMsZn3QMMntPYt21/TIAwCJt4lHGLSxhczLBgfTL9q77kz0LmHmm20n7dDetuVt8KOT0egBGwZW/d3el5QgPG4BToja48kujeheANsU/iaNsy8p9xqZfOjwcHokAOxwkqtFsBKnsEbIJ8h4g5Rpbz92O2rJfaNgpjoVeiNIXQCvMHBdC+nfHRxc81yQOn4EIVEzTQK8cGQoLeQr/ZXjXXorVgQRRDIzo4wiiGByolg8dSMRjisWD2OVFKALinWlriPgs7b2Q1mUiHpdXYvfoyMyQMAehXakTlnS8AuMbw5n+rd4v3oMWtKPlCDKVWY8GkHL/KGhW0U4Bt+fzALIRcVUb/n4jjVsAXHji7TMORpkfgmgD4QM/y1uFj9oi/L5svcvv8OAE8OySKw+6tMy3w6bekn0rQgi7GqYvuUVQQSUjeSEL2qOMXAHgVe2RrcZkL0rBGneZSM8KzvYf2mQ+sUy8fgR25rUJtRen5yq5/h4xb9LT5VMi4OYq7rULyOIfAyinW4VgUltpHd5
dqj/jDBz6oynfsSEz0+14RJWop7yCTPeSsrm35DQRaz1MrEwoZZYppW3bFmfGXy015uR3w3C2tDLczk8ZuYiB46OrnpBNh8HAZSolxRBVLICpm8dRRABZeNijWOvbemLidHHEVobRl8sOZGLdu8DKJSdPINbCDjE7mtAkpuBZNMwmGlBNtN3TxA4JPXLCEJGVkwITXqxROpMYkxZY7k9UtdTPkEwqWUZQRjjOsWIsBgMkaHNizA8fSqCEEQYNZNEnVeWcEjdIGq5Gt7athRBhMA/Hl+yn0mmeHAO4uz1OoC7yKQbJia2/6XXo6wkHEOIUfkUleT3DbLBe7UapL5LkLhAD/v2vmNdqQUETHn3euWkrpd8aieMylqyDBjm6MeDSdy+3uVsRTi45cZ2OFm2xoIQhGgvqJrJuVZlb3GKICqT83SspQgipFSCh4soaVjkD/5xlPSLZY+LiiDchRCGIEQr9ZBPyCVSt+KF8CvnFMKv2JMHuT7+ByWIcjWT3MDBvvm7WeQpgqjbEmh4w4ogKoQ8H9DOPIMA4XtQTJTi19qzpPGy4XXpB+0FFUHUjiCKLdVSPn5CbfDfqSORPBtMl9gftmWqRDGuoAQhUzMxcFh2qF+YXlu/IOolUa5qgkaMbXYAABzvSURBVKhVnuxE6sslMaGAP0VJTwS1AGuwXKdld4ogqhcLxXqW7Ek5PgQaLwfjYz5B/ESMoUNH1q3+dbFrCUFIrYKqH2q+hSAqIq++gtR/K1RMLmOuWj6V4h6L984nMi8AtF0AFibBj2s8viCT+alQP1b8k/mXAHIHzKAEIQYjcZorMSoIol4KSxAAyizQvHJYhwHNaeAARRBh4LPKKoIIDZl3BeEF3Nb20j4GRZYRoxdkhcsu/TnMCCURP0M9GoedQpANvlqCkPomeCS2cesvFu89lsjyD7F+Xm8QQXCoRD5B2pWVkWy4T0dh9AwN3fZkpW0W65V59rv4sYQhCD81k+Nm4BoePMwNwiKUfPjyKS94Kx1rLtLtZkUVBDtpnuwQvj5B+pgNZRRB1FfK1JFIHQzGjXY1lPMDkGUdq3YjrHaDr0F9mW9CaN+OznjqYiacVyuCcFJ1EPlUukRiXakDCfiZLT9z4FhHfn3GEqnziHHxVLkaEISXmqmc8N0TDIUliLIHcqDqG3RHR/JdiNIAGCLfSP5w4ZFYyQ/v2fp3RRABJG+drNg8HEwiquhcBobbopuWBU3yHkskTyCm621dlXwA0tNOIbZ+MXFOgGGiQDQiKNyOBPzBBH6vmXz98HD6EXv9RtwgrJOhw0QVkD98us0t6C2k3vIJgr2sTCHw4C8AK7JvcZe6AyaWhpGrs22pj4nL5hfmBiH6cW7WxbcNh3rJM+hkWIKQeNyLoVSVM0PmhQ+JT1Clsp0t9RRBBJB0LJ48gohEzoN8wLmQYZGDvDE4YtuIXl7TWDs8k1k9GGCIVpHOROo4ZoiAcMXAeJPOh0ZRrlEEIZn3JDMtDeqQ19mZ7GDNCumx3dT+KlFTNUI+QWVgL5ffyHdeCfAxtn+fZObjs5m0yCtS0S8eX/whkyL3l5hbu2x+YQlComayvN910r9ge/D1zD4XliDm9ix9xxxd/wVAIjOj9RNOgJqZO3h4+PZNYUESasTWrV+9iZl7bXWrvpWEHUczlFcEEUCKEkc2gwgnDA/2T+nGvZop28Akwe1kHz0BD0y0cdIrf0Sx33zMp6hwdLOH4v6VMZk72BlSu1EEUR4S2vrws+Zk7nC/NKwuH7n0DaIR8gmwTKRF4vElCZPMu+wkB+BlZj4hm0kLr3fXaMCyBl1Cwrtu2GEJQnKbfYOZkgQ+G4RP58fknb86LEFYhxtpzozgwSrtWHUmUouZIZJT2WJd5bPdVXNzq3QNzOR6iiACSM/lJLgJxAtGBtPrvZpwiUpa5n0qvrqOROp7YPxraXt0U2vU/KJXzB2xEZNGl9tDUojkRcxYkc30/9A5vkYRhOi3PZ48SCMrmq3tY/WeUzKZjDzzLM4B0UXOGEXyYH3Sk3qt5RNgpZQXEWtnUt/5cgaf6virTqAfRSj3jaBml+2J5L4aY6Uj94dgmIuyQ/1flZFNWIIQY5S8CdwurFwB7CjWFREdNzzYt9oNkEoIQnorynewVte0070SUhXHYX0HETq9kJzJHggxcKTiioTcxJUUQQQUrstJ0DW5jrU5yj9o18XqcgsQ3/3DjMiK7NBqQUYlJ07RBzGJQH/2hDzCPM319tFIgnDL7SBuEkx84uhgeqNdBFYQu9aW74KtwIhFVdlUEbfH+0bIJ+BSKSvW2XnUThxpuQuM/SVt6CB+kEA3kcnrxsd3fK7oES1O85OTr+3MFDkQhFM2h43/uBMTv1tmJQQhic20ZdgB1KuVEIToINaVPItA35bIXeTJ6Adp17Rq4xsHBta+UhxQT8+RO0wYLfsw0yKCtWbKPM2Bym4ilcq7meopggguTZcTvtXA6wD/FtAeyzfH7xS3ZgA7lzfvvVg90nOKtw+RfnMdQC9aKTpNfFJqRgt6Epp55Mi69G9l02skQYj+XdQi4k8ih/ZTADJg0hm8HwEfAmD3Ei6Zgod1V0PkE3y5lJYUiZZMLXoHAf9SaRvOenk9vb7QK7d1JQQhN5oorOwAeasrJQhLrbjVq5cx+KSQ0W29IO2fbOOTgqhpayWXZmpHEUQIabpkbgvagkhRemNblE/3C9Hc0b3k4zD5JwC/J2jjtiPek9DoaLsjnrONRhOE6H//nuQuUR2rSrPr+c7uWWLcENTMtVHy8R21SwGRC0Kbk7vc7XYUol2hPlxrtPCKhwbSz3vVq4QgRHsSNZNF6My8KJtJT8XGkvVdKUFYh4m5J7XM2frlfwXThT4Op35w6WC6FKb5NfXu4AeV+98VQYTEztKPP0enbD7lCv34DgGriyTw5+y+K/cHTVBfQWIZnRhXGXrLBaOjt7wWctMIZeFRKcEUYgmtKMQS2tYbOx7kaOR46OZHggbrE+01Sj4B5S4rZuURJxiXEONgr9uSpLI4ZDykaTh3eF2/sG7zfeCulCCkaqYA6iUx5moIojjneDz5fgN0MREWhsUIwhCC+CSn+rIKmc3aqoogKhS9dR1uffVTICxlMuMAiZDMxYexMYCfIdYyBszr9PEdR/1SbLoNw1LPTNACMI5nYF8Au9iu3y9szkP9JzB+bOT0tX6WQcU+Kt3ga1U/H5o7upxAwvxzH1ueZpF/4EEmXLnHLpwVZBo2WF9xjI2ST4XLx6omZNsyTt2FEO3iDUmsIbta0gDwPAEbmek2PUJ3B3mstY+pUoKQqZlE1NjsUP+JfsRUC4KYkuMWjA4DuAtEu4DxdtscRSBMcYt6BMw/M/XoXdV4YFcjz2asqwiiGaWq5qQQUAgoBGqAgCKIGoComlAIKAQUAs2IgCKIZpSqmpNCQCGgEKgBAoogagCiakIhoBBQCDQjAoogmlGqak4KAYWAQqAGCCiCqAGIqgmFgEJAIdCMCCiCaEapqjkpBBQCCoEaIKAIogYgqiYUAgoBhUAzIqAIohmlquakEFAIKAR
qgIAiiBqAqJpQCCgEFALNiIAiiGaUqpqTQkAhoBCoAQKKIGoAompCIaAQUAg0IwKKIJpRqmpOCgGFgEKgBggogqgBiKoJhYBCQCHQjAgogmhGqao5KQQUAgqBGiCgCKIGIKomFAIKAYVAMyKgCKIZparmpBBQCCgEaoCAIogagKiaUAgoBBQCzYiAIohmlKqak0JAIaAQqAECiiBqAKJqQiGgEFAINCMCiiCaUapqTgoBhYBCoAYIKIKoAYiqCYWAQkAh0IwIKIJoRqmqOSkEFAIKgRogoAiiBiCqJhQCCgGFQDMioAiiGaWq5qQQUAgoBGqAgCKIGoComlAIKAQUAs2IgCKIZpSqmpNCQCGgEKgBAoogagCiakIhoBBQCDQjAoogmlGqak4KAYWAQqAGCCiCqAGIqgmFgEJAIdCMCCiCaIBUY12pBQT8tNgVE87NDvZ/W/x/MpmM/O15HMqM00A0D4y3W+UIL4F5PYOuyo3tcO+GDdfknEPt6Um+bdKgI5n5VIA+AmBbAAaA5xm4n4n/e3Qw/XsAXME0aV5X7/s08OcIOALAXgC2so3tDwTq0ydzt6xff/tLFbSPuXNPaom2vdwegXYiEycA7AkguqUP/JkY9+lEK9cP9f1f2HnUu/3CnCnWs2RP6MYiDXQoA/sC2AVApPB3Sx4EbGTmPiOnr60UL7FWnn6OPkXA5wB02/p5HeDfEtFVcyK8dmAg/Q/Rt9e6CyKvefOOenukJXokCJ8D0fun1iYwBuDPAN1vknlNpWusQfIJMlVVxgUBRRANWBpuH2pnZ+/+rJk3AvQBn2E8xhqlsuv6xGZv0UdHV+9hAP8YwDs96hoM3NgW5dOLm0aQ6bYnkvsS03UExAKU14lxlaG3XDA6estrAcpb44/Fkyki+i6AdwWpw0CWiU8cHUxvDFC+3u1bc+jsTiXY5O8CtF+AMRWLVIIXOhLJT4LxwwBr5UVm/lI2k+6PdaUOdzuYeI23vf3Y7SLR3EVMOHWKsD0qhJRNI+QfQhyqqBcCiiAasD5kBAGT/kjENxRO/UFGsZFM/TPDw2v+GutKnkmgbwX5eAsN/wQGHz8ykhYnP9efdZt5jk5j4L+mbgtBRpbnrCeh0dEj61b/2q+PZ57FOSC6yHbKDtrLGBN/MTuYXul2mxBzqGf7YqDi5Dtn61e+CcaZFczBmisDv9FMfeHw8Jq/+EyeOrpSXwTwnRAyMcB8ATR6HIzbi+3bb65ufeYPLVgN8HuCCqVQTqytc0aG+n/gddNrhHxCjlsV90BAEUQDloeTIIjwU2brdL5ToXsD4McBLZvfPHg/Aj7kJABiXGuCf05E19s2C/FhZsQGzeDtCPi0rd3i7AwiOm54sG+111roSCTPBtMlkk3PUmEA2mN5LuCPgPExyYa1CcQLRgbT6103oERqMTNuctQ1wHiaCVkC5W8h7n28zNDmZ4dWj8r66Kxz+9btzQ2nvFrwDzacWsHcDtB7ZWQu5DmnZdOpAwMDeki8RPHXSdyqQE8V5N4FYHdbO0K1tQHAvKAE0ZFIzgPTnZL1owP8RxCNgmkC4HeC0G1TOU2tMxB/ZWQw/d9uJNEA+TTgi549XSiCaICsnQTh6HKtofFZ69eln7L/e3ti6UeJjTQB+zg+ehNAC4AxAi5kgy+33wwKJ7QTQPQ/pZsw3wMDi91uES4f7ovMdGZufPt+5xuIeP+YyGEFiL7q2OwfM6P6/NGBNc84obV02nNa7gfwiS1/4wFDw+ec8xd/F32M6/RvBFxQusHSra3RF5Y7N9Z6ty/G1N6VnKuBHgCw45Y50JMmm6e8ezc8mE6nxcZc8hMqG61F/wrAZzuIYhOZdMDwcF+eeB2/ju4lH4dp3uvYsF8H8Teg4wqHLKmzO/kpNnGlmxrK6wbR2bloT9aidyP/hlL8WeowPZe70PluItbZX5/FpzXSfui4bYwx85JsJj315lZsrBHyacDnPKu6UATRAHG7EwRdMzm2/QrZA7QYVkdX8lCA7gAwxzHMMSIsGx7sv81l+NQRT54Lsm4Dxd9zrNEBtneMqT/s371kj6hp3gfgg7byG2HwwpGR9B+9IIp1pQ4k4Bb7JsbARdmhfkEcJY/jhbI/s83nMTMXOXB0dNULHn0Enku928/fHnovB/MK23j/CoMP8MPJendJJJcT07X2GxozHZfN9An8Sn5CjdW69as3MXOv7Q+bGDg2O9T/cze89u9J7hLVsQqgHmcZN4Lo6emJTuZ2uooJn7fVERv9Z8VbhpfKSNafeJMwJ3OHO0mlAfJpwNc8u7pQBNEAebsQhO/m2NV1zI4GcvcA1F4yTML3Rwb7z/L6cGPdvR8kk38BYNdC3UkGDpNtLh1dqdMAXGHfiPxURfbxdCZSxzFDvKcULXeeMjQ+wHkriCVSZxLje1N1CStHBvtP8BNBe/vSnbUWQ2yKHy6WlW2s9W5/bs/Sd8zR9V8A9FHbmFeMDPVf6TcH8XeZPJlwfnaw/5vO+p2dyQ7WSNwetiv8zWDGimym/4d+fXV0JN+LCAnZv9te1o0gOjt7P8yatVa2qDx9VEX2diX9SVWa9ZaPHy7q7+ERUAQRHrPQNWQEIdRDw0P9X/drrCORugGM5bZyL5vgg0aH0kK/7PqTbWYMHJEd6hc65qnf3AOT288Zx93CKGdq83W5Abh1JmsDTKeMZPqutteJJVJfJoZ4XC/+0q3RTcd46eALBcVDrSCgAwD+C4MeAbQbne8Q9W6/oyP5TxShL3JeRfYxEIwIG4cMDd32pJ8ci38vk6cLSXbGUxcz4bwt7fLwZBs+s+Hn6VeD9CUhfbgSRFfq6wx8zSZ/6Q3Aq99ORxsA39Ua3SY5MHDDeLFeveUTBBdVJhwCiiDC4VVRaQlBuJ7mnR1INorRCFrmDw3d+rLXYPJvBHQHyHq0tn4ygpDo1AMRkP84Ubb5x+K9i4jYrhbztUoKA3i92w8zFreyQQjC5aYx5TsTZBxdXYvfoyMyQMAeU/K3+d8U/026TiTl/Ppsjye7NLJuPNsU1trTURg9dvKcCfLxm+ds+7siiAZIvMzMFSj7eNyGUX7qKj+ZyeoGJYhYvPdYIr7ZRiJPcC7S7fMuUNZl+cfPj05GowdsGFj192Lhed3JvSOmpfrY29aAsOD6tbDxN/XoXWH7tQ+k3u3XYqkEIQiJevANk/nQ0Ux6KOgYPnrwsm3e9ubE7Qwc5EUQEiIxmGlBNtN3T9C+RLn2nkW7a3p0nU22ZYegmSCfMHOeDWUVQTRAypIbxJ+ipCcGB9c859d9GUEE1NsHJYjyGwqeZeA+ApV5bnuPlYUX9CE2opGRIMW6Uv9ZsEpya+4ZEO5ixm1t0a2H7CoKP6ysh+D6th9gCGVFKB5fsitr5qeZcSyAT5ZYfUnkKXnMfcqM6t0yyzCvAXXGUz+yPzzLVEzOkz+ASaEeArQpYg8yaWbemsjyuC++mUDyTjQd5RNkerO2jCKIBoi+nCA4kJpIDK3eBCF546gVIlJVlXivaJmgW4nxmQAdCfv7DWD8KGiIinq37zZmQchv6tpuEeJ9AX
4fM32CgP1LwofIKssIolwVF/hAYe/CuXZkBOFjgh1ARO5FZP29VfKpaiKzuLIiiAYIv+wjZDzY2sILg4S/aDaCEHB3dCS3QhQrwPQfITzJhX9BlqH9W3ZotXDEc40vVe/2C0uG2hPJDxLT2dbJuRhDK+x6khGE8zE/xHqZ7gTRCPmHFYEq746AIogGrA5FEHKQLUc4A0cT4+yCc1fRTNZLKrqwAJsY2+Fbbv4jxcr1al/4jUQM81IiHBky1MYmKwyjPX7WLCSIesunAZ/0rOlCEUQDRD2jCCLgG0etYbNUD+PUTcBSEA72OZEbDP5ydii9xafCZ0C1aj/v3cw/CRCr6AWA/0SgIZNpINeiPSQe7AM9Ur+1KqaK1FnVrodayafacaj6pQgogmjAipjOBOF8yCTggX9s3XrUo/ff9EYDoHHrwnrYNWHMB+ELAM2VxDLydTT0GH9F7Rf8PdZKvJT/xsDNgHZvLkqPbRhYJcKfS1VgAQliPhELf5Upx8N6PVLH4r0lfRHwd5h80PBw+pEmlv9bOLWZ1bUiiAbIazoThMS79Y/QuWdkJP23BkATqIt8KBBDhEW3h4+oyBxT1mHQ9jsTvUuYLZPgLbkeCD+Czmf5Rcot9huEIDo7kx+DRg8w8I5CvdBmrj09J7RN6G+kATq82Lfs0VjSl8HMi2SxlAIJqw6FgsqnDl3P+iYVQTRgCUxrgsjHUrLHRxJmjgtHhtLC6Snwz/LcJVwI5udB9DAYD8HgHxc3TuGFjIgmNqtuwBSRbCMRmIcF9UJu7178Ps2M/NKeP8K+4dW7fRGvaELf6VaR42lqw3WJOeQGWjx+xLYmtYmbgTB1zf8kKj2XECuBQ3qIZoM6yknDhxBdMTLYd3qYBE0iXIep8d2bTZhFZNpHCPg/Yr4yk0n/VYyn3vIJvFBVwVAIKIIIBVdlhaczQcjiHBFR38Sb2y/zewQuoiGidGpzWu4qTTBU6tDX0ZF8F6I0AIYIfS1+oU6qfh6/9W5f6t0MXJ4d6j8j6KqQxDySEoRoL9aVuowAsUkXfuFCbUjiY7mF2qgmAOEUzUn8T0qCQ9ZbPkFloMqFQ0ARRDi8Kio9nQkiH6E09T0w/tU2Ob9osSU4xLqSZxFIpFDdonphPnkkk76uWFB2Ag9DRJINRjhiLc5m+taIPurdvuykLfI5DGf6vxBkUbhEZ3UlCFmwPiKcMDzYP+X17tZvwatZeEJPBTcUZd1iMbXHkwdpZEUNzqeUtX7ekYbtfcvDkvOa1ug2xxYdHestnyAyUGXCI6AIIjxmoWtMc4JAPL74QyZFRJ6G3WyT8w0tLcp2dC/phGmKrGXFSKCFbGm5g4eHbxdmnVM/uQ7fO8FMcbdyhi9nSbiSerYvDV0BPIGodkh2YPWfvRZF3i+DBAkLMikx5WXGzdlM//FOdY5buG9o2lEj61YPu/WXf0inawCknGXcCMIan4ZVIFpoq2MQcNHE2A4Xe90k86oj6gNwoP2AYTIvHM2kRd6Mhsk/9IepKvgioAjCF6LqC0x3ghAzlNwCxD+PMfAtzrVc6sw37eGM5nr7kKui4JqURgxAJNuhltwlBJxcsrlKQp7Xu/3yiKVihPywSVguy5VdSE16GJgvcc0l7eEE55YwiED/zoZ5vTNhUHti6Uc01q9zy5HtlTDIpS8xv7s0xtmZTPoPji/BI0GR/PZRb/lU/6WqFpwIKIJowJqYCQRhbWZbvXoFwCdJINEZ+B2BHhZvBwUfgLgk5ajBwH+3RTdd4BbCW67OsHosTWtJ7JWus5ifuyyfcz3bd8uzYGFSmjI1AjY/AaIPlKVWFbcO4J9tSZP+V+PxBZnMT1+XLcVYPHUKkZWrw+lEGCTlaDH7oNW0X05ql6yCxWE9w8CQSAnrkdpWqMxGotAXu8UZq6d8GvApz7ouFEE0QOQzgSAEDIUT7zfBODOkh7CoboBw6eSbO5zn87hdzKwmktvbdN6BBfEYa5SSZcYrtFDX9gtRa0Vuim0Djzhf8HUmOl/kD9eY77OZsPoF4qNYV/JMAok8GtGAfY6B8Q0QH2w3DfYjCNF2ZyJ5GDOJnOG2lKoBewWPmlHjaJ+ggnWVT9CRqnLBEFAEEQynqkrNFIIobrD53MZ0mfOR0x0Efpw0nDa8Li3MUF1jJNnrd3b27s8a/zh4H5a660eca/kPp7pLNq56tt+RSH5ShCd3VRuVDkjcvm4yNO1rD61b/bTksTuINZdvvmlbl8+SxsvmaFhflg+EcFZ2sP9Sv8Us/A5amL/DzIsDkpJbnmzXruopH7/5qb8HR0ARRHCsKi45wwjCmqdISv/MC9gXpnYiYB4IkEhfWTw1jwH8DIh+wazdsMeuxsPpdFoE0wv7o3ldve+LMi9nwiEg7GULsSHae56AjSDcONHKdwbNpmYbRN3aF7etlq1eOZSAZeLgDWAX263rBQIeZeY+SRTaMrPSoNZcjj67AexcmOvrAP8WjCth4nbxNhE03LuXwIQJdKTF6GWIvNgk1GLF/izZAPwooF3fGjXvCRJ4UtJX3eQTdiGq8nIEFEGolaEQaEIEJH4bNfM8b0K41JRcEFAEoZaGQqAJEZBkeKsolWwTQqOmFAIBRRAhwFJFFQKNQqCjKyWcDEUYFGFe+mvW+OaRdenfBu2/oyt5KGA5v80RdRioKJXs/2/vjlEaiIIwjs+sG4KNnsAD2FtExAPYiIUpFC9gKdgpWOYq0ZwgtRK0t7axELTQUhDyZBMDi0lMJr4Ub+ffjy9vflN8xe6s8/4eddUUICCqOVe6Slxg/H+RS6eevx5Pe3243O6UJbu5/z5xOq4fUYCAiIjJUQjEEmjsNPdVtfiMyGj/4T1Itnd/136Y8Ru6vds8l6Ct8qdPVPWkd3vdjnU/zvEhQED4mDNdJiYw/MR1vysim6Wrv4WgZ1+f6zeTdk2KTeW8VrsKKqe/Xk/t1vNwuOCbRonJcd2YAgRETE3OQiCiwF9b1IPXWiV7HD5fCGsapCEqG+MLjvokWf/A8vwiYgsclbgAAZH4ALl+dQUGzxJWPy6CyOUCm+0FzKyt8+ri0VkUAQIiCiOHILA0AcsW9egSpq3zpd2cg5MXICCSHyENeBAoNtufX1a2VMLRhM32n81m6Ylo5x+bzR4o6dEgQEAYsChFAAEEPAkQEJ6mTa8IIICAQYCAMGBRigACCHgSICA8TZteEUAAAYMAAWHAohQBBBDwJEBAeJo2vSKAAAIGAQLCgEUpAggg4EmAgPA0bXpFAAEEDAIEhAGLUgQQQMCTAAHhadr0igACCBgECAgDFqUIIICAJwECwtO06RUBBBAwCBAQBixKEUAAAU8CBISnadMrAgggYBAgIAxYlCKAAAKeBAgIT9OmVwQQQMAgQEAYsChFAAEEPAkQEJ6mTa8IIICAQeAbGWDfHvqz/kUAAAAASUVORK5CYII="/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-51"><g><path d="M 211 151 L 261 201 L 211 251 L 
161 201 Z" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 201px; margin-left: 162px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Failures?</div></div></div></foreignObject><image x="162" y="194.5" width="98" height="17" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAABECAYAAACbHqJdAAAAAXNSR0IArs4c6QAAEYRJREFUeF7tnX90HNV1x793dmSJkGJoSAyEH/lVTpJSmoONtSvt6ojfhNgxAXYlEwxOSUKaUtOkpTmltE0bk4SQk5xwCBRSBwwES7sYDHVLiF2ieH9o5dQpJYSmSSAtuBhMXWMIyJJ255a32hWj3ZndmZFmdq298+fO+/l5b+c789699xHkEgJCQAgIASFgQYCEihAQAkJACAgBKwIiEDIvhIAQEAJCwJKACIRMDCEgBISAEBCBkDkgBISAEBACzgnIF4RzVpJSCAgBIdBWBEQg2mq4pbNCQAgIAecERCCcs5KUQkAICIG2IiAC0VbDLZ0VAkJACDgnIALhnFVgKSOxxF1gXOFHhUz4Qj6d/JofZTspMxxL/DkxbpxJS9g4mk6urc4bi110bIH1NID3Vu4x8NF8JvmPTuqRNE0hQMujA7+jM1/BhPMAnAzgt8otKQJ4AUAOoFSnbjwyMpL6TVNaKZU6JiAC4RhVcAlFIAARiODm23zU1B2Lf5CYNhAQdljeOAPf4amOvxob+94rDvNIsoAJiEAEDNxJdSIQIhBO5kmLpKFINPFZADcBOMxDm55kjRL5HcP/4SGvZPGZgAiEz4C9FC8CIQLhZd40IQ9FYvE/A9NXAISq6i+C8GsAWTBNAPx2EPrAeFttO+kZaHTJ6I6hf2tCH6TKOgREIFpwetQIBOOxzg5etRDWbGUPogUnnMcmhaOD3QTjEQBHmYooArRBp6kvptMP7DEXHY/HQ88+jzM1ws0AvX92tTwy2YULd21PHfDYHMnmAwERCB+gzrXIhSwQTtnIHoRTUs1JF4nED4OGTSBaNUsciP9iNJ36OgC2a9np/fFj9AI2AdRvzsuMq/PZ5N83p0dSqxUBEYgWnBciELLE1ILTclaTenriEdbo+wCOqNxg4M6p8SOv2rXrjqlG7Q/3D74LBeNRmrZ0Kl+cm+zCBfIV0YhecPdFIIJj7bgmEQgRCMeTpUkJa5YKgb1k0Fm53PCTTpsUiSb+CMAtpvR7WKOzZMPaKUH/04lA+M/YdQ0iECIQridNgBn6+/v1icI77gMQN739P4IiLh4dTY07bUpPz8DprPEPABxZzjPJwEfymeR2p2VIOn8JiED4y9dT6a0gEN3dq5doemEFiD4CYCmA4wDopg69CPDTDGwuaqHUj3cMPeeks35sUteU6XJT303+6rQV573u7o8fQR1T1xHwCQDvADAO5p8z4V5jsrBx584H99nxUZu3z+0JLSMy1oJxDoCTTKxfJOAJEO5eFOItXg0Vlp4dX9w5QSvBuJyBUwEsKben5MBGwFPMPFycKmyp11aVp79/bdfE1OtXQ8MpYF4G0NHE2JDLJv/SyRyopJF9Jje0mpNWBKI53OvW2kyBUA5PGtM3AJxtYbpo1+4iM7YUQ9rnGgnFQhQI0rTdMPh+gN9jDYi3duqHx0dG7jpYdZ96+uJnsIFv11r1WJb0Koi/hAJucfqm3t8ff+tEkb4ExlUO/RQKxLitWOi43m8Htp6e+Ieg0TYGji73dr8BPmcsk9rVgn/LtmySCEQLDnuTBGKuDk+K5PMaa6uy2aF/tcO60ASCgGsYpYfvB236XCSiy3Lp4SHzfWUFRCFtPYOvcSHE5SJ4zNCLl4yNPLC73vSNROK/jRANl8Xe1Uxn4BfQ6EI/9wNq9yD4iUldP2vXyKb/ddVYSewbAREI39B6L7gZAtETS1zMjHss3jJfhXpYgB4HoJYjQmDjNFDJjr3Wc7bB8s5CEwgQ9pmcv9T6exagZ8pfE70AnjamQmePjW16sTIjli79dMeiww7cAvCna2YJYR8xfsIg5WQWYvAyAn63anlPZXuKjMIFudwD/2010+rUUVr6Amk/KY0ncSeAXjDeXStU/vkm9PRcdBJr+j/PElaiW0bTw+vqmch6/1dJTi8ERCC8UPM5T9ACcXrf4Am6YTwK4AOmru0nwrqJ148ctjJbLL8Bf4LBXzUFZFPZJwFeNZpJKRPImmvBCcRMD3mM9dBgfmTovyo/qeWd8SJOHEunnjKBsPE+5p8bjHUnHofHUqmUEuKZa/nyj71NW9TxdzT9pWL2WH60U+dLrPYlwtHBfoKhHsAVES8y8JUunW+0Sr+8L/5u3aDbGaU9kMpl+fUz1+lv82XzvMbFc7PZzT+ba/mSf/4IiEDMH8t5KylogbAwN3xFY21FNjukoqnWvXr64meyQQ+a7eGJ8WW7DcuFKBCl5RhdO88sDnbQuqPxpRpoW5X38XYUeWB0NPV/dWBTOBa/gphuNT/0ibA2l07eW50vHE18iwD1Nj59OXg7VxvZiw5iy2wHNtv9k0ZTw/J+d/9Fx2uF0P0AdZsSjBNhTS6d3OypUMnkGwERCN/Qei/Yh1hMT+tUiFWHPlAtLFmkFF5LAbTCzcOkktbK5JEZ9+azycutlgoWokAQ8Le5TPKLDkacIrGBm8F8tSlt3aWiqjIpHE2oL4nr3/y91rns1HPXHP7W1yceNH8NOA2VHonGzwfoIQCLVB0MPKej2J/JbH7GQf/qJgn3DXwABm+Z7Rx
XWuZq6H0917olvzcCIhDeuPmaK0iBKIVMCNEfMBAFcBoBiw3mS8ayqYzTTs7FTBTzcB6Em/qt+uQmv4WD2GsG8/lOeKllnJBB/wKU1vvVVbT7ArBjb1FGje9AyXJpih4C4cxKOcx0WT47/L1GY1p+w98GkNqbeJxBT3WQ8a10OvVSo7z17pc9r+8vm0tXkhbBfP3xx+Gm6mW1udQleeePgAjE/LGct5KCFIj5aPScHrCHvkD82tALfY0sihTncG/8o0T0wMw+AuFXKHD/6Gjqf5yOg9UXm8WSHoV7E3cT4bIZgQjAKsmuD5FYfDmY1EFPyj+kcqkN/WtHM0m1ZGYbt8kpF0nnDwERCH+4zqlUEQh3ntRuBGrevyBcOOX19CZuYMJ1lTYQsO03b+n82BM/uOc1NxMmHEtcR4wb3sxTu0/QExsYZGa1N2He1FaH9DxE4I2d+uEjFn4ZbprhKK2ltRLwKkBXjmaG1ReFiIMjks1JJALRHO51aw16k9oLgqX9q4/uKBQjBKwGoPYvKkdLqr+8bXjyBbgHkerU9146MjJSqMfRJjzFLwHth+7584cALDcJxFgIHR/OZO7bX/mtbPm0tc4JbwUGfkaMYQ7RlhOWGL+Y72UeZWrb+ZYD9zDzgKmP+4l4TS6d+if3/ZYcQRMQgQiauIP6WkUg1Ab25OQrSwxNP1UDncDEy8GlB9O76nrltpNA2CyRVQ+z1b6Ag6ngNImlEUJv7+Aygwy14azCpDS6lL/LVjLoromJxT90EpG1UYEWEV8nmfnyfDalnPfkOgQIiEC04CA1UyCUT0TIMK4hQL31He8JjwhEDbZmCIRqRGk8i8Y3iXChC49ttT/wXZ0KN1hZvjmdE9VLagC7DujntC5J5w8BEQh/uM6p1GYIhAo2F9Kn1jPhDy28duv1R715qqWNE2cSiUC0jEBUGmIS/jVVm8X1xvZ50nhNbkfqMbcT2tJ8mukzo9nh292WJembR0AEonnsbWsOWiBsnJdq2zcdVkJ5CucB+pHGlM1mh/aEY4lriXGjCIT9ZLI0PSV8IZ9Ofi3gKUjh/sGTaIrPg8ZXgKH2M2pDprzZqL3QtPPdnhcdjV56VBFTj5gc4orMtDKfHVZHlMp1iBAQgWjBgQpSINRGYsdhL99eDlNtpvEyCENs0MMd2tTjxxwT2mu3ienGimjBbVI73IOwcl4jxj/ksslPNXMKqvHv6tp3cpFCa4gxACrtL82+HHhhV2exCOUtkVqbOdAe6xaB8AjOz2xBCoTV0ZFvmOx/H0X+eIPQDzMImi0Qkd6Bq0D85lnGLkxPVSd6ehPfYcIn5/MLyGp+uKnHz/lVp2yKxBLngnG3eRlKhRLhqVCfOeBgo/aJQDQidGjcF4FowXEKVCCqbPMBPIsinzU6mvqVQzQ1TlmBm7lGEysJeNjU3h9pfHBlNvuw2h+pe1m92c9H+60qrREyD8d0qqhK5XAd6jS33SrKLrOxravjpc0VU1sV0oLYWAGmiDrsiYFcl753TSNT3Eqbw7H4WmK609QHeftvNJEW6H0RiBYc2KAEwsY23+5wG0tSpZPnOorqiMhT5vMN3M1pY+FagXDu3awepgar8BfHzmf7Lb8gegZOYa1U14xHMQPr85nkXzt1GItE4u9DqBSuY8YogIGb85mkOleidM3VY9sioKAIRAs+J4JokghEEJRd1hGcQNQG6nPp3WsRPK70qAvUUa67Nx7ViFR48cPLqOuGHDcNh2/tt/yCUHGvNGwC0SrT/b0gXjmaTu1sNE2UoE8WltzMYGVpVrnGGdoF+czQSOWHaPTi9xQQGiHghPJvrmI+1QgM4FhwG/VB7h9aBEQgWnC8ghII1fWadXFgP0P7cD4zNFYPjTpHefcL+DyYvmxhFmu7xOPHJnUkEn8ndBoB432VNiuhm+ji+K7tqQM2/aBwbzxBVFpKmW3FMw8CZ8euuzd+jkalaKkzdTo8vc26vcwPwcBq8xGk01+GSzYCfKlbIbIO+Q1H3uIt+FeSJs2RgAjEHAH6kT1QgbCM2UPPkGZ8KrcjpcJAzIqVo4ThuRcoDMbX64RxsA0v7odAqHX5cDSxwcISa0tB09ZVn5OtwlDoHR1/Y+vz4aNA2HwFqGm0H8zXwsB91edN1/FRsf366O0djBlkbDWf0wHgJWb63NTBxUkrT+np88ixEaBl5i8Ug3nVWDalzrBwfFmee+HQ4stxJZLQdwIiEL4jdl9BkALRIGaPOiYzB1A51LNxCkC/V3WCHKCiknJpXf2Icm9tlyR8EgjYHMSjmlMAeBeg/bvNEZ5FYtzKVIonNR2G20eBUMWrt/TOg5SqOr2tMlHGQXgcTD+d/sH4fYCWWnyljTPxn+bTqdvsvpAiscQ3wPgTi/uvAvxTQHty+h6/XX1MAlhSm5bumBxffLXb0BsiEO7/962YQwSiBUclSIFQ3XcZs8dMrACmb3IH3YqC8ajpIJiXyaBzc7nhH1fj9UsgStY90cRn39iHuKmB45e5SUUC1sMo3MmarjZ+3xuEQFREYtFBuuONs6UTHqago1DZJee8gnYrwMp72u2ljii9u0vndVZHlDYqTASiEaFD474IRAuOU9ACoRAsjw6crIE31lk2mvVgBbDdIP68Om/ZKqwC23gJ+ygQqn3U0xc/gw18G6D3Nxjal5j5j/PZVDIWu+iYAuvqeNXABEK1rbSPs4eUQChRe6eTqchAnomvrDrn2jZruY7PvPEFsh7AkU7qAKDOp7j2+GM56TXCqwiEQ9ItnkwEogUHqBkCUXlgqf0FzaBPMnEMwEkzSxulMBv8nwxsLmqhVPW6fvW51upBZkxOrdi588F9ZsQ+C0SpqrJ3+PlUsvah02aWTsp9IKLbFoV4S+XNuMak1uclpuopp9qrd+3vDkG7ksnoBUgFSaxsYqvloGcZtNUAfXdnZviXTk1izfWUQm93HjgDhNUWdYwDvJtYyxZhbCgcPGrM7ZJSdZ9EIFrwweKhSSIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCAgAuEBmmQRAkJACLQDARGIdhhl6aMQEAJCwAMBEQgP0CSLEBACQqAdCIhAtMMoSx+FgBAQAh4IiEB4gCZZhIAQEALtQEAEoh1GWfooBISAEPBAQATCAzTJIgSEgBBoBwIiEO0wytJHISAEhIAHAiIQHqBJFiEgBIRAOxAQgWiHUZY+CgEhIAQ8EBCB8ABNsggBISAE2oGACEQ7jLL0UQgIASHggYAIhAdokkUICAEh0A4ERCDaYZSlj0JACAgBDwREIDxAkyxCQAgIgXYgIALRDqMsfRQCQkAIeCDw/9PUXuoYdJv9AAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-53"><g><path d="M 51 851 L 51 
884.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 889.88 L 47.5 882.88 L 51 884.63 L 54.5 882.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-54"><g><rect x="1" y="751" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 801px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">ACPI s2idle driver notifies EC using _DSM</div></div></div></foreignObject><image x="2" y="780" width="98" height="46" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmAXEWd/+f7uiczAeVYlBtB13XXYz02yNxxNOEUCBB6JuEKKyooEUUXkWNXWQEPdkHzA6MoiqCQmTZcolyRHTNH94SNusgiIorKZYJsQITJzHS/74960z1587ree/X6mu7p7/sHMl1Vr+rzrVefqm99D4I8goAgIAgIAoKABgESVAQBQUAQEAQEAR0CQhAyLwQBQUAQEAS0CAhByMQQBAQBQUAQEIKQOSAICAKCgCBgjoCcIMyxkpKCgCAgCDQUAkIQDSVuGawgIAgIAuYICEGYYyUlBQFBQBBoKASEIBpK3DJYQUAQEATMESiZIA5ZnHh9zKafAHi967V/IZuPGB1Npsy7UlzJRCIR++MzeGeM6QQmHA5gfwB7zbRGeA6M3xPop8RIbt++6+bNm6+divK2np7Eqyam6HYQ3helnqfsFgZeAGOTZeGezMTUXZs23fqcaXvd3Sfsk+H4EIC/zddhwvnpoYEvm7ZRq+V6ek5vmcyOH87MJwNoBbAvgLjT35z8QNyfodi6Bzaue8JkHDqZMXBsenjghyb1o5Rp6+79NDG+5Jpz300NDZyua6O9u/d6MFaZlI3Sh3orq5nP22zwoWPDyc3lHks131Xuvs91eyUTRHtX79kAri4YCNHVqaH+cwBwJQZ5yCHH72EtaDqPgDMB7BbhHc+C+d9h47pUKjluUq9MBOF9VYaI1k8RnWey6M1Hgli06MNNTQu3fYxA/2oowywzbsvGrHPDMBOCMJnZc1emmot2Nd81d4hW5s0lEUR7e2IhYlgP0JGa7j2etXjJpo3Jx8vZdeedcawGO4vKq4tvmx8h2zptdLT/gbA2KkQQ+dduI+JTR4eSPwrqx3wjiPb2xN8gRt8HcEQY/prfQzETgigC1SpWqeaiXc13VRHCqryqJILo6Ei0s0V3A9gl19tJAAtcPV+dGh64plwjeXdPYu+mDN3AwKEBbapTwZ8AYoDV+PYGsNCn/IsAnZEa7v9B0EmnwgShurYVxMekhpKb/MY1nwhi0dLErs3bKamV47RKaSNAz4K4GUAn2FFfxjzYBGImBFGur64y7VRz0a7muyqD1ty1WhpBdPZexoQLZ8iBsAYMpVbKkQSPTrbgqM0bki+UOsSOjhMOZCv+YwBv8bSVZeABAl052WLfq3kXdXau2IfJ/gADn9KoMsaJcOro0MB6vz5qCYJxf3MTLxscTP7VZGw9PcftNjnZvL9t8TkE/POMjj1XmYG0PTl1tN+9xDwiCGrr6v13Ai724PYQE85KDw2Mesn63YtXHBDL2lcR4TgPUTxkxzNHjg3e8qRXBkIQJrNy7spUc9Gu5rvmDtHKvLlogmhtXbmX1ZTdAOBtqmsMPBFj62Tbsr8Nxhtz3S3LZbX/jpN+R5b9odGNyf8yuevIqTW+AeBED5xPW5w9bGRk/f/qYC4HQbjbbV+8ogO2fSuAPV1/V/r11emRga/r+jBfCKKzc/lbbYrdm7uIdoZKwH0TLZwI2kgoY4Qnn8Z5ILrUTRIEXDI6PPC5ynweZq3KJbUZTu5SsmhHx2wuahRNEO1diSMAun3mtMC43840HU9NU1/J7ZCnx1PiZXVPT098MrPXGgZ/ZDZAPJiJY+UDg8k/RQFOXYwuWPjC1QB/2F2PiPonXt71VJ2FU7kJQr23o7t3OTNunKX+CjiVzBeC6Ojq/dwrur/PurD/I7K8JJVKPhYmRyW75p1euJGZ+1xlH7KnYkvHxm7eEla/Ur8LQURHVggiOmZzUaMoglCL9kRmr+8CfFK+08S4fHRk4KK2zsSxRHSLa5dX0mV1a2fiUIscInLfI2y245njdKoFExBzFlB3EtDmKj9uMy8bG0ne522jEgThc8H/DFu0JL2x/1fePswHgujqOmn3LKbuAkiZsuZPD5FOAAUbE+Alm/mIsZHksInsK1FGCCI6qkIQ0TGbixpFEURX1/I3ZBAbJOCAXKcnX7kHeH96eGBDe3tiP8Rp0KVmUkWKuqz2WUS3Mawj08PrxkoBrK2z7wQivnn2pTrd1BzfsmpwcDDjbrsSBKHab+/sOxPEs1RKfrb684Eg2hb3vZlsVj4z++Twjby4a3FgWp4e6Vebkjl5hCCiwy4EER2zuahRFEEU+j7wg5Px+JLNgzf/WSmV2rv71oB59Y4BFXdZrbGSKlllle+TutdYsB0/BqjDBbz2tFMpgmjr6j2GgDvcg
memU9Ij/cr8c9YzFwShVDrxlm2tMVhnMHE3gAO9DmzEuCdD9N1Nw/2/CbsHauvsO5KIlaNa3iLpcTueWRzlJKi9fNY4DJZ6Sa0uxuOcXQEbK0H0D7kTbIaB/wX4Bp5a8K2xse//RQmpmgThjCtjHQnY/wzQP7mcQtWm5g/ENGRb/K0D9uZ0MpnMVnNRcVSAzS+8ly1WToLKqXTaYXXaMu1ehvXV9PA6ZanHUQiioOwOVexLHYt7u9nG5TkHS+VcqVSN9zPhmjwGYe/SrQV5jUgU/KY1K3veBCCRr8fAmvTwwMeD2lH3a088EzuYyD4d7Fho7vjOgC0EPAjCDQtifJu5UUyBc+9v45TpHhq65ZnW7sRbLMexk5Y489qRD29i0Nqp8d3udqvZIxOEdrH03DNo1AAzJ4wogLd19X6V4FhF5Z/IO86g93V09X2Uic8F048YfGdLnNM6AVSVIHy8o6tMENTWmeglov8EsJ+JzJQVFhOfMTaUfNivfPviFe+CzWcD/E4Ar2VgexNletSkNXmHKqOMI6gpu5GAN83UYTorNdKvjA9mnmIJorX15F1i8alLmaDuvKa9ufXPswB9IDXc/6O27t7zKu1JHd3/hx8hC2ebGnCY4u9TjjoWJ97LNq4BHDL1e7JE9APO2B+Nx7PNnsgAvp7UOoLITk31xpqbPgPGuRoTaPX+mTUnjCBU4cK7MR6LoenI4eGbtplio9GshBnpmOKW78KLIP48Mrg6zMlXM/9/iyy/BzFSloBX+Jr+e76lyASh833Iq5fyo1jUs/I1CzKZnwD09hlwI15W6/TVQHShmQo3qFxVCcInHES1CMLPWsgQx3Em/mh6KPndsNOEYXsFxVq7EosskLon2j33Y5aZjkmP9N9VKkHkVGADecs8gz6OM/M/W0QHMfDFHXMdZQ210daz4iDKZNe5724M+qaKZEG4avLl3S6MGl7GsH04p4aFz5+fMzwIIlR3kxsszn7Cppi6W8yHjjEnCPAgwxoh8Gd8yEFNvxkTeyOC6Oh7G1uO+jNvWRh5M1qoMvbXnCjCp5h1KYPV6cLr4xMCP4/Z8eyJQSdvHUEQsCY3T/38wgo0KJEJonBXr1+0O2b7SKgBR7qs7uhIvBMW3cfAa/JoEeNboyMDHzKdvOUqVzGC8MbwCbhwrRZBaK2r1ELDeIIJaQI5ahUQ/yMY6iTgnWxluSPyk513p6fMq+PI9gwPr/9dKQQR5GcD8COAlVbtv3LSPJiAt7pOF9vAGAXh/ZUgiMB+uWTC4F0I6MrFIvPAR9dOju+6ugIkQe3diX8B0xc0i9yLADY4Do/g14KwGIw9XB1TJvJvdp1QIxAEVNvKiXLa34rxe1j4qfP/Nt4DwkHue08TgtDdd5qoh/LjUfHEJjIvJQE6Ov83v1hpfpaU098VniPGzxikIlDENPMt3/zDZGeOGh295Q+6b0WzZikn5glX9ImtDNxPoJcAu805+RGt9YZHikQQXt+H6fHo7dC19wcRLqt1+nloVAnlIoGgdipBENo7EMJjyHBPKpV8ytufahCEsu6KLWhSPgpKt517eDBr4QO6kCkKl+0Z+lTO6c21e9Rf9pcqK+dewLbvyS0suebMDQv8DAB8zGcd/4yMxWd6x56zglPOfioOWOHuj8pzgvDx/3HiUdkxPk8jE2rtXvmPFmeuA+hgF95ZBn86PZy8slQZuOt3dq7otsm+0xVJQf38IpjPndy++w1uQnKCaj6N91lkff2VO5w3aPoRhSDy1ccJ+Mx++/A1rvsW6upa/vps1no+lUr+nypoQhCqnOYE4L5bDYROY4CxlWxaMjra/5CXrfWkyo/YjHNety/u994dBcy3e5rjfKKxWny6I1kCfZWz9sVuNZX6tmBlp7xuA5EIQmPC6itU/SWw+WV1wcWfS6dYzklu0lYlCKKts/csIifIoWuB8V9Yq0EQbV29SwlQMaHy4VJMfAyovTNxPsjZReYfX3NdE7x1ZXx2XZMAL0sNJ1W4l1lPlDsInSm1Oq2yzecE6Hr9d89lIgjNN5AF88X774srgi6gcxfZXwP4VBcoxv4mJjJydtwWbgbRMlf5rbCs41Mb1ylveO0TcCKKThDMF6RGkiqKbmBAUFOC0ESmNr471Rju3IUslnvnj0ZFqnDagCz35QnNBzpq606sIqavuU7tWSKcPjo08D2T+a/KMPCdqfHdzjQ9TRoThM73AWAtCPnO6hZ5vw/aO0BN3YqFAw77IMpJEDkd/+kg+n8e9YyvH4bqX1UIorv3XGLs2GUGLHRuzHQnSz9rrDCsfRcWjWNhVOdG3QlCN6/Dwp7k+zgdjfb5b8xyDFU/loEgdJgC5qoina9POb3ONRoC38Wq4NvuWtFKsNWdUf4eSRWJShDGKmtTgvCZC6FWSFr1lN4aUWPhiUBVkQc7TZga/abbJ35c2KV5wednTBCaI5RqLNC/oaPw4sfYTNUbN5+AP8PmQ0dHk78odpEptl6pBKH0ky9nxl8XB5Yy7I/pLD3CmL1KBDE7rwGQbI5vPcnrF6LBkdq7eq8HsATgPzDoF4B1Q6m+Kvn3dHQn3s9MyuvcvaAE7ohNTxAayxNlaXPK6FD/OpP5op/jpauYOrr7VjCz2hnmT5iRTwCFbcwyRzcZnm+ZwjtGc+2A3hQ+MkGYzk1jFZMabKGWJBwzzRzQkpfmhGJMqnlBmJ5y9AQR3cjHmCA0eR9C1Qi6ixvTy+qCxCrAjB1vSTO7iMpViOYauouoCkFMOw+6gxZW3CopTBw+cauKCrCoO0F4FwS/S2+/fups38twglA7xevcJxO1gUgPD5wRpk5x91NDfpEtc3Tj1n0PUf0GWjsTXRY5kaB3zr0j0gmCCRelhwaU/0PoY3qCUA1pTm6+asz8iwu1HXpVcQH5BNw5RplvOuy1GyTG99IjA6dFmUNGBOHjVGbE4G2dfScTOTsh9xPqWa3xgagtFVPotDQrwMD/UJZPDItFVA2C8MkOmH1FLfhzML5uZ+J3VjPmUe5ORDkNuoMaZhh8fno4eVXUEO06gtDshO9sju+cGBy8fruZBB1HOWPVnElGOZ2JNxM+mR4aUGM2fjo7j321TS3KMfE9M5XKYOgRFEnBtHMaf5ZoBBEhO2AUgnBOEV7/qwATfc1CnGXmE9IjyVkOsKpd71xTRhB/3an5+AfvvfElU9yc/nX3XkiMy3bU4YI5a+pQGvZeI4LQ6xvNjuH6RSf8OFrzdxBhyIb//jyUkG2+JszpRTVVDYJQR3+fUNzu0TwJwp3MWN8S32k4ykIaDslMCWrv6jsR4Os8SaGMyEG1YqhiorbO3huIcEr+zcWYUhd4iJd4B6FZgFX37gFIa9LohyuDmwhOGl6VwtV5ou70dW13dPS9my1W1m5OJsdi1L9vP+zUnV/18sStrpwgUQgi0kkoKkFoTje+xhqaS2dtWe1JE/wbwFKRqCM+jqPpIS6CKHDq058g9FEagl5uRBCa3bzxBZH+chvhx7bCk4fWISoiskUVL4OKSYVBeBrgJ8HYYCH2w+3bd/kfU0uCKhIE1GmxaYJu
IsZRBmBlAN4MxjezU5nbouTYDmhb3Wd8VOPtqRJBnZcaHlBWHKFpbE0Ioly7rIJFokSC8LF0MRCHQRFDw4OgljQm6EWpfz2nqSgEEUmbEJUgNBoT3/VK45ejvdQuwxoSJJIC/E3mv8FsUeQf/OitKcJqmfwebCuvYXGVIK4gpILJm0otU+oldanvryZBqHdFD+vgjFDF/UkzrE/l4+1EHbdjyrrT85drwidEvgsx+UCEIKJKaLr8fCcIZ4xeJ1aNmklDJL4nm3lLEBpriuJmVWGtwFOI7phdzPG/HJ1tNILIY+Y4wmVxIjH+JWd5ZRISIKPMKSfGd/tSlBNSQDKn0PzTOhkLQfjMfDlBKGBCTyAay6QC1VGh6t1fdT4vCcJHPVSONTffhu9ltd4CKrqZVlBn1WVgBpn1SodKhDvItu4fGVmnAsfNUmE0KkG4sXNUT9tpMUFFN8VhnrAJXpgjee629pywv5WJ/UATa+hpsvnE0dFkKuqkm2cEEbqgRcWnlPKNcILQ+DYUXD5rLp19c5uU67RqKjeT+W/SVqCKycfG+zkwFZFjmlUoBnVZ5grJEHxZrbmojnQ5FQaALhyI7pQiBFGApJPn20b2SBA+BNAiTeRTEy9sKP8a2HzbrOis0wxtZN3lJ2PTD8TEqihsHhWoQ0u8g9DEIZuz+zfd2DV3JJEJTHNpWzN3EPkxey0w3abGGkszv9AaTnOaS3llMFCx2HKm8z9sbgcShMb3IfRy2e+FPsl/AtvTEZQCdUHT1o8YOG+Fjr0wbwW0TlJCEMFQTsdIyt4AUI+rZOii1tm54mCbbBXRc8bKJlf/bmT55JDQA4GdMv1ACkwGA9K++r2wwJS7RILQhTT3C/wWNskr8fv0iS++EcDrc+37mnb6vV+zwNYcQRRYYLr8FgrD0gRHlVA4dHT2fpMJH5zBpIi5ZipP0/kf1p4vQfjEUoocI93dAV0GNcD/stpHxfUXi62jR0bWDYUNLuj3zs7lb7Uppkz13IuTj4laQfINtcW9v7mJl5km8Cilr6pupc1cp+8ALBWJcvF0dEfEYrDf742S6jeO1sXL/86yY8pkbyZ/RNCilssN8YPCwG10Y3Pc/mipuJp+IJokRqEOoB4MCpzaSnWU81GvBoa10e70p3Nn3EPA7q+oBn9tA7+ybP5OqdEIdLvhKJFPVV+jnEKiWiF5sSi2vmb9mSHCAstOAwMazfoXeOrw+dby4TpUUqInAfoFs31fS9Oz692bZtP5H7Yu+RKEzoqo1FguOp+IMM9VXcRI01g5foP3iZIJvwWtEU4QmlSxkXaFUXSsPgHbypq7wPQD0aXIjbJb1/r5lHiCUPNWc3qPvDHq6O49hRkqBEreuMA4+FzYwqFR//4qY1mHP7Bx3RNhdZ3ddFfv53I5JPLFa+4EMS2HxBEAqVOuE8BSEWEc8c+5c6uHrWH5Aeo0Igxcmh4e+DcT022nP+2JNyJGKm/F6/Lt6sjZdP6HycqPIHRBpSLrGTUv17WrigVcVvfEJzN7rWGwyvDlfkwiIBZ0wS8Wu9J5W/bUYaOjt271VmoEgtA58gQFw/NipF1oNbmifaKAZgm4dGJ8t8uiWD4FTe4IH4huTprGPdI7FpaBIHQnXOV5O9HCic0bkqF3gDk1kAqI9zYXTj/LTk4dVg5/lVIWO90iFzFYX6S1qNgThHPSaV25l9WUVbkrcjjyGDMuJyKVWjQXJsQsvL1vBFziY1JDSZWKNfBR36hmLRxnWEelh9cNuitHmP+B79QShN73IVzHFjZAB/DCGCyzsj/p2vAPERwtraLSlTfZ9nUu783861RmsBU693hVoBEIwtnVFQaIy4L4gtRQ8j9CdjgFIb/9dlU+Yc7LntAmygeiVzfyYCaOld74+K75mU/L+p2CpEllIIjpgHa9V4LxidnfRLgKzslWZtGaWfpuQOWRWJ0eGfi6yXdqUEbXv1B/lZw5cz+ApZ531OQJQvXRo056nggbmXFsrv+RTtq60PIMPAqLjktv7P9VAO76+cZ8u8qb7o3GEGX+B8laSxBa3wcDHZvBpHI8dRdsx48B6nCVD738bu9OHAImFVfGHZdHNZF9JeXpAwS6crLFvte7u1InhoULX3hL1sk9jV5NBrTQRbBRCEIXIhpAhhhrM1NTl+h2niqHMzVNfaEgeQ7hK6mhgU+6iUWf8AcP2fHMkUHpE03mlcmpzy9h0PRirMuMpt+AOP4h2kRJuV6UhyDgcwpQkP43I7Za55CoEtITkwr0p+6RZp4opw9TvH02bs58yWaaLh4b+/509sHph9oXr3gnbPsGn5SuNUsQPsnP8uMystbLF/Y5Baift4H5PNi4qSCHhH+e9K3wOX1UjCD0MUOipQsNm2Aa/aqaPzc1x7esCrJOygnqBxqrF/eX4DLDZZWWcO+AnK8ZEF+4/964MjQByxTdDsL7Zl40zy6p8+PS7XByv6mwGo+BaAxMEyBuBnMrQG/UmLhqo9Pq5R42W8J/190ZRP1AAtNAAk8yMOykW/VPteqeg2XLSR2wMXLSU4Kx0UnrqeSxI92mBzT6HSz7uNTG5C/D0YxWIqB/4yD8Aky/DE6HOvO+miUIn01tjvboam+azjAE/e5Ac/VmcJv+t/0OHzNydVr7VHoouVb3vqjz36/PBScIre+DweIdBor7d59gZEY3+gFqoihdUGWftplPHxtJKv1iYGyfRjlB5Hd6msxVUbB9iC3q9R6XAz+yKK1rypaDIFSzjpwzljcTW1jvXiRgLUP5g+TyVZTpBJF/sb/FV1jX1O+KHOjE1MZ1PzcpXUwZn6i7gU0x4ccWkHGpamqWINRANJfy6s9FX/pPfw90LeBoNaI+oXHJKkYQpWSBMx1lgId2aBhw9Q6Vle2pLXiPbdNlr6TIfHfACUHXpecJ+M/sVNMazxHYt/sNRhAODrmInd/2UQfosBpn4Js81fSvOlx9Ek6ZTpmwxeb89NDAl92Fiv1AnIx/z5D6aL8K4LXBHeRHyLZOy1q2bYHuqxRBqD7kVHmfp2kiWmgAnJ+qx6Bq9CJq4xbL2lcR4biQ7zEDpqtg259FnNaCsSr3tpomCP3GOTwqdRCSrrl2hds8PKiOsuBk4jPGhpIPB5Urdv5725x1gvDZ5UXSsZlOLa/52HS96IA7F+rxzNEgej9A7wBYfdSvnunH9DH89wT6KTGS27fvujmqpUwjEkQOPzqkq+/v4syrmHA4CAe5Qmyo4Hx/IuBhEG6YaOYfBlnXVDJCablOEF6CmczSccz8ERD9/cy4nfnEmxi0dmp8t7vVXCp3NNegb8hRT0zQMWCcxsBbPCrULa/kof4tGN8uY3Rd009alaPW7sSbLdDHwDgUwIE59aOjnmRY67MWfSNvClur0Vx1A9b5ppRq9p9/j1Jvxlu2tcZgncFkdwK0v2sT8CLAf2TQnTbo25uG+38TpvFQ7VaEIKLMBCkrCAgCgoAgML8RCA33Pb+HL6MTBAQBQUAQ8ENACELmhiAgCAgCgoAWASEImRiCgCAgCAgCQhAyBwQBQUAQEATMEZAThDlWUlI
QEAQEgYZCQAiiocQtgxUEBAFBwBwBIQhzrKSkICAICAINhYAQREOJWwYrCAgCgoA5AkIQ5lhJSUFAEBAEGgoBIYiGErcMVhAQBAQBcwSEIMyxkpKCgCAgCDQUAkIQDSVuGawgIAgIAuYICEGYYyUlBQFBQBBoKASEIBpK3DJYQUAQEATMERCCMMdKSgoCgoAg0FAICEE0lLhlsIKAICAImCMgBGGOlZQUBAQBQaChEBCCaChxy2AFAUFAEDBHQAjCHCspKQgIAoJAQyEgBNFQ4pbBCgKCgCBgjoAQhDlWxiXbunqPIeCOmQqM+5ubeNngYPKvIY1QW9eKQwj2xwG8D8BeufJZAH8C+A+wYqvZtvcvsn3jMcyXgj09iVdNTNHtIAfPSj2/jVOme2jolmeKfcG7F684IJ7lo4h4OQN/D2BfAPFce+MAP8lMY0R0x2SLfe/mDckXin1XI9bTzQMGjk0PD/ywEfEwHbMQhClSANrbEwth4QwQJlPDyWv9qhZDEGoCb8/QGgJOAxDTtU3An2HzobZFBwhBmAmulgli0aIPNzW1bDuBCJ8D6B/MRuSUyjBwY9ayPvvAxnVPRKjXsEWFIIoTvRCEAW6JRCL21J+so5ntL6oPmQnnp4cGvlxGgqD27t4rwfhEUHcYeJSnYoupKXuIEISB4ADUKEFQx+LEe9nGNRGJwTvoF5noogP2tr+WTCbVKVMeHwSEIIqbGkIQBri1d/euBeOsfNFyE0Tb4r43k80/AbCPpzvj06olYoCbATz00k7Ny3d6eeJ9QhAGgqtBglCnhuaFz5//ikA/61IheQeTUynShPMD8a5g7OEz4iwI30SGP5lKJdV8kUeDgBBEcdNCCMIAt/bu3uvBWFUxgvDeWQBZMF8GG1/UffTFqLAMhjkvi2hPEITvpoYGTq/2gBU5LFj4wtUAf1jz7qfAuIabrJvTg+v+AIDdZRwV5JR1GBFfAuBthfXp2snxXVdv3nztVLXHVQ/vE4IoTkpCEAa4RSUIgyZnFWnr7v00Mb604488FkPTkcPDN22L2paUn41ADREEtXcn/gVMX/DcMY0TcAlneY3JCcBRdz5DZzPwRQALXaPNMmN1emTg6zIHBIFyISAEYYBktQmCGd9Ljwyoy+pZu0iDrkoRDwK1QhBtnYljiWidZ1HfysDJ6eGBDVEF19Hdu5wZN3ra+yOyvCSVSj4WtT0pLwjoEBCCMJgX1SYIzJEKxACKuitSCwTR0XH8nrbVdC8B73AB+BciPml0KPmjIkGltq7efyfg4ln1CV9JDQ18UjYXRaIq1TzTSQAJRUAIIhSimi1QCwTR3tV7NoCrZ4HEfEFqJKnUikWfEh3fCdu+B8CbXW0/nrV4yaaNycdrVijSsbpBoGwnCM2HOMt56JBDjt/DWhBfRaCTALwJwKtzKG0B+GcMWjs1vtvdxV6yKd3sE8/EDiayTwfjUAAHzliJEJ4D868J1J+divWPjd28JUhCkU0jPTv+sEvk1q7EIgt0H4DdTWeK23IqrP2wNgOxArYQ8CAINyyI8W0Gzn3a1y1amti1eYKOAeM0Bt7udfoj4GFm7s9OZW7btOnW58L6XOzvc02Kf+WIAAAgAElEQVQQCocF2/FjgDpcY3jInootDZuHJmPu6Or9XM4iKl88S0SnjA71K3VWyY93cxT1dFts/XLPnyiX1N3dJ+yT4fgQgL91APQ4uipijtl8JoFPAHBQTs2XszzDRiZcc8DenC7W9Li9PfFGjtEHCTjW1X4G4McY1vqsRd/I+7+E9bXUCVBxgnj55df8eUHL8x8H4d9cpODX72cB+kBquF8du412Vs5i9zQtJ8LlMwINRiVDROuniM7zczKaxwQR1f7+RRB/HhlcbXKBqmB3sMvS58E406Mf95NKhhhrs5mmi8fGvv+XUie0t/5cE0RbV+9SAtR8XpDvGzEuHx0ZuKgcY+3o6HubbfF6Av6HCHeQbd0/MrJOeXQbfT9hfSh2gc+3G7V+peZPOQhCjWl7hi4j4KMBJsq5ofMjZFunjY72PxCGcf73d/ck9o5N0dVEOM7PWTZXVjlKfq0lzhdls9lXB5GZ6bv9ylWUIJDNHo1YTJnl9UboaIbB56eHk1eFTXIT7+OA9z5NFp86ujF5v9GiEjSAOjhBKC9wilmXMliF8dB6avsPkcfsePbEscFbngyCob098TeIUT+ApRHknduk4VFYdFx6Y/+votYNKj/XBNHR2XsZEy509fElm/mIsZHkcDnHWam2oi7w3n5EqV/J+VMqQcDOno2YdT1ArRGwfpGZTk+P9N8SVqd98Yp3weYfAPyGsLKu3zdYnP2ETbHb/U47EdrSFq0kQTwF8IMAHel68zgIvwDTL9UixeCDCXirho232eBDx4aTm/0G6BxBt1OS4aiTvM+LBKQZ9DjIcTDrBOP1moVRK8CentNbJqZeXg2iv5tu2H4vkP9/5w+bAPrFzEvZ3pQaSV6X/3eYCqitZ8VBlGHlNZ0zU+R3AjhkxyD4N4D1X+5BMeO29Ej/XepvYe17wQi0vyc8Rwyl4lM66yCZPEx25qjR0VuUjX7BE/COcTA/ArJ+BuXfESgPHpxswXHljDM0lwTx9sNO3flVL0/c6p6jeW/4cqiXSv34TepHWeB17ZnWr/T8KYUgWJ3OAOW06PpGYbrG/CpjWYcHhUTp6DjhQLbiPwbwFg+G6qTwvwT6b/V3/XrJjwCkYrZNq6vN476ZiB+VJAh3BxSYly2I8zVenbbS5zXZ9nUFCz3R1amh/nN0p4ienp74ZGavNQz+iGeUTzPTxw7Y177dq/9z9IZZ+yrN8W0rLOuI1MZ1P/dDLOolddQFvMAPIsSKKWL7Pvb3/IjNOOd1++J+L1bT90VNykJGqYncp417muN8ou5eoq1rRQ/BVpM8b5ufZeALLXH+kq78IYsTr4/b9A2P3MuqP1fynEuCKNAPT3/BdzbHd04MDl6/3egLneNCpgu86bfjd4dR6flTCkF4xvYsM507tX3XAe99aWv3yrdbnP2+xpFxdWp44BodRk6/MvQDAIe7flc+LbdlY9a5XmIJWMemq9chQagF+PjUxnWjfpNIexogPIYM96RSyae89Vo7E4daROpY5XIUMlKDUHtXr9IfXjGrLvPtsLHST89ezwThcyG+AVnuS6WS/xew/lBbd2IVMX3NvegT4fTRoYHveeu1dfV+lQBF6NNPAMHni+QucG8DqGdHe+VdQOeSIHTYM7AmPTyg1Hx18VSPICo7f8pEEA8jy8uC/Ey0p4GARbuju/cUZlzv2ohlCbh0Yny3y/wMdnLhWi7iaRPn2eriOiMIY+/O9q7EEYCz6Ocv854nmw7zXvKo08NEZq/vAqysofJPFAchnf34uM28bGwkqSyLCp46Jghq7+5bA+bVrkEFqoo8g9dgxaOTLTjKrQbyUaUYhVL2yp2BJ+LI9gwPr/9dOVbQyAYHUV8acNrTEkRIoMeor690+WoQRDXmTxkIInCNcMtBY9b8uB3PLPbe4Wkt3EI2qzObKxXTa6cXbmTmvllzoK4IIu
AU4J3YXV3L35BBbJCAA/K/6eK1+wS28z3C6T6g1taVe1lNWeW9OhPThoHvpIcHztCptOqVIJQaJ2aTCgKo7l/Uk/U7AfgtNJo2Jhl4v9v7V/vxMZ2SHulXx+3Ap7XnhP2tTOw+gNRd0S8Y9HAT2V8dGko+G1bX5Pe5JIgCVaCaXIRPpocGlAFGXTzVIIhqzJ+SCSLCwtvameiyiO5+RVOxc07I2jtVTbm/kM1HjI4mUyaTQ1mwseUE+dxzpnyEfpq8o9J3EMnm+NaTBgcHM2Gd6eo6afcspu5yWwloCaKz72QinlFxFLvjLLQu4Qcn4/Elmwdv/rO3r/VKELnwDsqCYvoYGoGw8xhMn9j2vAlAIv83jZkmtXX23kCEU1zkXhGrpLB55P295giizpLUVIMg1Mys9PwplSCimCZrNrtagihYgyIu7rpvs67uIJhwUXpoQPknhD6mAuzo7P0mEz64o8HidNYa+3Rfy6l6JQjvBCTgvr/u1Hz8g/fe+FKoQFwF2rp7LyTGZUGYd3T3rWB2iNutEx1n4HYCf7c5vvPgXFzMzjFBFPhAhIWKjyKXapStEkGg0vPHdH1RmOqMC5hpuYm5qk/9grXFsZTMvJQE6OiAjVeoiNu6e88lxpV1eYKIcpw2EaAO1GIv/UxVWgr4eiQI7e4CheazoTPQKVBghlsQbTZn+XQnAW0+bU6b7DH6OUa3HbCX/WixnqZmfZ4uVXOX1HIHoQ21Xun5Y7K+5OeVhiCyzHRM3sw8bP5p6hcQxKKela9ZkMn8BCAVZWD6K4tAQvk6bZ19RxKxSps6vTGLeAoJG0tFVUxRcr6aCFBbpsgPTrtL8Dn+1ydBVDQXszYHc2fnioNtspWhgcqnHPa8COBOsun6iYld/6vYECthL5lLgtBtQojxrdGRgQ+F9btWfq/WCUKNt5Lzx2R9CSCIUL8st7xMCGL67i2+0XU/qFzfjQw73O8qMIQQgpidgL7YI7uWIHzIRgiiYLnSEoQqFWqnrV/5VCa0b8cpc9nQ0C0qTETZnrkliMJ7NfGDCE7WVKn5U3MEURiPLRIJ5T8QIYjhAXV8cp6KnyB8LG+EIMwJIl9yOqCZ/XECTp1lZRG89PuGPymWMeaSIHRqPvGkNsvmV+75IwRR3BckKqZ8xMZp+1btEW++EESxp63iptZMLWrrWXEgTfHhsHgVGCqsiDsTmrf5UM/2KP2ZS4JQ/Wzv7DsTxO4sb2WPxdTembgEhMNBWG/Z9o+3b9/j0XKp7KqpYvKRa1nmT80RhKiY/Bdc3UQwEWA5L6k1/hS+F1H1SBA656Na0H8rL9CWlufelKXYqcToAznhkmc/Bl7YpiQx1wShc5aLYjIZNk6ds1U5Tyk1QBCzICh2/pisL/kXmdwhBMnFpL7yxaKm7EaaTn3gPHJJHYCqqQC9IR2KNt303P4T8GfYfOjoaHJHEL5cf+uRIFTXC0yCy3yBFbZ4GfxO7d29h4Fxg1sNVc4Fbq4JQkXRRQzrPUErQwO4GWDnFNFEIFBuBTc1x7esMvE/CntPYQgVMxWRale3SYmaTyKkf8bzx3R9Ue8zWeBLJQifDVzkMPAFJ9Qyf+N1pWJSQmkTRznjaK4a9cZWsmnJ6Gj/Q2ELg+v3fLgO5Sj3pIpiy2zf19L07Pr8AuScxtg+GkztABYxMNoS33qq6QLV1p04nZi+43pnURd2pifTMi9SoVDqbPwZuDQ9PKBypBSdt8EhHws3g2iZqxOTAC9LDSeVJ2/JjzeQZJR86bpdsg77asyfWiOI3AbOEwqe70IWy81zrxQ6sTa0matDEGoxsh338n1mjmYRTV11qRrnY6gNnSt+1IVJZbdCzAnX8boZvD0B50r12NaoYeYVQfjY+P/FYuvokZF1KnNZMY8+Sm+Zd5AFkYaBn1q8/ZiRkTuUmXLgo0uWpCWIzsSxRFS0x7/J/KlFgtDgEynUhu7bbHiCqFCwvsBdV72qmHx2mFtBfExqKLkp7AP3Cas+zrCOSg+vG8zX19j7R4r5VEAwgDa4WVh/a/UEMX3ydRZBlQbUfUEfGunYb8wd3b3LmXGjpz3jgHKmWHpP7ACMTqFOxFFdMDlNcMNqzJ9aJAjd/RER9U+8vOupYUYGvikPyrxBqDsVk5rYunDfKqmHZWeW+SW0yX0Q+nDfgG+eA1XPSxBhl4wR8zWgkvkgfLAyiZNEbZ2JXiJH9bNjUdNEm/QhbSMi0of8hnEMr7CFbq7vIPL9C0iIEymtq0P6cZwHpn/1Jtoi0NoF8S3nmKr2wrBzvjVt/nS6dnJ819V+i5hKA/zk0zgPRJcWhKPWEEQ15k8tEoSjZtKE+wbzxfvviysCIg34rWON7Umdn9ABCYNUBNCP778PD3jBVRdPWcSvYMYKz6QNXcgKLuqAwLjwtUQQAVhtA/N5sHGTV+fZ2nryLrH41KVMUAmZ4q6FxBerzs4V3TbZdwLYxVXeN7mKs/h0J95iMb4L0MGuOmXdBdcKQajxhWRBfAqMa7jJujk9uE5l7XPfTVBn54p9bCt7MphULon9vIu7MtaYaOFEObPxqXf4xLJSyaC+wVNNF3jziKvov1aWrvDNq+wTHr3S86dWCcLplyZhEBHWxZA5z+s4GpDMa3pKyAliGocypRwdJ8Kpo0MD64N2U5rLXlVcRah9GqAMMTYuaFr4kXwwuloiCAOs3GlgVXrVdwC0SJMGdpyJP5UeSq71wUpZlFwJhkql6n1eBPiXgJW7HOfXqs0TAJUq0fME705Ndr3uMhUP1pd/meGHmcu7rMKgHxEwFuVZ/ieA2AkQDuwd4jtyN7J8ckgCqKjQzZRv6+w7gYhvduVqyf+m+jkC0O98UsmOg+lqEKvgmtMpMf3zZ1R0/tQqQShIOjuXv9Wm2L2aEDUGKUcxOUsuhvPQdDLUpYopP7hp9QSpBatXk286DAO1u11lEoArQIC5d/Cs4HW1RhB5kliwna7NYRWGjfd3tRCclxoeUNnlfK1upndD1tcAVt7TUR+1K72hJc7n6FKURm0sX77WCEL1K0hNFHGcGRB/HhlcYWr5ErF9p7ijHtvp+cvBODfCd6Y2FB+N2bGHbLLV4hdGENOnlQrNn1omiGmSiBTHLC/GAYecwZ+ZkasQxOwp7ug7nyFFECqNaMHRW/NBZAj0zYm49W+63A8+H5Au/aZLJrOzoNUiQajOFoGVYoM0E58xNpR82GRxyb3jrFdOIEr/vJtJHQAqrex5OtWgYX3fYrVIEPnOTidjsi7KZUcM8i73ji/DjNtj4ItGRpK/LhUjk/quufNVAOoEGPDwIyCclRpK/rTgDiMk33ql5k+tE4QCM0IcqgwBl0yM7/alpp2eV+G+v5QXRrF+YX7CrOsThHtQjtVE8wvvBWElE3cDONClJtlCwIPM3J+dyty2adOtz5l8FN4y0zpzUnlg3+dRj8wKn1CrBJEfj8Iq3rKtNQbrDCa7E6D9XSoMpQ76I4PutEHf3jTc/5tibPVny6PgHeMAP0lsjWRhX5fZvvtYmNVGMfJSdWqZIPJjykUI6HnFx+Q4gLsAeo1nf
jl4MdMYEd0x2WLfW+67BlN8FZ6TWTqOmT8Cor8HY49c3S0A/4xBa6fGd7s7L8+oBOGeozu+59LnTz0QRG7s1NqdeLPF1ocB+3CA3phbxzIAP8aw1jfR1DX5u4moBi6mcnYRTtQqUl4QEAQEAUGgFhAoSApW5nDyZTtB1AJY0gdBQBAQBBoIgcJUrRGdhsOwEoIIQ0h+FwQEAUGgAgjkVNHfnA5hg/8morsWxLb8yNSPRYUysZqyGwC8Lde9SJnvTIYkBGGCkpQRBAQBQaDMCHjvZxizjV3CXud1sotaP6x99bsQhAlKUkYQEAQEgTIj0N6e2A9xGgRDXURPP8wXpEaSyiopMIhje3fiEDCpZGp77uhW+aL45tsUgiiz0KU5QUAQEARMEHCiHEztuZYJypEw/yjH27WZqalLdNaWyjqwqeWFXiK+ymNuXNZEW0IQJhKUMoKAICAIVBCBIC9qZdYKojEwTTgOimz/E4j+QeNVHxbloOgRyAmiaOikoiAgCAgCpSOQC/utwq+41EXG7b4I0OrUcL+K7Ft0bhG/twlBGMtBCgoCgoAgUBkEInhR5zuQfSWeV9om/rBplINiei4EUQxqUkcQEAQEgQog4BCFnU0QsBygv53lUU94Dsy/ZmB91oolH9i47okKdGFWk0IQlUZY2hcEBAFBoE4REIKoU8FJtwUBQUAQqDQCQhCVRljaFwQEAUGgThEQgqhTwUm3BQFBQBCoNAJCEJVGWNoXBAQBQaBOERCCqFPBSbcFAUFAEKg0AkIQlUZY2hcEBAFBoE4REIKoU8FJtwUBQUAQqDQCQhCVRljaFwQEAUGgThEQgqhTwUm3BQFBQBCoNAJCEJVGWNoXBAQBQaBOERCCqFPBSbcFAUFAEKg0AkIQlUZY2hcEBAFBoE4REIKoU8FJtwUBQUAQqDQCQhCVRljaFwQEAUGgThEQgqhTwc2Hbrd19R5DwB1FjkVl0noWsP8ExgaOWesO2Mt+NJlMqkQqZXtUDuDmnbd1s20dC9hLAXodgFfPvGA6Rv+fABpm4EdTLbxx84bkC1E70N7dez0Yq3T1mOmU9Ei/yjhW8tO2uO/NZPNPAOzjaWybDT50bDi5ueSXSAPzBgEhiHkjyvobSIkEoRvwU2D+Imxcl0olx0tBpLX15F2seOZiEJ81ixDCG80w4/YY+KKRkeSvw4tPlwgiCADJ5vjWkwYHBzOm7fmVa+/sOxPEX9f8LgRRKrjzsL4QxDwUar0MqQIEkR/6Q2xRb3pj/6+KwILau/reD/C3Aby2iPr5KhkQfx4ZXGFCVoEnCOCJOLI9w8Prf1dCf9DTc3rLROalJEBHC0GUgmTj1BWCaBxZ19xIK0gQaqxbGTg5PTywIcLAqa07sYqYvgZgYYR6vkWJ8S22+Zwwkgg5QaAcaqYA9ZLqv5wgyiHwedaGEMQ8E2g9DUdHEAwcmx4e+GHIOGhRz8o9Yhn7b2KM94LsDwL0LgAxT72tID4mNZTcZIJLe3fiEDCpd+/pKf8UAV+HTbctWDDx5ODgbc/nf+/pSbxqctI6CBYfx4BSR+1X8C7mC1IjyS8BYF/VT8AdxHQduqk5vmVVKWqmAPWSEITJBGnAMkIQDSj0WhlyCQThHQK1diaWWkTXA9jX8+NDdjxz5NjgLU8Gjbu9PbEQFm4G0TJXuSyYL4ONL4adAFSdRCIRe+oZOpuBL3pOIKG7c80J4kUAzQAW5PrzeNbiJZs2Jh8vRn7O+GJYD9CRufpTACwXqYb2sZj3Sp36RkAIor7lV9e9LyNBODi0tyfeiBjdDuAts4AhfCU1NPDJoB18R0einS26G8Au+boMXJoeHvi3oHoaAeTVVN+adaIhujo11H+OX1tegmAgTYxJEBbn3pElolNGh/rXFSP0jo6+t7HlWC/lT0e3AugBsHuuPSGIYoCd53WEIOa5gGt5eOUmCIck9GqirWTTktHR/of88Gjr7j2XGFe6fn/InootHRu7eUtUDLWnEcJjyHBPKpV8StdewQmCcT+Au0C4Ykf54tVM7V29ZwO4Ok8GzLiQCJcLQUSVbmOVF4JoLHnX1GgrQRBqgG1diU8S6MvuHTwBl4wOD3zODwCNiqck09KO7r4VzPw9Vx+eJ5sOGx3tf8CYICy+AEy3uXwWilIzFaqX+C6bcblFpHxQ5ARRU19FbXVGCKK25NFQvakUQXR0HL+nbTXdS8A7dgDKD07G40s2D978Z5MFmhnfS48MnBZRvTTTtMtiyALoJQZnYmx9cGRk3ZDJ+8G4f7Ip1rcgk/mOyyy1KDWTRr202ganLdB9QhAN9clFHqwQRGTIpEK5EKgUQaj+dXT1fo6Bz7r6+pLNfMTYSHJY1/+Ozt5vMuGD5VAxFYOPTsXU3MTLJqask92ObQx8Jz08cEYU4vKolxx1W9aym4UgipFUY9URgmgsedfUaCtJEK2diS6LnEvnnfODZsL56aEBpXoqeNo6+04mclRC+ScL4gtSQ8n/iLIYFwuwH0Fst60DZoXGCLnL8L5fp15CFsvtGN4iBFGstBqnnhBE48i65kZaUYJoXbkXNWU3EvCmGYIIUBsdsjjx+phNysrn9S6gMsRYm5maumTTplufqySAfgQB7JzxeD9nmfmE9EjSKIaVTr2UGh64prUrsUgIopISnR9tC0HMDznW5SgqSRDKgW1iim4H4X0z4DDuV2qbwcHkXzWAUXtn4nwQfUHzmwqbcT/Y+tZki31vMcH4wgTkRxCqr14HtyhqJp16SVlzCUGESUR+VwgIQcg8mDMEKkkQalAay6TfxinTPTR0yzO6QS9amti1aYJuIsZRIaA8SUwbbOJbpuKxlN/FdxRggwiiIESGoZpJo166szm+c2Jw8PrtQhBRpNO4ZYUgGlf2cz7yWiMIBYg6eWzP0BoClAWTN3SHHjMn5LfyW6BksSeMIILQBNkzUjMVqJeYzkqN9H9DDUIIYs6nf110QAiiLsQ0PztZaYLo6Oy9jAkX5tEj4M+w+dDR0eQvQhCljsWJ97JNXwXwtojoZxl4wCK+dL+9cbdpfooggnBOQ94w3SGe2apOW3fvp4mhYkCp5xm2aEk+wq0QRESpNmhxIYgGFXwtDLvSBNHW3XshMS5zjTVSOAkVW+mJZ2IHA/aZBJwYMS+EMn56BISzUkPJn4bhHUYQmkisgZ7ehXcwPKNekhNEmDTkd9emSsAQBOYGgUoThOYOYtYuOsqonUB8T1kH2Za9jIhOBOOdhiHB1QX3hfvvjSuDThNhBKFRM00CvCw1nFSmvAVPwQnBpV4Sgogi+cYuKyeIxpb/nI5+Dggi8JI6Chh5wmDCUli8KoQwQn0qwggiqprJrV5iTcIhUTFFkXbjlhWCaFzZz/nIK0kQPT098YnMnjepKNz5gTLwKE/FFhcTgC8MLJW7Ot6yrZUsuoAYhwGIe+psY1hHpofXjenaMiEIUzWTxsS3IK6UEESYROV3hYAQhMyDOUOgkgTR1XXS7llM3QVQ68wAg/0gyoZDa/fKt1uc/X7hBbd/NFYTgtCpmV65EH+/N2ued/HXZaMT
giibuOd1Q0IQ81q8tT24yhLE8jdkEBsk4IA8Cir95+jIwIeqgUpHxwkHshX/sTs3hU7Vk++LCUHo1EwMrEkPD3zcPaYw9ZLcQVRjBsyPdwhBzA851uUoKkkQbZ2JY4nollm+DJ6LWgVaT89xu23PLFCWTksJaAKwd27HreqW9GhCfk/qdvzOwu9NOepz2ilUM82OUmuiXhKCKEmsDVVZCKKhxF1bg60gQVB7d98aMK92jVhr4rqoZ+VrFmQyPwHo7a6TxuWjIwMXlYqW5s5AJaXW5tw2JYgwNZNHdeQbHlxUTKVKtzHqC0E0hpxrcpSVIght4D2fHblmwYXyig6I2WSMZWvPCftbmfhGdwDAUgkiTM3kcY7zTTAkBGEsxoYuKATR0OKf28FXiiA8i2R+kKtVFFPdiDW5I8Zt5mVjI0mVUKfoRxNy3DernOkJQnVGo2Yai6HpyHh8amp2gEL/S3EhiKLF2lAVhSAaSty1NdhKEIRPTupfZSzr8Ac2rntCSxAdiXa2nNwRu+z4nQcnW3BcCZFbqb2790owPuFq0zerXRSC0Jx6nGRIIIy7QngHZp8Tgqitb6FWeyMEUauSaYB+lZsgdJZDALIM/nR6OHmlH6TKh6F5pxduZOY+T5kfIMtnplLJ/4sqjo7u3uXMuNHtbR2UFzsKQejUTMS43Lbwgiv2UmD+aiGIqBJtzPJCEI0p95oYdbkIQnk1P/kMDgXoOgD7ugdHwH0TLZwIOwm0L17xLti2OkXsORscfoTI+sx+e9t3mgTeUyG2KUbn5NKdLnS19UdkeUkqlXxMB35UgtBcgG/Otbto+r/+6iX1qxBETXwCNd8JIYiaF9H87WCxBDEd5iKzB8fiBxHTYcw4A4SDNEg9THbmqNHRW/5ggmJHd++HmPE1jRe0qv4UA98j4luaY1OPDg7e9nyuTVrUs3KPBTa/CWyfCsYKALt53jdOhFNHhwbW+/UjKkFoL9d3NB4aDlwIwmRGSBkhCJkDc4aAjiDK1xn6HSw6MbVx3c8jtEltXYlzCaRCZHtDZURoZlbRDIPPTw8nrwrKbR2VINQbCkKA519rkFBICKJYcTZWPSGIxpJ3TY22cgTBQxyPnZYeXPf7IgZMrZ2JpRbR9V51VRFtPcvMH0uPJAeCyMFZ7A0d5dx90PlZqN9NUpIKQRQhzQasIgTRgEKvlSFXgCAeIqKLTe8LgnBQHsmTGfoAA58GsF9EzJ5n4Bs81XT52Nj3/2JStxiC8FEzhaqXVH+EIEykImWEIGQOzBkCJREE4TlmJ9XnJsvCPdnJ2H2ViNLqJA3aYr2J2D4aTO2Akwdib08uiBcB/iNgbSDLvmPipd2HNm++dioKsMUQhFbNZKBeEoKIIpnGLisE0djyl9ELAoKAIOCLgBCETA5BQBAQBAQBLQJCEDIxBAFBQBAQBIQgZA4IAoKAICAImCMgJwhzrKSkICAICAINhYAQREOJWwYrCAgCgoA5AkIQ5ljNy5IlmZoWgQgTzk8PDXy5iKpSRRAQBKqMgBBElQGvtdcJQdSaRKQ/gkDtICAEUTuymJOeCEHMCezyUkGgLhAQgqgLMUknBQFBQBCoPgJCENXHXN4oCAgCgkBdICAEURdikk4KAoKAIFB9BIQgqo+5vFEQEAQEgbpAQAiiLsQknRQEBAFBoPoICEFUH3N5oyAgCAgCdYGAEERdiEk6KQgIAoJA9REQgqg+5vJGQUAQEATqAgEhiLoQk3RSEBAEBIHqIyAEUX3M5Y2CgCAgCNQFAkIQdSEm6aQgIAgIAtVHQAii+pjLGwUBQUAQqAsEhCDqQkzSSUFAEBAEqo+AEET1MZc3CgKCgCBQFwgIQdSFmKSTgoAgIAhUH4H/D7YtySn4oWQAAAABSURBVLYKOmtRAAAAAElFTkSuQmCC"/></switch></g></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-55"><g><path d="M 51 991 L 51 1024.63" fill="none" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 51 1029.88 L 47.5 1022.88 L 51 1024.63 L 54.5 1022.88 Z" fill="#e07a5f" stroke="#e07a5f" stroke-miterlimit="10" pointer-events="all"/></g></g><g data-cell-id="8N6JJebqrzA787TgpwUj-56"><g><rect x="1" y="891" width="100" height="100" rx="7" ry="7" fill="#f2cc8f" stroke="#e07a5f" stroke-width="2" pointer-events="all"/></g><g><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 98px; height: 1px; padding-top: 941px; margin-left: 2px;"><div data-drawio-colors="color: #393C56; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(57, 60, 86); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">uPEP driver (amd-pmc) sends OS_HINT</div></div></div></foreignObject><image x="2" y="920" width="98" height="46" 
xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAYgAAAC4CAYAAAD5Tns/AAAAAXNSR0IArs4c6QAAIABJREFUeF7tfXmcXEW1//fc7plJRFkeSsKqqM+nuKERZu0wGggJsiZ2TwIE8OljjSg8EUWeuPtckfxYBFEIIMl0S1hEkEXeODO9TDA+UETkg4DsRHlhUSaZ6b7nR/UyuXO7bnfd7p6enp5z/8kn07Wd76mqb9WpU6cI8gkCgoAgIAgIAhoESFARBAQBQUAQEAR0CAhBSL8QBAQBQUAQ0CIgBCEdQxAQBAQBQUAIQvqAICAICAKCgDkCsoMwx0pSCgKCgCAwqxAQgphV6hZhBQFBQBAwR0AIwhwrSSkICAKCwKxCQAhiVqlbhBUEBAFBwBwBIQhzrKY0ZUdP5AgCbqmiklEAzwH8LBh324Rb9tkd98VisYxJme094QUW6C4Au5ikrygNYW1yKHqSLu90y1+RPFVk6ghFPkeMb08UUQKbKqrJZq1nXdW2VfI3FgJCEA2ijxpMkDpJngZwzl67c7QcUTQBQVQlf727QT0n7XrWVW8cpb6pRUAIYmrxNS59igiiUH90bA6fvOnu2EteDWpSgjCW31hRNUpYz0m7nnXVCB4ppkEQEIJoEEVMMUEAoCvGRndavWnTFeM6kZucIMrKX+9uUM9Ju5511RtHqW9qERCCmFp8jUvXEQQDR6aGo78wKWTBgpNbWlu37EZBK8RsXwDQO135MkQ4KTEUvc6UIPzUb9LGUmmmW/5q2+83fz0n7XrW5RcHSd/YCAhBNIh+qp0gnWJ0dobnkkVrmPBJ598ZuN+yxxcnEjdudout20HMJIKoVv56dwOZtOuNuNRXCQJCEJWgNgV5akkQqnkLDg7v1LaVYgwc4mhuhpmXpeKxIm+pZiKISuSfApWWLFIIot6IS32VICAEUQlqU5Cn1gShmtjZE14C0M0AWgtNZmBNajj66WbeQRRk8yP/FKhUCKLeoEp9NUdACKLmkFZW4FQQxILelW9sTad/DdD7JlrFuKethY8aGIj9w9nSZttBZHcRPuSvTGuV55IdROXYSc76ISAEUQLrokHsMbl6FeEn/1QQRG9v+PXbxulmED6yvY08EkDL0uHh67c0O0H4kb8WQ+6AhSv2DnJmBWysBGWdBOYCSDPwR4Cv4fHWK0dGfvayqssPQbjTFs6G2tuP25Faxs8j4OMAdgMwCuaHmHCdPZZeu3HjjS+Y1NXVFe5ki34FYMc8Dv+0mZeMxGPDfnDp6Vn+1jQCAwTsnc83xsBHU8PRu0uVk9VT2loK2B8H6IMA5uXTpwH8lZiGbIuv3Hs+p8rd5ynUEwot2z3NwSEAb8v+bfvY/WfXwkiIbXwTQDuAIIDnAdzDhEv81OEHm5maVghi1hEE/hKkdGhoaMOzs5Mg9PJXM4DVRB0Ijn+dCaflJxyv4v4G0L8nh/t/2RGKnGN6k1pHEGRZT8Hmn79GPG/VV8a3tgV3CA8MXL21HBmp86rWrbgNoK5CWcT4ZiIe/aIfXDq6+44jYoeXHCfG5uAwr/s3ypkCQawG038BeEP5uvghsnBGYjD2P9kpv8SnI4jM+Hgk0NbyeTDOAhDQZDcitPLtbJ4UQhCzjSBmkYlJu4PwuQssN9Q7Fva9i2yOAnhPubT530eZ+eMW0VsY+O+JPKXCkLjCchDwaQZOAbCfR50ZIjo+MdS/3mQHodJ09kTOAHDx9vL0O00vGXt7T5qzLf3PGECHT5AM8JXEcPTLujwdvSveQunMeoDUKt7PlwHhwrFXdz7P606PKqyIIMADDCtO4M97kIPinJKE5qeRzZJWCKKJCaKzM7wngjQAxtsdYsbagpuPHRgYUNv3ia8ZzyD8yF/JgO7qWvZmtoK3aSbqDMAPAVZKlcvgDxHwbsfuYgsYCRA+WglBgPACGLsWCAdAHKBH87uJbgB/sccDB4+MrFOmEyNzVldX33vY4l/nTVUqm6/VdJ4oVf7d8+3aYoMPGRmObXJjWxI3xpNMSBHoZQbvSEAPgL2K9VP64mcxQeAVAG0TDhuMx2HhN9lybRwEwlsArE4ORy+ppC80ax4hiCYmiK5Q3wrm7JZ/YjvNhHNTQ9HvuMVuRoLwI7/fAa4uJra97qVrmbnPmZeAu9IWn7JxMPaY8+8HHnjMrlZry1cpt/IvNm/42EFsL5dHOBhYkRpY/3jhb2rXNJrBPiNDsQcLfytnYlLpdDsAL483HVad3X2ngPhHjrbdjgyWJ5MxFURy4vN2v8ZNdoDPceOmrsC3h1a+1+L0TwD6kKOoDIM/lxqO/UDXHg1BFJKNEvD5PXfnSxznGdTTs3zfTMZ6MZmM/Z/fvtDM6YUgmpQgurqO2c22Wu4k4P0OEV8mm5ckErFksxOEX/n9DvL27vAhFmVdiNVBdPYjxpVs85nuSdFRNnWGwp8F07eKSMInQTDwMILWoU5y8JLBhCBU3mJC5d+PBYOLNg2s+3spfLJnCQHcANBSRzrtaryoLUAGzOfvtQe+W+oAOn+QfSnAqxx1PIEML0omY4+42+dJEMxfSMZjKopuyTMMv/2hWdMLQTQhQWS9aezMNQD1ThKP+WblYaObwKY6FlO5W9m19OKqRH4/A7y3tze4LT1vLcDHFvIxkLLHxg8veA55lad2Hi1zX7w873m0PZlPgqAS9n133aYEceDC8L4Bm5SZaN98GUZmJrd5ioEng8j0Dg/f8KizLe3tK+dZLRnl0eQ4ryltKnLmz+/CbiWgY4KUPXDwIIjHMhYv0uxS/Kh/VqUVgmgOgqAFvSt3bUln9ifC8WBEnCvbvIhbGNbS1PD6EZ3IM5wgqpbfz6jXuHNOOhQuV5bG3g/4IwhfbqimBKE2QR09kZ84ycvEzFR8wE3XtwWfP9F9zqUx+XnuALwwNN3leBCE9vytnL5m8+9CEA1MEDXsmBkQfyE5FPue19a6EQminvL7qaujO3wkEW0omIm8VsxeZeZ2ILtdDyA8kcYfQTxmB9MLRwY2PGXSbh8Eobt9/4DzwNtdn8ZTzIssdeRzVWo4+gk/5h4NOWvJUkcQTPhiaiiq7j/IZ4iAEETzE0QaxOftNR8/KGXjbWKCMJLfcLxkk3V1R77BhPO259l+58C0nI5Q5CxibD9g9UMQPl11/RCE5vb5GMBHJYdj6iJd0VfUbwiPIM29yWRMPVY18fX0HLtLBuO3O91amXB2aih6oSlmKl1395FvsGmOinB80EQ+plOT8f7LneVoCcJHdGQ/bWrmtEIQzUsQ6qnRlE18stOjxUvcRvRiqnLg+ZLfR13U0R25JmvKy3/qcDoRj/6HjzLQ0d23lIjVRJfzaPJDEIAvU4kfgtASINHFyaH+M3Ur/eKy9Wk1K39V1R0A/dUPbgxuIeBQAHs48C+61KchCF9mOT9taua0QhDNQhCEF5iz/vEbLQt3ZMYCdxX84E068IwniCrlN8FIpdFdvvNyHS5Vpmbl7f1ed5XvV/smiOLQG1ozk+YGtueh9pTuUDXkqiEIz3sZprqfj
emEIBqYIMp5/tSywzYiQdRTflMsZwNBaGTUmpmKYzh5374WgjDtYY2VTghCCCKLgBCE2cCcDQShkCjyTNKYmbp6Il9m4IIJU08J11shCLP+1WiphCCEIIQgfIzK2UIQRa64rsNnzaGz5yVMjwXIlJp8xMTko1OXSCoEIQQhBOFzLHWGIleDceJEthIHzF5Ft3eHeyzKhtjeIZvGzyG1z/r8nkHkzlqKXHEnvUZY1H6wNrRGQf6urvD+sOguBt6Y/1uGmY5Ixftv9wm/UXIhCCOYyiYSgigBUVF8GZ/uhV3dkR9Pehe6RP5a3iQuq3VNAjExmaPWEYqcR4xvTOTw2S9UvqLQ2A1GEKqN7ktpDEzcW+joiVxEgPJsKnwlA92pW9TUkhkk4B2FDJUc7ptqSQjCFKnS6YQgSu0geiJHEOB8v/k3Fm89Ih6/RUWGLPm9b/GqHV7/6rYbJ70JLQThidl0E2Q5fTp/L3JRBZ5lixalBvv/ZFhO0aWxRttBKDmKQm/kzUxjbcFtrpcKy8qvCwaIMrsOHZZ5ormDgF0I+LMN/Mmy+apEInafM70QhGFPLJNMCMIfQRjfYNWEP3a+ajXpuc/sirKYjFQ0sSNTw1HlKz/ln+wgzCHWhRH3sxrWxDxqOBNTHg3qDPWtAfPq/P+zbqzMvHWSeczwXkZxSA68bLF1eDy+Xr38ZvR1hSLHM+NqR7BDrWutEIQRnGUTCUGUgKjYzoqSt0odRakVogrtfP6k4mUH0RQ7CHVi4Jo4lVymcYX0faMBTUxKqM6e8BIgG7W2Vf1fvTSn/nXcJDeOQ9XdvfzdNgXunHTJDbhr2xwOe7065+ww7b3L9rLSQXVm4Xyc6XeZsfHF7iCJQhBl536jBEIQJWDSrRRVvP8yHZo6usMRIrqqKGCeEESzEAR0kx3AA+kgVt47EHvOQ1DvvtGgBOEOvcHA/cRsg+gDWRk9Qmt4yd8ZivwAjM9M/p2ubQvapw8MxIp21oV0KqQ4WbRm0pkekGHG6lQ86niHIpdDCMJo/i+bSAiiNETFtuJc+pvSlnXmvYPrn3RmV+GIgy0tF3i+TSwE0TQEkdtF6N520L+brNxjt6bpP/O7ymAREA1KEKqd7vsOk9peIgyHTtkeuwC1J/ktI7A6Nbx+ozukR3sovB8xqSizE2G+c9zkvVgTgig79xslEIIoA1OJCz5pgDcB1v3KHqp5VjJDjEuZoN7ozcXXn2EEYdSDfCbystVP9xmMTzGyydXbDq1zX7oY4JM1+Z9iYFg9nQni94KxvyYE+/ZsjUwQxaE3Cu02NblOgqczFD4QTOpsbTcNUapwMYMA/Q3EbY7nQF1J6VFY9tHJwdgfdLoTgqikRxfnEYIojyN19kROf81f/bslB/jkcjIEfB12+iq2guoBlrcJQeQAaiaCUPJ4vHRWrle9QsBlDKgAf7tkEzcwQeguB+a1afTinA6MzoUrPgCbf55/R7scXhpyoI8lB9f/r1dGIQifkHokF4Iww5G6FoY/zDYuAeidZbL8jZk/lYrHoqHQsvlpDioPDSGIPGjNRhBKrHA4HHjqWVKPNF0E4E2l+wc/RLZ1QsaybQt010wgCCWPxgNJmXi+khiOftlsCBWnam8/bkdqGf8a5Yhy4unWEuWliXFZJt1y/sjIz14uVa8QRKVamZxPCMIHjvnnIpcQ+DSAPghgXn719wKY/0xEl7UG+KbCYVtRJxUTU9PtIJzdR620xzJ0NDOfBqJ/A2NXR//YyKDLxkd3/tWmTVeMN3I0V92Q0Lhtlwyt4WNYQUWFbdtGR4BxAgP7AZjvcGN9/rV3qP8Cxk8z4+mbyj3pWqhXCMKPBrzTCkHUBkcpRRAQBASBpkNACKLpVCoCCQKCgCBQGwSEIGqDo5QiCAgCgkDTISAE0XQqFYEEAUFAEKgNAkIQtcFRShEEBAFBoOkQEIJoOpWKQIKAICAI1AYBIYja4CilCAKCgCDQdAgIQTSdSkUgQUAQEARqg4AQRG1wlFIEAUFAEGg6BIQgmk6lIpAgIAgIArVBQAiiNjhKKYKAICAINB0CQhBNp1IRSBAQBASB2iAgBFEbHKUUQUAQEASaDgEhiKZTqQgkCAgCgkBtEBCCqA2OUoogIAgIAk2HgBBE06lUBBIEBAFBoDYICEHUBkcpRRAQBASBpkNACKLpVCoCCQKCgCBQGwSEIGqDo5QiCAgCgkDTISAE0XQqFYEEAUFAEKgNAkIQtcFRShEEBAFBoOkQqClBLDg4vFPbVooxcAiAUZv5qJF47K6mQ60BBAqFlu2e5uAQgLflm7PFBh8yMhzb1ADNkyZUiEBHT/hsAn0HQACgK8ZGd1q9adMV4xUWJ9kaFIGZMn5rSRDUGQp/FkzfUp2biPq3vbrTKuncU9NDZ0oHmxrpm7fUAw88ZlerteVWAjrUIosIqxJD0RuaV+LZKdlMGb81I4iOnhXtBPt2ALsAkNXsFPf7mdLBphiGpiy+o7tvGRGvA9DKwMMIWoemBtY/3pTCzlKhZsr4rQlBKNNS61bcBFBvVt+EHyaHomcD4Fmq/ykXe6Z0sCkHogkr6OwMz4WFdSA6SonHwFXjozufIrvx5lH2TBm/NSGIyXZTPIEML0omY480jzobT5KZ0sEaD7mZ0aLu7hUhm+xbAeyoTE3MvCIVj90yM1ovrSyHwEwZv1UTRGdn+O0I0K8B7JPbPOArieHol8sBJL9Xh8BM6WDVSTl7cy9YcHJL2+teupaZ+/K7iJQ9Nn74xo03vjB7UWkeyWfK+K2WIKgzFPkBGJ/Jq+6xjMWLNg7GHmseVTamJDOlgzUmejOjVV1d4U626Ff5XUSGGatT8eiPZkbrpZWlEJgp47cqgujuXv5umwJ3Atgjd/ZAFyeH+s+Us4epHxwzpYNNPRLNW0P2LCKAGwBamt9F3G/Z44sTiRs3N6/Us0OymTJ+qyEI9+7hZbJ5SSIRS84OFU+vlDOlg00vSjO/9q5Q3wpmvi53LwKyi5j5Ks1KMFPGb8UEceDC8L4BO3v2sG9OZ3w7MlieTMZGfeqQOnpXvBnpzDILtISB/QDMzw8IVVQGwHMEPMjM/Znx9E0mdtje3vDrt43TzSB8JN+evwQpHRoa2vBsQUHj3HIGwV4O0NsBBAGkAX6EYd2QsejyewfXP6mRhdpD4XdZoE/BxhIQ9s639RUADzPxxXMC+PnAQOwfPnFA1u7c9tKH2eKTgGy75+V2ZngBjDsZ1kWp4fUbFdj17GAdPZEjCJg4IGXCuamhqLrMhbzHzTEgnAHQewG8IS/38wDuYcIle8/nVCwWU3os+3WEIp8jxrcLCRk4MjUc/UW1deXuFwRPJMbxIHongLk5feOvDNyUsayLPPRdts0FvYGwkolDAN6c70/ZvgsgwcC146M7/8qvJ1JnZ3hPBGkADNVHleoTY3Nw2Ka7Yy+VbVgFCcLhcOCp57EfbOsTr9W1GMBb8lip0l4B+AnAuptB6/bePfNbU73qxtGBPX3/aoH/nYAjJ9WT7e/8ZwL1p8fGf2Yy3gvll+o/Sk8tc19cQuDTAPrg
xPjKyoWHGXy9PZZe66c+p1z1Gr/t7SvnBVoyfQzuA9G/gbFrvh2qPz8DYASgWFvQvr2SecgpU8UE0dkTOQPAxY7CVieHo5f46JPUtTASYpu/D9CHfORLE+OyTLrl/JGRn73slc+LINLpwIuwrK+A+Kz8IPYqIq0O3LeN7vztwqDu7Az/CwWtS5n5Yw4C0+V/hixelRiM3WMoF3UtDH+YbVwCZCcvry9DRD/ntH16MJhpq9dNai+C6OrqO4At+5oybVaT2kMgnJociv2mHB4eA/zWzp6+jwL8UwBvKlUGAXeNB/mEewdiamKGmvCefM46nZi/4SAvXRFF+i7X1uxk+ixFAHwXwJ7l0gN4GqBTk8P9v/RhhqWOnshPCPh4vvwxgI9KDsfU2URNv85Q+CAwqTH9HrOC+SGycEZiMPY/PuRBeyi8HzEpmdRlwHKf0XgvRxA52fCj8n0VaTBdCNu+wMdity7j94De8PyWjPVDZl5eZu4qwPEKiL+GNC72IcskfVREEJrJ19fFOMW0ra978ZtgqElabZ19fwzcb9npoxKJDX/VZdYRBDKZwzkY+D4xDjOsMAPiLySHYt/r6lq2j20Fbybg/YZ5X2FgWWo4enep9NlVx9wXz2XgAkOlq+LutjjzGZsCN9cj1IaOIIisYdj2jQB2M8RD7SzPSQ5HLy01mRQRBPNRILydQGpXoXZ5ZT9FEtvmcBhbdnm1be5LFzH4ZMN+lmHgh6nh6DnlJjy1WECAfgZgSdkGTU6QIeDr20Z3/obpbqKjO3wkEW3YLgNd3xZ8/sSBgQG1YqzFRx3d4QgRXeXYLZiWayyPItSnn6UzGPhv//XQo7DoY8nB9f9bqmG6/kNEapevSFztGo0+Jtw23sbHltup1Wv85r1F1XhXFhafHw+kg1hZWDT5yVwRQbi8K9RYGgmgZenw8PVbDCqfFJJjUvr81hKwHsj+nbgNzO0OE9Dk5IwrW1s2n6YbKBqCeBrg3xcO/PIFqa3l3QD9DeA3gbDQsV0r1LUFZIWJbTWJqxhTuY/xOCz8BkzbAPv9AC0onsDKmgO8schte0u1TRHPuxwrV18kbaCniSRFBAH8GMBBBLzDUc5mBu4h0MsAvxVAt2ZAlg0d4R7gANSN4qMdZSkz4CbAuj/bP1Q9nDVzTlpoEPB5JrYLoV+0+gYO1uwqyp6luWKOTYYyZw4cVH2KwTsS0ANgLxfeGQZ/LjUc+4GJHnp6lr81jcAAIWvOVF9NvQU7Q+EDwaTMeG6yV2bCRG58KEsn78u5VX/BjFho/hgzn5CKx/pLyFOmr/MfHOP+vWDsr+k/m0F8RHIopsys2s+g/2RAUF6WcTV2S8gEpynVo7q6jN8S/U2Z/CZwK9HfKr5sWRFBuJVAjCsT8eh/mHT29p7wAgukAvipkBz5jx612T51nz1wj86m2d5+3I5WS/oLAH/WNQlvJpsWJRL9OUJxfBqCcP68hQhnbnt1537nKi5vT/88iL7omnDGVNiDXAH8W5tw4shQ7EFngQcsXLF30M4oc0vuNnnuK2kOcF2GKuR5BcxnjW3d5Rpn29Tq64ln8BGLrB/lJ2C3yHUjCFfFavL49F67c9SpO4X/1jT9JwHnu3UGy1ritRLUDPBCdWp1f7k9Nv4ll41YOUssBuMa1wSnAtxZeT1uAfPqvfZAv7ONnvou4Y3X29sbHEvPW8NZO7bz44dsxpnuPpw1Qz1HR4Bxhcs89ozFmcXx+A1/LDduNH05w8zLanFxrrf3pDnb0v/8GUDLCu1Q4T3IslYkB9ff595JZXf/c7acAKILJxNF6cVQVyiynBnXuib9vzHTWeNbd4q6d1M5mbEaRF9y5XnADqaXjgxseEqHW8n+w7jJDvA5bjf8bD8I4hww/Zerrz5gjwcOHhlZp4iy6KvX+O3sDn8CRJc75iS10Pr0tld3vlqzC6XOhSv2h61Mv5NMhWUXPjoZfRNEvkPFADp8okMxHZ+K96vtdrmPOkN9a9RgdSQ0vXlNHaHwicR0pXPyZo+6SxBEyVWI27XQKVDBdOG17ezoXfEWpO07nCtrBtakhqOfdgPjDqeQ/30zLOuY5OD6hBeQXV3L3sxW8DbNVrPuBJGNE2TR0anB/j95tNfDdOFtIvEY4BOmPi/Tj+ZMrNCkkvrOH1xe7rDxqyp+PxYMLto0sO7vbrmKd8/ZFHcjw33JZOz/vPSmXaX7cAvv6IlcRIByIc9+Xv2q3AB0/96xsO9dZLNyNtm90Ae9Fl3OvB3d4T4iUpNQfuGEMQY+qjOp5hZP9h35HW+hmAeR4aPKRVzo6IkcTICaWyZ2Nwx8PTUcVcRRFMrHs/8wn7/XHvhuiUN13W7Ak4jrNX518y2Asue9fuaiUn3GN0G09y7by0oHB7d7L+GfNvOSkXhsuFznXNC78o2t6fSvAXqfI21ZYQtpe3qO3SWD8dsBap8YKIQvpoai33TXXYIgytbX0d13HFHWtdD5Ga343AMZ4FvbgjuEBwau3uosTDPRZIhwUmIo6q63CFZXYMTC7/UmiC0Ma2lqeP1IGb1TZ3f4XFA2yu/EpO01CekHeHkPOY0ZRtVl5Baan4TUwXFhsnuWLVqkIb6iBY6fYHoa2YxNRZo++RuLtx4Rj9+iTJEVfx3dfUuJWJmXciY6xj1tLXxUOe8X3UKKGN9MxKNq9z3p05B3WVPRpLESihzPjKsdC0NP3LT9h/lm2FhZ7qBWeQdZLRllup04pGeP+aVe41e1iVoyg45Fp/E4L8bd11FAVgX+CaI73GNR9nbnDnklPmYH0wu9tnxORWe9gAJ0OgPKxWx/EDIBzhw6PHzDo6Y9vDMUuRqMEyfSE9Ymh6LKLXTS50EQRgNSZwZTAdNSw9FPlD287O47BcTbb7t6DLiu7sg3mHDe9kaXPa9wyqfbiRl3HFOsC+ncZxC5nmN+KVK3gvSy72oGuPLcOj4x1L++VLu1+iY8gjT3JpOxp0vl1ayitVgWu5zCxE49UXWxa7giMDoiFe9XUZBLfu1VjLuSsrtcmP2cJ3aEIucRZ3c1TwF0H4NvLrgkF+rMB/K8DaCuwt9K7QB0bdWVAaZTk/F+ZXaZ9On6j6k5TpkPt6V3u145v5WbX+o1fjXu7MYL8pyXId/IwD8B/M4C7uMMrylHlJMmmnIds0gBuVDEjvj0/lnJb53O9NURhH41726PbjXqZcoqwsc94DQEoZvMvFZfXlhpJox6EoRve6bpzkozwL1W864FgWZwA7G24OZjy3n7mN4p0ew0PM/AdHpTE9DW9G7XEqAmy01g/gMHrPUlTHQTxWj65Itk0+JEov/easZTfhJR0RB2zpdj7JVkUq9msVVRPy2ekPW61bhJPxlEptd0EVpUj2YBWs/x29195BtsmqN2eAdtx7tyryQTnVVHEK6LTMou/4/XtR3z+zuvVSw15V81BGE6CWsmDE/7aiUEoRnsxuUX6qtm6+lXScU7CP+LgvwbBxMLC2Wa4fHAQvcBYDFBmNfl7humdnpjgghFziKG0/OoJmYeE31o9K0M8BOXCE3KMF6dZxNm7678lAO
BWGpgvXIlryh0v9s05qX3cu139x+vcyLNAsOXjoryawii3uO3qyfy5bwbvBOmF0FYb9m0duvWnTaZukyXw9n9u28Tk+kE7bchJdJTd/eK3dmyP8KM4/JMut2f2YeJycBtLdsM0wlD1+aiyVSzg3Cv2gj4O2w+JJGIKa8Ro+99i1ft8PpXt6ntY8H1tqKVmUllRW6ujOtS8egJfiYN05Vk0QA1tIkrOYoIwnHju5ScpvouKr8CHEzw1qXRrVqZcHZqKKq8iar6uopt/JPLm7jJj3Xjc3iw3N0AZ2bNyv8ZBu4gkM9nVFndTj+0UDYD2p1BkYelzwWsCUHUe/y6I2ZrlD0Kwn2waS230B3VEHrDEoQxSPg7AAAbC0lEQVQaAK+mrT0CxPsB/K/M9EECDnCELdAPAj8EYehtZTphVEoQGpv+pDAgpqPdNWEVEYRGDpOii9qiuSindQwoVbhm1aW1pWpWgEZmojoQBHV0R64hwvEFOf24d5sAXyqNB0FMhDyppvzcZS/jC4WZ/O5ibZoC68uFJylaUFbT0Ml5tQsikwm+VBNM8tdr/Drbmd9BqYN69x0UnTibQdjAbF1dZTgUf4fUfg5xDPpBNqYRMX2WCEdqLqgZFJE9Zjc+pDbdkgtBbI9ZpZRQKhaTmZK0uzKticRkgHrVOZU7iKmcoE0w1LqXG+6QTMp3hA25qFw4E1d5DxDR+XvOt2/VuZAKQXijX26B587pM0RJIfuLBHw/M96yplRoIq9W+jIxeXiKaCfoUp1SebUEMvaFRNkbsn5Cbagwx6rN2+PxCEEUPLrquYPwvXLV7WZ0hC0EYTyh+PKgMiEJlWZ7QDucC0C5k5uEN1GXGK+ZE+Qz3e6xQhDG+jQ1EW9fWAMqJpzJjkI14gG2KGLiEOFssU+CMHcD81zlLVzxAdj8c4/bwM5szwP8FwIN20wD4y3WverikukZiHbFZ3ioJzuI2u8gTF1JG5cgivt+PU1MmjOnKSEI5wBUu5at6Vd7iLAcDHUx1h0yZNIwJ9BlrcHnz3R6jZmOV1MCK5eumv6T3S27nHB0ForpMDHp5M4Gonzeegdl+Ggm9BHw7jKEXvIWuq4OXwShCqhG4Xl/5ptc4ShUsU8zcB1g/Wo8SA9sGlinnlXUek2Y1t/IBGF6YFtqMGjMfaYrkHJjrOh394CoZGLUuFNq3VdNBuh0mJh0fZ+n/5Da907Ot/IdGXIXXe0Pvxby+9+LnEVy6Yrcfru6Iz9mwicLxUy112M1/ceUIBp1/GbNkPbWD4LtVWCo0Cm6QJplLwpXvINQGd0K9zoD0HVE1+MnKokKnPVjpPls08sbTUEQxbfRfcfW0dwqrxtBeN0OLzX5FN8E1oezqGaAT+UZRH7yUBfDVNjwwufLhTJbhronQ7gKzM+B6LcqsF9bcPM15e5qTKUXUyWkoeKjUXD8EuehvSrH7VnV4XYNNry8WEmbTCf4kv3UYAehiSbRcOM3e6b0DE4C0f9zxrLyS9C+dxCVuiHqDrgZ8PUQu/bSyAw8g9CaCzxiNnl15lqsYkwHoWZLbXQj3VG++02DbHRJ3c30hiYId1gKwOgSnxNn0wtfbt2YnuGY6jS7I8o9aboEoEWKuxjYjZmPNQmbo/JndxSZzG3grLdh7nONR83lwores8iGjSB8xUGs9yLDP3UvLKvpP6YEU8/xmw2rw5nDiKhTPaaWHzcqqKDJp8bdtwjZ86TC58tj0j9BFN+k9gxs5pRAG0fJ56TY1dX3HraygcW2b51mIEFoOyLwp7RlHVrObbCAqebyTB13EDAKf1Foqy7EhFf4jGoG+FTvIDRyKBGNt+x+Qka4R7/pGY7JrFFIUwvPrHI7el18IyLq3/bqTqtML3flXgNsuXXyA0P6qAjV9B9Tgqjn+K10QV7QcbXnJb4Jwm1L9rqw4u6oukB9fmzZWV/t1710LTP3TSp7hhKEjuxMY9R4XJypJ0GoHcD9lj2+OJG4UXmWlfrcb5ertJ5hlKsZ4FNNELld8Ly1AB9bENhXsL7c4kq9b1EICmgcqmOqYjG5dzR+dvUeBOO+I6PTf9l3QZwdqqMnfDaB1BO3BY/HDJhPScZjPykiUgMTUanOatr/6jV+NXp/2WLr8Hh8/ZDJYqDam+W+CUJz5d8oTITHtuxhBK1DUwPrHy8lbC5eO/0ADPXmxCS3WK+DwkY+pM7Lqh04THx6aii21vOQPveSmXqYRT124/zqShC5iunatqB9eonIn9TZEznd/ZpXqRvtpgNU11+mmiBUnbpw3+XCwKt8uhfB1ALJ68Ert3ydriCQfm3JXuNLH76crhgb3Wl1uRW+JuS39vJjd/fyd9sUUPGe9nC0Qz0wdVy5Fxc7F67ocr9cWGpxUk3/8bODUJ2/MxRRc9JnHDKN1nr8anedwINkpw/zek1zYvegeX7ANNxQoQzfBKE9SzC88q+PKaJ/gEc1MP806UfB/C3Pt2Q9QjHMAIKAx9sOXm/wej0EUtDlNBBE1tUsxcSfcD+glD3EbBlX9s9TJr3fUebcqZoBXg+C8JgY8g9JBT8xMrTuD05yz11AU+FQSK12nROkUfj4gnKLvIE8QmubrCqdaTx25ipMuvZxnTzZzaUAnZmPD7Q97E2JsNqaXYAqapSBb/N4y4XuS1z5R3xW5x/xcfr6l9x9VNN/fBJE3cavPhSK/oGqwuIz98a99WPXdQJffS67BPTbobIdxB3S2jBqZomYIhkwnmRCKvdkJQJg+4MgeqfrNakM1CM1wNsc23StJ8lMIIgslt7PPebiqzD9odRTgg791ZMgnK+1qSZMesbR+wlWlH0HoJoBXieCQMknR4GnGBjOP73q9YytLxOLxjnDaNduOrY9Vvg5vZqNS5W27MNMrXNfuhi598HdX5qBPxLot9k6vZ+sVRfyvjcnuPl8L6+vavqPX4Ko1/j1eJwohyHhBWL8jkHqGdXcs7CED2oiU5R9dEvXXyoiiGIPGu8XuNyV+owp4sz+CueeAo1bzHcw8Mb8j9r3KGYKQWQ7pebVrHKDWz2qbgFpZhyZT1tHguABAv2GAaUPk5u2qutW9Oi8HzfqehGEwju39Sf1jGiknK5cv5c1QxSNmeJX30o+hemzPdnklfRBRz3PkMWrEoOxe0rVnbcIfBOMs3xGUCgsQi4ce3Xn80qZvupNEJVi53f8HtAbnh9Mq/fZJz1pbKrqNAFf2Ta687fLmQ3dBVZEEJrXpHytaDpD4YPA+JGn2WhyK9Xq4tqMZV2gPHw0h91aH+SZRBBKXB/hR9JguhC2fQGCdJnj8aT6EQTjHjvdcozVkl4I8E/LxO7xMplpO3c1A7yeBKEa7zd+kZc5rtwoL9qx+3isqVzZzt/VM5WUzlwFUMhwAk8T0Q3jROeYet+plULO/EEq5tPEy22l28kPkYUzEoOx/ykXQbia/pOd7Cs85K7H+FXzLgfogvzTs9tNe6XBe4As/rQJdjXbQaiCig7NfNpEHTFfVqmzPwDzHZ3yeQJ+z8z9mfH0TcWP1E9+11rnNjfTCCKvnGycFQv0KXA2jLcKcaxW6G
[draw.io SVG flowchart data elided (base64-embedded PNG node images and SVG markup). Recoverable node and label text: "Put all x86 CPU cores into ACPI C3", "s2idle loop waiting for IRQ to wake", a "Failures?" decision diamond, and "yes"/"no" branch labels.]
\ No newline at end of file diff --git a/Documentation/arch/x86/x86_64/5level-paging.rst b/Documentation/arch/x86/x86_64/5level-paging.rst index 71f882f4a173..ad7ddc13f79d 100644 --- a/Documentation/arch/x86/x86_64/5level-paging.rst +++ b/Documentation/arch/x86/x86_64/5level-paging.rst @@ -22,15 +22,6 @@ QEMU 2.9 and later support 5-level paging. Virtual memory layout for 5-level paging is described in Documentation/arch/x86/x86_64/mm.rst - -Enabling 5-level paging -======================= -CONFIG_X86_5LEVEL=y enables the feature. - -Kernel with CONFIG_X86_5LEVEL=y still able to boot on 4-level hardware. -In this case additional page table level -- p4d -- will be folded at -runtime. - User-space and large virtual address space ========================================== On x86, 5-level paging enables 56-bit userspace virtual address space. diff --git a/Documentation/core-api/genericirq.rst b/Documentation/core-api/genericirq.rst index 25f94dfd66fa..582bde9bf5a9 100644 --- a/Documentation/core-api/genericirq.rst +++ b/Documentation/core-api/genericirq.rst @@ -410,8 +410,6 @@ which are used in the generic IRQ layer. .. kernel-doc:: include/linux/interrupt.h :internal: -.. kernel-doc:: include/linux/irqdomain.h - Public Functions Provided ========================= diff --git a/Documentation/core-api/irq/concepts.rst b/Documentation/core-api/irq/concepts.rst index 4273806a606b..7c4564f3cbdf 100644 --- a/Documentation/core-api/irq/concepts.rst +++ b/Documentation/core-api/irq/concepts.rst @@ -2,23 +2,24 @@ What is an IRQ? =============== -An IRQ is an interrupt request from a device. -Currently they can come in over a pin, or over a packet. -Several devices may be connected to the same pin thus -sharing an IRQ. +An IRQ is an interrupt request from a device. Currently, they can come +in over a pin, or over a packet. Several devices may be connected to +the same pin thus sharing an IRQ. Such as on legacy PCI bus: All devices +typically share 4 lanes/pins. Note that each device can request an +interrupt on each of the lanes. An IRQ number is a kernel identifier used to talk about a hardware -interrupt source. Typically this is an index into the global irq_desc -array, but except for what linux/interrupt.h implements the details -are architecture specific. +interrupt source. Typically, this is an index into the global irq_desc +array or sparse_irqs tree. But except for what linux/interrupt.h +implements, the details are architecture specific. An IRQ number is an enumeration of the possible interrupt sources on a -machine. Typically what is enumerated is the number of input pins on -all of the interrupt controller in the system. In the case of ISA -what is enumerated are the 16 input pins on the two i8259 interrupt -controllers. +machine. Typically, what is enumerated is the number of input pins on +all of the interrupt controllers in the system. In the case of ISA, +what is enumerated are the 8 input pins on each of the two i8259 +interrupt controllers. Architectures can assign additional meaning to the IRQ numbers, and -are encouraged to in the case where there is any manual configuration -of the hardware involved. The ISA IRQs are a classic example of +are encouraged to in the case where there is any manual configuration +of the hardware involved. The ISA IRQs are a classic example of assigning this kind of additional meaning. 
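As an illustrative aside (not part of the patch above), the following hedged sketch shows how a driver typically consumes such a Linux IRQ number, including the shared-pin case the concepts text mentions. The ``demo_*`` names and the device cookie are hypothetical; ``request_irq()`` and ``IRQF_SHARED`` are the standard ``<linux/interrupt.h>`` interfaces.

.. code-block:: c

	#include <linux/errno.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		/*
		 * On a shared pin (IRQF_SHARED) every registered handler is
		 * called; a real handler must check its own device's status
		 * and return IRQ_NONE when it did not raise the line.
		 */
		return IRQ_HANDLED;
	}

	static int demo_setup_irq(int irq, void *dev_cookie)
	{
		/* A non-NULL cookie is required for shared interrupts. */
		if (!dev_cookie)
			return -EINVAL;

		return request_irq(irq, demo_isr, IRQF_SHARED,
				   "demo-device", dev_cookie);
	}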
diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst index f88a6ee67a35..a01c6ead1bc0 100644 --- a/Documentation/core-api/irq/irq-domain.rst +++ b/Documentation/core-api/irq/irq-domain.rst @@ -1,59 +1,77 @@ =============================================== -The irq_domain interrupt number mapping library +The irq_domain Interrupt Number Mapping Library =============================================== The current design of the Linux kernel uses a single large number -space where each separate IRQ source is assigned a different number. -This is simple when there is only one interrupt controller, but in -systems with multiple interrupt controllers the kernel must ensure +space where each separate IRQ source is assigned a unique number. +This is simple when there is only one interrupt controller. But in +systems with multiple interrupt controllers, the kernel must ensure that each one gets assigned non-overlapping allocations of Linux IRQ numbers. The number of interrupt controllers registered as unique irqchips -show a rising tendency: for example subdrivers of different kinds +shows a rising tendency. For example, subdrivers of different kinds such as GPIO controllers avoid reimplementing identical callback mechanisms as the IRQ core system by modelling their interrupt -handlers as irqchips, i.e. in effect cascading interrupt controllers. +handlers as irqchips. I.e. in effect cascading interrupt controllers. -Here the interrupt number loose all kind of correspondence to -hardware interrupt numbers: whereas in the past, IRQ numbers could -be chosen so they matched the hardware IRQ line into the root -interrupt controller (i.e. the component actually fireing the -interrupt line to the CPU) nowadays this number is just a number. +So in the past, IRQ numbers could be chosen so that they match the +hardware IRQ line into the root interrupt controller (i.e. the +component actually firing the interrupt line to the CPU). Nowadays, +this number is just a number and the number loose all kind of +correspondence to hardware interrupt numbers. -For this reason we need a mechanism to separate controller-local -interrupt numbers, called hardware irq's, from Linux IRQ numbers. +For this reason, we need a mechanism to separate controller-local +interrupt numbers, called hardware IRQs, from Linux IRQ numbers. The irq_alloc_desc*() and irq_free_desc*() APIs provide allocation of -irq numbers, but they don't provide any support for reverse mapping of +IRQ numbers, but they don't provide any support for reverse mapping of the controller-local IRQ (hwirq) number into the Linux IRQ number space. -The irq_domain library adds mapping between hwirq and IRQ numbers on -top of the irq_alloc_desc*() API. An irq_domain to manage mapping is -preferred over interrupt controller drivers open coding their own +The irq_domain library adds a mapping between hwirq and IRQ numbers on +top of the irq_alloc_desc*() API. An irq_domain to manage the mapping +is preferred over interrupt controller drivers open coding their own reverse mapping scheme. -irq_domain also implements translation from an abstract irq_fwspec -structure to hwirq numbers (Device Tree and ACPI GSI so far), and can -be easily extended to support other IRQ topology data sources. +irq_domain also implements a translation from an abstract struct +irq_fwspec to hwirq numbers (Device Tree, non-DT firmware node, ACPI +GSI, and software node so far), and can be easily extended to support +other IRQ topology data sources. 
The implementation is performed +without any extra platform support code. -irq_domain usage +irq_domain Usage ================ - -An interrupt controller driver creates and registers an irq_domain by -calling one of the irq_domain_add_*() or irq_domain_create_*() functions -(each mapping method has a different allocator function, more on that later). -The function will return a pointer to the irq_domain on success. The caller -must provide the allocator function with an irq_domain_ops structure. +struct irq_domain could be defined as an irq domain controller. That +is, it handles the mapping between hardware and virtual interrupt +numbers for a given interrupt domain. The domain structure is +generally created by the PIC code for a given PIC instance (though a +domain can cover more than one PIC if they have a flat number model). +It is the domain callbacks that are responsible for setting the +irq_chip on a given irq_desc after it has been mapped. + +The host code and data structures use a fwnode_handle pointer to +identify the domain. In some cases, and in order to preserve source +code compatibility, this fwnode pointer is "upgraded" to a DT +device_node. For those firmware infrastructures that do not provide a +unique identifier for an interrupt controller, the irq_domain code +offers a fwnode allocator. + +An interrupt controller driver creates and registers a struct irq_domain +by calling one of the irq_domain_create_*() functions (each mapping +method has a different allocator function, more on that later). The +function will return a pointer to the struct irq_domain on success. The +caller must provide the allocator function with a struct irq_domain_ops +pointer. In most cases, the irq_domain will begin empty without any mappings between hwirq and IRQ numbers. Mappings are added to the irq_domain by calling irq_create_mapping() which accepts the irq_domain and a -hwirq number as arguments. If a mapping for the hwirq doesn't already -exist then it will allocate a new Linux irq_desc, associate it with -the hwirq, and call the .map() callback so the driver can perform any -required hardware setup. +hwirq number as arguments. If a mapping for the hwirq doesn't already +exist, irq_create_mapping() allocates a new Linux irq_desc, associates +it with the hwirq, and calls the :c:member:`irq_domain_ops.map()` +callback. In there, the driver can perform any required hardware +setup. Once a mapping has been established, it can be retrieved or used via a variety of methods: @@ -63,8 +81,6 @@ variety of methods: mapping. - irq_find_mapping() returns a Linux IRQ number for a given domain and hwirq number, and 0 if there was no mapping -- irq_linear_revmap() is now identical to irq_find_mapping(), and is - deprecated - generic_handle_domain_irq() handles an interrupt described by a domain and a hwirq number @@ -77,9 +93,10 @@ be allocated. If the driver has the Linux IRQ number or the irq_data pointer, and needs to know the associated hwirq number (such as in the irq_chip -callbacks) then it can be directly obtained from irq_data->hwirq. +callbacks) then it can be directly obtained from +:c:member:`irq_data.hwirq`. 
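A minimal sketch of the flow just described, not taken from this patch: a driver creates a linear domain and establishes one mapping. The ``demo_*`` identifiers and the 32-line size are assumptions; ``irq_domain_create_linear()``, ``irq_create_mapping()`` and the ``.map()`` callback are the interfaces named in the text above.

.. code-block:: c

	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	#define DEMO_NR_HWIRQS	32

	static int demo_map(struct irq_domain *d, unsigned int virq,
			    irq_hw_number_t hwirq)
	{
		/* Per-line hardware setup goes here; attach chip and flow handler. */
		irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
		return 0;
	}

	static const struct irq_domain_ops demo_ops = {
		.map	= demo_map,
		.xlate	= irq_domain_xlate_onecell,
	};

	static struct irq_domain *demo_create_domain(struct fwnode_handle *fwnode)
	{
		struct irq_domain *d;

		d = irq_domain_create_linear(fwnode, DEMO_NR_HWIRQS,
					     &demo_ops, NULL);
		if (!d)
			return NULL;

		/* Usually done on demand; hwirq 5 is chosen arbitrarily here. */
		(void)irq_create_mapping(d, 5);
		return d;
	}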
-Types of irq_domain mappings +Types of irq_domain Mappings ============================ There are several mechanisms available for reverse mapping from hwirq @@ -92,7 +109,6 @@ Linear :: - irq_domain_add_linear() irq_domain_create_linear() The linear reverse map maintains a fixed size table indexed by the @@ -105,19 +121,13 @@ map are fixed time lookup for IRQ numbers, and irq_descs are only allocated for in-use IRQs. The disadvantage is that the table must be as large as the largest possible hwirq number. -irq_domain_add_linear() and irq_domain_create_linear() are functionally -equivalent, except for the first argument is different - the former -accepts an Open Firmware specific 'struct device_node', while the latter -accepts a more general abstraction 'struct fwnode_handle'. - -The majority of drivers should use the linear map. +The majority of drivers should use the Linear map. Tree ---- :: - irq_domain_add_tree() irq_domain_create_tree() The irq_domain maintains a radix tree map from hwirq numbers to Linux @@ -129,11 +139,6 @@ since it doesn't need to allocate a table as large as the largest hwirq number. The disadvantage is that hwirq to IRQ number lookup is dependent on how many entries are in the table. -irq_domain_add_tree() and irq_domain_create_tree() are functionally -equivalent, except for the first argument is different - the former -accepts an Open Firmware specific 'struct device_node', while the latter -accepts a more general abstraction 'struct fwnode_handle'. - Very few drivers should need this mapping. No Map @@ -141,7 +146,7 @@ No Map :: - irq_domain_add_nomap() + irq_domain_create_nomap() The No Map mapping is to be used when the hwirq number is programmable in the hardware. In this case it is best to program the @@ -159,8 +164,6 @@ Legacy :: - irq_domain_add_simple() - irq_domain_add_legacy() irq_domain_create_simple() irq_domain_create_legacy() @@ -189,13 +192,13 @@ supported. For example, ISA controllers would use the legacy map for mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ numbers. -Most users of legacy mappings should use irq_domain_add_simple() or -irq_domain_create_simple() which will use a legacy domain only if an IRQ range -is supplied by the system and will otherwise use a linear domain mapping. -The semantics of this call are such that if an IRQ range is specified then -descriptors will be allocated on-the-fly for it, and if no range is -specified it will fall through to irq_domain_add_linear() or -irq_domain_create_linear() which means *no* irq descriptors will be allocated. +Most users of legacy mappings should use irq_domain_create_simple() +which will use a legacy domain only if an IRQ range is supplied by the +system and will otherwise use a linear domain mapping. The semantics of +this call are such that if an IRQ range is specified then descriptors +will be allocated on-the-fly for it, and if no range is specified it +will fall through to irq_domain_create_linear() which means *no* irq +descriptors will be allocated. A typical use case for simple domains is where an irqchip provider is supporting both dynamic and static IRQ assignments. @@ -206,13 +209,7 @@ that the driver using the simple domain call irq_create_mapping() before any irq_find_mapping() since the latter will actually work for the static IRQ assignment case. 
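For the simple-domain case, a hedged sketch (again not part of the patch) of the call the text recommends; ``demo_simple_domain()`` and the range size of 16 are hypothetical:

.. code-block:: c

	#include <linux/irqdomain.h>

	static struct irq_domain *
	demo_simple_domain(struct fwnode_handle *fwnode, unsigned int first_irq,
			   const struct irq_domain_ops *ops)
	{
		/*
		 * With first_irq != 0 this behaves like a legacy domain over
		 * [first_irq, first_irq + 16) and descriptors are allocated
		 * on-the-fly; with first_irq == 0 it falls through to a
		 * linear domain and no descriptors are allocated.
		 */
		return irq_domain_create_simple(fwnode, 16, first_irq, ops, NULL);
	}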
-irq_domain_add_simple() and irq_domain_create_simple() as well as -irq_domain_add_legacy() and irq_domain_create_legacy() are functionally -equivalent, except for the first argument is different - the former -accepts an Open Firmware specific 'struct device_node', while the latter -accepts a more general abstraction 'struct fwnode_handle'. - -Hierarchy IRQ domain +Hierarchy IRQ Domain -------------------- On some architectures, there may be multiple interrupt controllers @@ -253,20 +250,40 @@ There are four major interfaces to use hierarchy irq_domain: 4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware to stop delivering the interrupt. -Following changes are needed to support hierarchy irq_domain: +The following is needed to support hierarchy irq_domain: -1) a new field 'parent' is added to struct irq_domain; it's used to +1) The :c:member:`parent` field in struct irq_domain is used to maintain irq_domain hierarchy information. -2) a new field 'parent_data' is added to struct irq_data; it's used to - build hierarchy irq_data to match hierarchy irq_domains. The irq_data - is used to store irq_domain pointer and hardware irq number. -3) new callbacks are added to struct irq_domain_ops to support hierarchy - irq_domain operations. - -With support of hierarchy irq_domain and hierarchy irq_data ready, an -irq_domain structure is built for each interrupt controller, and an +2) The :c:member:`parent_data` field in struct irq_data is used to + build hierarchy irq_data to match hierarchy irq_domains. The + irq_data is used to store irq_domain pointer and hardware irq + number. +3) The :c:member:`alloc()`, :c:member:`free()`, and other callbacks in + struct irq_domain_ops to support hierarchy irq_domain operations. + +With the support of hierarchy irq_domain and hierarchy irq_data ready, +an irq_domain structure is built for each interrupt controller, and an irq_data structure is allocated for each irq_domain associated with an -IRQ. Now we could go one step further to support stacked(hierarchy) +IRQ. + +For an interrupt controller driver to support hierarchy irq_domain, it +needs to: + +1) Implement irq_domain_ops.alloc() and irq_domain_ops.free() +2) Optionally, implement irq_domain_ops.activate() and + irq_domain_ops.deactivate(). +3) Optionally, implement an irq_chip to manage the interrupt controller + hardware. +4) There is no need to implement irq_domain_ops.map() and + irq_domain_ops.unmap(). They are unused with hierarchy irq_domain. + +Note the hierarchy irq_domain is in no way x86-specific, and is +heavily used to support other architectures, such as ARM, ARM64 etc. + +Stacked irq_chip +~~~~~~~~~~~~~~~~ + +Now, we could go one step further to support stacked (hierarchy) irq_chip. That is, an irq_chip is associated with each irq_data along the hierarchy. A child irq_chip may implement a required action by itself or by cooperating with its parent irq_chip. @@ -276,22 +293,28 @@ with the hardware managed by itself and may ask for services from its parent irq_chip when needed. So we could achieve a much cleaner software architecture. -For an interrupt controller driver to support hierarchy irq_domain, it -needs to: - -1) Implement irq_domain_ops.alloc and irq_domain_ops.free -2) Optionally implement irq_domain_ops.activate and - irq_domain_ops.deactivate. -3) Optionally implement an irq_chip to manage the interrupt controller - hardware. -4) No need to implement irq_domain_ops.map and irq_domain_ops.unmap, - they are unused with hierarchy irq_domain. 
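To make the hierarchy requirements concrete, here is a hedged skeleton of the ``.alloc()``/``.free()`` callbacks, not taken from this patch. The one-cell fwspec decoding and the ``demo_*`` names are assumptions; the helpers used (``irq_domain_set_hwirq_and_chip()``, ``irq_domain_alloc_irqs_parent()``, ``irq_domain_free_irqs_common()`` and the ``irq_chip_*_parent()`` callbacks) are the generic hierarchy APIs.

.. code-block:: c

	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	static struct irq_chip demo_chip = {
		.name		= "demo",
		.irq_mask	= irq_chip_mask_parent,
		.irq_unmask	= irq_chip_unmask_parent,
		.irq_eoi	= irq_chip_eoi_parent,
	};

	static int demo_alloc(struct irq_domain *d, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
	{
		struct irq_fwspec *fwspec = arg;
		irq_hw_number_t hwirq = fwspec->param[0];	/* assumes 1-cell spec */
		unsigned int i;

		for (i = 0; i < nr_irqs; i++)
			irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
						      &demo_chip, NULL);

		/* Ask the parent domain (e.g. the root controller) for resources. */
		return irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	}

	static const struct irq_domain_ops demo_hier_ops = {
		.alloc	= demo_alloc,
		.free	= irq_domain_free_irqs_common,
	};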
- -Hierarchy irq_domain is in no way x86 specific, and is heavily used to -support other architectures, such as ARM, ARM64 etc. - Debugging ========= Most of the internals of the IRQ subsystem are exposed in debugfs by turning CONFIG_GENERIC_IRQ_DEBUGFS on. + +Structures and Public Functions Provided +======================================== + +This chapter contains the autogenerated documentation of the structures +and exported kernel API functions which are used for IRQ domains. + +.. kernel-doc:: include/linux/irqdomain.h + +.. kernel-doc:: kernel/irq/irqdomain.c + :export: + +Internal Functions Provided +=========================== + +This chapter contains the autogenerated documentation of the internal +functions. + +.. kernel-doc:: kernel/irq/irqdomain.c + :internal: diff --git a/Documentation/dev-tools/kunit/run_wrapper.rst b/Documentation/dev-tools/kunit/run_wrapper.rst index 19ddf5e07013..6697c71ee8ca 100644 --- a/Documentation/dev-tools/kunit/run_wrapper.rst +++ b/Documentation/dev-tools/kunit/run_wrapper.rst @@ -182,6 +182,8 @@ via UML. To run tests on qemu, by default it requires two flags: is ignored), the tests will run via UML. Non-UML architectures, for example: i386, x86_64, arm and so on; run on qemu. + ``--arch help`` lists all valid ``--arch`` values. + - ``--cross_compile``: Specifies the Kbuild toolchain. It passes the same argument as passed to the ``CROSS_COMPILE`` variable used by Kbuild. As a reminder, this will be the prefix for the toolchain diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst index 22955d56b379..038f480074fd 100644 --- a/Documentation/dev-tools/kunit/usage.rst +++ b/Documentation/dev-tools/kunit/usage.rst @@ -670,28 +670,50 @@ with ``kunit_remove_action``. Testing Static Functions ------------------------ -If we do not want to expose functions or variables for testing, one option is to -conditionally export the used symbol. For example: +If you want to test static functions without exposing those functions outside of +testing, one option is conditionally export the symbol. When KUnit is enabled, +the symbol is exposed but remains static otherwise. To use this method, follow +the template below. .. code-block:: c - /* In my_file.c */ + /* In the file containing functions to test "my_file.c" */ - VISIBLE_IF_KUNIT int do_interesting_thing(); + #include <kunit/visibility.h> + #include <my_file.h> + ... + VISIBLE_IF_KUNIT int do_interesting_thing() + { + ... + } EXPORT_SYMBOL_IF_KUNIT(do_interesting_thing); - /* In my_file.h */ + /* In the header file "my_file.h" */ #if IS_ENABLED(CONFIG_KUNIT) int do_interesting_thing(void); #endif -Alternatively, you could conditionally ``#include`` the test file at the end of -your .c file. For example: + /* In the KUnit test file "my_file_test.c" */ + + #include <kunit/visibility.h> + #include <my_file.h> + ... + MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); + ... + // Use do_interesting_thing() in tests + +For a full example, see this `patch <https://lore.kernel.org/all/20221207014024.340230-3-rmoar@google.com/>`_ +where a test is modified to conditionally expose static functions for testing +using the macros above. + +As an **alternative** to the method above, you could conditionally ``#include`` +the test file at the end of your .c file. This is not recommended but works +if needed. For example: .. 
code-block:: c - /* In my_file.c */ + /* In "my_file.c" */ static int do_interesting_thing(); diff --git a/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml b/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml new file mode 100644 index 000000000000..32bf3a1c3b42 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/amd,ccp-seattle-v1a.yaml @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/amd,ccp-seattle-v1a.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: AMD Cryptographic Coprocessor (ccp) + +maintainers: + - Tom Lendacky <thomas.lendacky@amd.com> + +properties: + compatible: + const: amd,ccp-seattle-v1a + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + dma-coherent: true + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + crypto@e0100000 { + compatible = "amd,ccp-seattle-v1a"; + reg = <0xe0100000 0x10000>; + interrupts = <0 3 4>; + dma-coherent; + }; diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt deleted file mode 100644 index d87579d63da6..000000000000 --- a/Documentation/devicetree/bindings/crypto/amd-ccp.txt +++ /dev/null @@ -1,17 +0,0 @@ -* AMD Cryptographic Coprocessor driver (ccp) - -Required properties: -- compatible: Should be "amd,ccp-seattle-v1a" -- reg: Address and length of the register set for the device -- interrupts: Should contain the CCP interrupt - -Optional properties: -- dma-coherent: Present if dma operations are coherent - -Example: - ccp@e0100000 { - compatible = "amd,ccp-seattle-v1a"; - reg = <0 0xe0100000 0 0x10000>; - interrupt-parent = <&gic>; - interrupts = <0 3 4>; - }; diff --git a/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt b/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt deleted file mode 100644 index d9cca4875bd6..000000000000 --- a/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt +++ /dev/null @@ -1,16 +0,0 @@ -Axis crypto engine with PDMA interface. - -Required properties: -- compatible : Should be one of the following strings: - "axis,artpec6-crypto" for the version in the Axis ARTPEC-6 SoC - "axis,artpec7-crypto" for the version in the Axis ARTPEC-7 SoC. -- reg: Base address and size for the PDMA register area. -- interrupts: Interrupt handle for the PDMA interrupt line. 
- -Example: - -crypto@f4264000 { - compatible = "axis,artpec6-crypto"; - reg = <0xf4264000 0x1000>; - interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>; -}; diff --git a/Documentation/devicetree/bindings/crypto/axis,artpec6-crypto.yaml b/Documentation/devicetree/bindings/crypto/axis,artpec6-crypto.yaml new file mode 100644 index 000000000000..c91f81e3c39e --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/axis,artpec6-crypto.yaml @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/axis,artpec6-crypto.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Axis ARTPEC6 crypto engine with PDMA interface + +maintainers: + - Lars Persson <lars.persson@axis.com> + +properties: + compatible: + enum: + - axis,artpec6-crypto + - axis,artpec7-crypto + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + + crypto@f4264000 { + compatible = "axis,artpec6-crypto"; + reg = <0xf4264000 0x1000>; + interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/crypto/brcm,spu-crypto.txt b/Documentation/devicetree/bindings/crypto/brcm,spu-crypto.txt deleted file mode 100644 index 29b6007568eb..000000000000 --- a/Documentation/devicetree/bindings/crypto/brcm,spu-crypto.txt +++ /dev/null @@ -1,22 +0,0 @@ -The Broadcom Secure Processing Unit (SPU) hardware supports symmetric -cryptographic offload for Broadcom SoCs. A SoC may have multiple SPU hardware -blocks. - -Required properties: -- compatible: Should be one of the following: - brcm,spum-crypto - for devices with SPU-M hardware - brcm,spu2-crypto - for devices with SPU2 hardware - brcm,spu2-v2-crypto - for devices with enhanced SPU2 hardware features like SHA3 - and Rabin Fingerprint support - brcm,spum-nsp-crypto - for the Northstar Plus variant of the SPU-M hardware - -- reg: Should contain SPU registers location and length. -- mboxes: The mailbox channel to be used to communicate with the SPU. - Mailbox channels correspond to DMA rings on the device. - -Example: - crypto@612d0000 { - compatible = "brcm,spum-crypto"; - reg = <0 0x612d0000 0 0x900>; - mboxes = <&pdc0 0>; - }; diff --git a/Documentation/devicetree/bindings/crypto/brcm,spum-crypto.yaml b/Documentation/devicetree/bindings/crypto/brcm,spum-crypto.yaml new file mode 100644 index 000000000000..9a5fb61727fa --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/brcm,spum-crypto.yaml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/brcm,spum-crypto.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Broadcom SPU Crypto Offload + +maintainers: + - Rob Rice <rob.rice@broadcom.com> + +description: + The Broadcom Secure Processing Unit (SPU) hardware supports symmetric + cryptographic offload for Broadcom SoCs. A SoC may have multiple SPU hardware + blocks. 
+ +properties: + compatible: + enum: + - brcm,spum-crypto + - brcm,spu2-crypto + - brcm,spu2-v2-crypto # enhanced SPU2 hardware features like SHA3 and Rabin Fingerprint support + - brcm,spum-nsp-crypto # Northstar Plus variant of the SPU-M hardware + + reg: + maxItems: 1 + + mboxes: + maxItems: 1 + +required: + - compatible + - reg + - mboxes + +additionalProperties: false + +examples: + - | + crypto@612d0000 { + compatible = "brcm,spum-crypto"; + reg = <0x612d0000 0x900>; + mboxes = <&pdc0 0>; + }; diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml index f0c4a7c83568..75afa441e019 100644 --- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml +++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml @@ -38,7 +38,9 @@ properties: compatible: oneOf: - items: - - const: fsl,sec-v5.4 + - enum: + - fsl,sec-v5.4 + - fsl,sec-v6.0 - const: fsl,sec-v5.0 - const: fsl,sec-v4.0 - items: @@ -94,6 +96,12 @@ patternProperties: compatible: oneOf: - items: + - const: fsl,sec-v6.0-job-ring + - const: fsl,sec-v5.2-job-ring + - const: fsl,sec-v5.0-job-ring + - const: fsl,sec-v4.4-job-ring + - const: fsl,sec-v4.0-job-ring + - items: - const: fsl,sec-v5.4-job-ring - const: fsl,sec-v5.0-job-ring - const: fsl,sec-v4.0-job-ring diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec6.txt b/Documentation/devicetree/bindings/crypto/fsl-sec6.txt deleted file mode 100644 index 73b0eb950bb3..000000000000 --- a/Documentation/devicetree/bindings/crypto/fsl-sec6.txt +++ /dev/null @@ -1,157 +0,0 @@ -SEC 6 is as Freescale's Cryptographic Accelerator and Assurance Module (CAAM). -Currently Freescale powerpc chip C29X is embedded with SEC 6. -SEC 6 device tree binding include: - -SEC 6 Node - -Job Ring Node - -Full Example - -===================================================================== -SEC 6 Node - -Description - - Node defines the base address of the SEC 6 block. - This block specifies the address range of all global - configuration registers for the SEC 6 block. - For example, In C293, we could see three SEC 6 node. - -PROPERTIES - - - compatible - Usage: required - Value type: <string> - Definition: Must include "fsl,sec-v6.0". - - - fsl,sec-era - Usage: optional - Value type: <u32> - Definition: A standard property. Define the 'ERA' of the SEC - device. - - - #address-cells - Usage: required - Value type: <u32> - Definition: A standard property. Defines the number of cells - for representing physical addresses in child nodes. - - - #size-cells - Usage: required - Value type: <u32> - Definition: A standard property. Defines the number of cells - for representing the size of physical addresses in - child nodes. - - - reg - Usage: required - Value type: <prop-encoded-array> - Definition: A standard property. Specifies the physical - address and length of the SEC 6 configuration registers. - - - ranges - Usage: required - Value type: <prop-encoded-array> - Definition: A standard property. Specifies the physical address - range of the SEC 6.0 register space (-SNVS not included). A - triplet that includes the child address, parent address, & - length. - - Note: All other standard properties (see the Devicetree Specification) - are allowed but are optional. 
- -EXAMPLE - crypto@a0000 { - compatible = "fsl,sec-v6.0"; - fsl,sec-era = <6>; - #address-cells = <1>; - #size-cells = <1>; - reg = <0xa0000 0x20000>; - ranges = <0 0xa0000 0x20000>; - }; - -===================================================================== -Job Ring (JR) Node - - Child of the crypto node defines data processing interface to SEC 6 - across the peripheral bus for purposes of processing - cryptographic descriptors. The specified address - range can be made visible to one (or more) cores. - The interrupt defined for this node is controlled within - the address range of this node. - - - compatible - Usage: required - Value type: <string> - Definition: Must include "fsl,sec-v6.0-job-ring". - - - reg - Usage: required - Value type: <prop-encoded-array> - Definition: Specifies a two JR parameters: an offset from - the parent physical address and the length the JR registers. - - - interrupts - Usage: required - Value type: <prop_encoded-array> - Definition: Specifies the interrupts generated by this - device. The value of the interrupts property - consists of one interrupt specifier. The format - of the specifier is defined by the binding document - describing the node's interrupt parent. - -EXAMPLE - jr@1000 { - compatible = "fsl,sec-v6.0-job-ring"; - reg = <0x1000 0x1000>; - interrupts = <49 2 0 0>; - }; - -=================================================================== -Full Example - -Since some chips may contain more than one SEC, the dtsi contains -only the node contents, not the node itself. A chip using the SEC -should include the dtsi inside each SEC node. Example: - -In qoriq-sec6.0.dtsi: - - compatible = "fsl,sec-v6.0"; - fsl,sec-era = <6>; - #address-cells = <1>; - #size-cells = <1>; - - jr@1000 { - compatible = "fsl,sec-v6.0-job-ring", - "fsl,sec-v5.2-job-ring", - "fsl,sec-v5.0-job-ring", - "fsl,sec-v4.4-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x1000 0x1000>; - }; - - jr@2000 { - compatible = "fsl,sec-v6.0-job-ring", - "fsl,sec-v5.2-job-ring", - "fsl,sec-v5.0-job-ring", - "fsl,sec-v4.4-job-ring", - "fsl,sec-v4.0-job-ring"; - reg = <0x2000 0x1000>; - }; - -In the C293 device tree, we add the include of public property: - - crypto@a0000 { - /include/ "qoriq-sec6.0.dtsi" - } - - crypto@a0000 { - reg = <0xa0000 0x20000>; - ranges = <0 0xa0000 0x20000>; - - jr@1000 { - interrupts = <49 2 0 0>; - }; - - jr@2000 { - interrupts = <50 2 0 0>; - }; - }; diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml b/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml new file mode 100644 index 000000000000..2bfac9d1c020 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/hisilicon,hip06-sec.yaml @@ -0,0 +1,134 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/hisilicon,hip06-sec.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Hisilicon hip06/hip07 Security Accelerator + +maintainers: + - Jonathan Cameron <Jonathan.Cameron@huawei.com> + +properties: + compatible: + enum: + - hisilicon,hip06-sec + - hisilicon,hip07-sec + + reg: + items: + - description: Registers for backend processing engines + - description: Registers for common functionality + - description: Registers for queue 0 + - description: Registers for queue 1 + - description: Registers for queue 2 + - description: Registers for queue 3 + - description: Registers for queue 4 + - description: Registers for queue 5 + - description: Registers for queue 6 + - description: 
Registers for queue 7 + - description: Registers for queue 8 + - description: Registers for queue 9 + - description: Registers for queue 10 + - description: Registers for queue 11 + - description: Registers for queue 12 + - description: Registers for queue 13 + - description: Registers for queue 14 + - description: Registers for queue 15 + + interrupts: + items: + - description: SEC unit error queue interrupt + - description: Completion interrupt for queue 0 + - description: Error interrupt for queue 0 + - description: Completion interrupt for queue 1 + - description: Error interrupt for queue 1 + - description: Completion interrupt for queue 2 + - description: Error interrupt for queue 2 + - description: Completion interrupt for queue 3 + - description: Error interrupt for queue 3 + - description: Completion interrupt for queue 4 + - description: Error interrupt for queue 4 + - description: Completion interrupt for queue 5 + - description: Error interrupt for queue 5 + - description: Completion interrupt for queue 6 + - description: Error interrupt for queue 6 + - description: Completion interrupt for queue 7 + - description: Error interrupt for queue 7 + - description: Completion interrupt for queue 8 + - description: Error interrupt for queue 8 + - description: Completion interrupt for queue 9 + - description: Error interrupt for queue 9 + - description: Completion interrupt for queue 10 + - description: Error interrupt for queue 10 + - description: Completion interrupt for queue 11 + - description: Error interrupt for queue 11 + - description: Completion interrupt for queue 12 + - description: Error interrupt for queue 12 + - description: Completion interrupt for queue 13 + - description: Error interrupt for queue 13 + - description: Completion interrupt for queue 14 + - description: Error interrupt for queue 14 + - description: Completion interrupt for queue 15 + - description: Error interrupt for queue 15 + + dma-coherent: true + + iommus: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - dma-coherent + +additionalProperties: false + +examples: + - | + bus { + #address-cells = <2>; + #size-cells = <2>; + + crypto@400d2000000 { + compatible = "hisilicon,hip07-sec"; + reg = <0x400 0xd0000000 0x0 0x10000 + 0x400 0xd2000000 0x0 0x10000 + 0x400 0xd2010000 0x0 0x10000 + 0x400 0xd2020000 0x0 0x10000 + 0x400 0xd2030000 0x0 0x10000 + 0x400 0xd2040000 0x0 0x10000 + 0x400 0xd2050000 0x0 0x10000 + 0x400 0xd2060000 0x0 0x10000 + 0x400 0xd2070000 0x0 0x10000 + 0x400 0xd2080000 0x0 0x10000 + 0x400 0xd2090000 0x0 0x10000 + 0x400 0xd20a0000 0x0 0x10000 + 0x400 0xd20b0000 0x0 0x10000 + 0x400 0xd20c0000 0x0 0x10000 + 0x400 0xd20d0000 0x0 0x10000 + 0x400 0xd20e0000 0x0 0x10000 + 0x400 0xd20f0000 0x0 0x10000 + 0x400 0xd2100000 0x0 0x10000>; + interrupts = <576 4>, + <577 1>, <578 4>, + <579 1>, <580 4>, + <581 1>, <582 4>, + <583 1>, <584 4>, + <585 1>, <586 4>, + <587 1>, <588 4>, + <589 1>, <590 4>, + <591 1>, <592 4>, + <593 1>, <594 4>, + <595 1>, <596 4>, + <597 1>, <598 4>, + <599 1>, <600 4>, + <601 1>, <602 4>, + <603 1>, <604 4>, + <605 1>, <606 4>, + <607 1>, <608 4>; + dma-coherent; + iommus = <&p1_smmu_alg_a 0x600>; + }; + }; diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt deleted file mode 100644 index d28fd1af01b4..000000000000 --- a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt +++ /dev/null @@ -1,67 +0,0 @@ -* Hisilicon hip07 Security Accelerator (SEC) - 
-Required properties: -- compatible: Must contain one of - - "hisilicon,hip06-sec" - - "hisilicon,hip07-sec" -- reg: Memory addresses and lengths of the memory regions through which - this device is controlled. - Region 0 has registers to control the backend processing engines. - Region 1 has registers for functionality common to all queues. - Regions 2-18 have registers for the 16 individual queues which are isolated - both in hardware and within the driver. -- interrupts: Interrupt specifiers. - Refer to interrupt-controller/interrupts.txt for generic interrupt client node - bindings. - Interrupt 0 is for the SEC unit error queue. - Interrupt 2N + 1 is the completion interrupt for queue N. - Interrupt 2N + 2 is the error interrupt for queue N. -- dma-coherent: The driver assumes coherent dma is possible. - -Optional properties: -- iommus: The SEC units are behind smmu-v3 iommus. - Refer to iommu/arm,smmu-v3.txt for more information. - -Example: - -p1_sec_a: crypto@400d2000000 { - compatible = "hisilicon,hip07-sec"; - reg = <0x400 0xd0000000 0x0 0x10000 - 0x400 0xd2000000 0x0 0x10000 - 0x400 0xd2010000 0x0 0x10000 - 0x400 0xd2020000 0x0 0x10000 - 0x400 0xd2030000 0x0 0x10000 - 0x400 0xd2040000 0x0 0x10000 - 0x400 0xd2050000 0x0 0x10000 - 0x400 0xd2060000 0x0 0x10000 - 0x400 0xd2070000 0x0 0x10000 - 0x400 0xd2080000 0x0 0x10000 - 0x400 0xd2090000 0x0 0x10000 - 0x400 0xd20a0000 0x0 0x10000 - 0x400 0xd20b0000 0x0 0x10000 - 0x400 0xd20c0000 0x0 0x10000 - 0x400 0xd20d0000 0x0 0x10000 - 0x400 0xd20e0000 0x0 0x10000 - 0x400 0xd20f0000 0x0 0x10000 - 0x400 0xd2100000 0x0 0x10000>; - interrupt-parent = <&p1_mbigen_sec_a>; - iommus = <&p1_smmu_alg_a 0x600>; - dma-coherent; - interrupts = <576 4>, - <577 1>, <578 4>, - <579 1>, <580 4>, - <581 1>, <582 4>, - <583 1>, <584 4>, - <585 1>, <586 4>, - <587 1>, <588 4>, - <589 1>, <590 4>, - <591 1>, <592 4>, - <593 1>, <594 4>, - <595 1>, <596 4>, - <597 1>, <598 4>, - <599 1>, <600 4>, - <601 1>, <602 4>, - <603 1>, <604 4>, - <605 1>, <606 4>, - <607 1>, <608 4>; -}; diff --git a/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml b/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml new file mode 100644 index 000000000000..46617561ef94 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/img,hash-accelerator.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/img,hash-accelerator.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Imagination Technologies hardware hash accelerator + +maintainers: + - James Hartley <james.hartley@imgtec.com> + +description: + The hash accelerator provides hardware hashing acceleration for + SHA1, SHA224, SHA256 and MD5 hashes. 
+ +properties: + compatible: + const: img,hash-accelerator + + reg: + items: + - description: Register base address and size + - description: DMA port specifier + + interrupts: + maxItems: 1 + + dmas: + maxItems: 1 + + dma-names: + items: + - const: tx + + clocks: + items: + - description: System clock for hash block registers + - description: Hash clock for data path + + clock-names: + items: + - const: sys + - const: hash + +additionalProperties: false + +required: + - compatible + - reg + - interrupts + - dmas + - dma-names + - clocks + - clock-names + +examples: + - | + #include <dt-bindings/interrupt-controller/mips-gic.h> + #include <dt-bindings/clock/pistachio-clk.h> + + hash@18149600 { + compatible = "img,hash-accelerator"; + reg = <0x18149600 0x100>, <0x18101100 0x4>; + interrupts = <GIC_SHARED 59 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&dma 8 0xffffffff 0>; + dma-names = "tx"; + clocks = <&cr_periph SYS_CLK_HASH>, <&clk_periph PERIPH_CLK_ROM>; + clock-names = "sys", "hash"; + }; diff --git a/Documentation/devicetree/bindings/crypto/img-hash.txt b/Documentation/devicetree/bindings/crypto/img-hash.txt deleted file mode 100644 index 91a3d757d641..000000000000 --- a/Documentation/devicetree/bindings/crypto/img-hash.txt +++ /dev/null @@ -1,27 +0,0 @@ -Imagination Technologies hardware hash accelerator - -The hash accelerator provides hardware hashing acceleration for -SHA1, SHA224, SHA256 and MD5 hashes - -Required properties: - -- compatible : "img,hash-accelerator" -- reg : Offset and length of the register set for the module, and the DMA port -- interrupts : The designated IRQ line for the hashing module. -- dmas : DMA specifier as per Documentation/devicetree/bindings/dma/dma.txt -- dma-names : Should be "tx" -- clocks : Clock specifiers -- clock-names : "sys" Used to clock the hash block registers - "hash" Used to clock data through the accelerator - -Example: - - hash: hash@18149600 { - compatible = "img,hash-accelerator"; - reg = <0x18149600 0x100>, <0x18101100 0x4>; - interrupts = <GIC_SHARED 59 IRQ_TYPE_LEVEL_HIGH>; - dmas = <&dma 8 0xffffffff 0>; - dma-names = "tx"; - clocks = <&cr_periph SYS_CLK_HASH>, <&clk_periph PERIPH_CLK_ROM>; - clock-names = "sys", "hash"; - }; diff --git a/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml b/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml new file mode 100644 index 000000000000..b44d36c50ec4 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/marvell,orion-crypto.yaml @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/marvell,orion-crypto.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Marvell Cryptographic Engines And Security Accelerator + +maintainers: + - Andrew Lunn <andrew@lunn.ch> + - Boris Brezillon <bbrezillon@kernel.org> + +description: | + Marvell Cryptographic Engines And Security Accelerator + +properties: + compatible: + enum: + - marvell,armada-370-crypto + - marvell,armada-xp-crypto + - marvell,armada-375-crypto + - marvell,armada-38x-crypto + - marvell,dove-crypto + - marvell,kirkwood-crypto + - marvell,orion-crypto + + reg: + minItems: 1 + items: + - description: Registers region + - description: SRAM region + deprecated: true + + reg-names: + minItems: 1 + items: + - const: regs + - const: sram + deprecated: true + + interrupts: + description: One interrupt for each CESA engine + minItems: 1 + maxItems: 2 + + clocks: + description: One or two clocks for each CESA engine + 
minItems: 1 + maxItems: 4 + + clock-names: + minItems: 1 + items: + - const: cesa0 + - const: cesa1 + - const: cesaz0 + - const: cesaz1 + + marvell,crypto-srams: + description: Phandle(s) to crypto SRAM. + $ref: /schemas/types.yaml#/definitions/phandle-array + minItems: 1 + maxItems: 2 + items: + maxItems: 1 + + marvell,crypto-sram-size: + description: SRAM size reserved for crypto operations. + $ref: /schemas/types.yaml#/definitions/uint32 + default: 0x800 + +required: + - compatible + - reg + - reg-names + - interrupts + - marvell,crypto-srams + +allOf: + - if: + not: + properties: + compatible: + enum: + - marvell,kirkwood-crypto + - marvell,orion-crypto + then: + required: + - clocks + - if: + properties: + compatible: + contains: + enum: + - marvell,armada-370-crypto + - marvell,armada-375-crypto + - marvell,armada-38x-crypto + - marvell,armada-xp-crypto + then: + required: + - clock-names + - if: + properties: + compatible: + contains: + enum: + - marvell,armada-375-crypto + - marvell,armada-38x-crypto + then: + properties: + clocks: + minItems: 4 + clock-names: + minItems: 4 + else: + properties: + clocks: + maxItems: 2 + clock-names: + maxItems: 2 + +additionalProperties: false + +examples: + - | + crypto@30000 { + compatible = "marvell,orion-crypto"; + reg = <0x30000 0x10000>; + reg-names = "regs"; + interrupts = <22>; + marvell,crypto-srams = <&crypto_sram>; + marvell,crypto-sram-size = <0x600>; + }; diff --git a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt deleted file mode 100644 index 28d3f2496b89..000000000000 --- a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt +++ /dev/null @@ -1,44 +0,0 @@ -Marvell Cryptographic Engines And Security Accelerator - -Required properties: -- compatible: should be one of the following string - "marvell,orion-crypto" - "marvell,kirkwood-crypto" - "marvell,dove-crypto" - "marvell,armada-370-crypto" - "marvell,armada-xp-crypto" - "marvell,armada-375-crypto" - "marvell,armada-38x-crypto" -- reg: base physical address of the engine and length of memory mapped - region. Can also contain an entry for the SRAM attached to the CESA, - but this representation is deprecated and marvell,crypto-srams should - be used instead -- reg-names: "regs". Can contain an "sram" entry, but this representation - is deprecated and marvell,crypto-srams should be used instead -- interrupts: interrupt number -- clocks: reference to the crypto engines clocks. This property is not - required for orion and kirkwood platforms -- clock-names: "cesaX" and "cesazX", X should be replaced by the crypto engine - id. - This property is not required for the orion and kirkwoord - platforms. 
- "cesazX" clocks are not required on armada-370 platforms -- marvell,crypto-srams: phandle to crypto SRAM definitions - -Optional properties: -- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not - specified the whole SRAM is used (2KB) - - -Examples: - - crypto@90000 { - compatible = "marvell,armada-xp-crypto"; - reg = <0x90000 0x10000>; - reg-names = "regs"; - interrupts = <48>, <49>; - clocks = <&gateclk 23>, <&gateclk 23>; - clock-names = "cesa0", "cesa1"; - marvell,crypto-srams = <&crypto_sram0>, <&crypto_sram1>; - marvell,crypto-sram-size = <0x600>; - }; diff --git a/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt b/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt deleted file mode 100644 index 450da3661cad..000000000000 --- a/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt +++ /dev/null @@ -1,25 +0,0 @@ -MediaTek cryptographic accelerators - -Required properties: -- compatible: Should be "mediatek,eip97-crypto" -- reg: Address and length of the register set for the device -- interrupts: Should contain the five crypto engines interrupts in numeric - order. These are global system and four descriptor rings. -- clocks: the clock used by the core -- clock-names: Must contain "cryp". -- power-domains: Must contain a reference to the PM domain. - - -Example: - crypto: crypto@1b240000 { - compatible = "mediatek,eip97-crypto"; - reg = <0 0x1b240000 0 0x20000>; - interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>, - <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>, - <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>, - <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>, - <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>; - clocks = <ðsys CLK_ETHSYS_CRYPTO>; - clock-names = "cryp"; - power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; - }; diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt deleted file mode 100644 index d9b92e2f3138..000000000000 --- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt +++ /dev/null @@ -1,32 +0,0 @@ -Marvell Cryptographic Engines And Security Accelerator - -Required properties: -- compatible: should be one of the following string - "marvell,orion-crypto" - "marvell,kirkwood-crypto" - "marvell,dove-crypto" -- reg: base physical address of the engine and length of memory mapped - region. Can also contain an entry for the SRAM attached to the CESA, - but this representation is deprecated and marvell,crypto-srams should - be used instead -- reg-names: "regs". Can contain an "sram" entry, but this representation - is deprecated and marvell,crypto-srams should be used instead -- interrupts: interrupt number -- clocks: reference to the crypto engines clocks. 
This property is only - required for Dove platforms -- marvell,crypto-srams: phandle to crypto SRAM definitions - -Optional properties: -- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not - specified the whole SRAM is used (2KB) - -Examples: - - crypto@30000 { - compatible = "marvell,orion-crypto"; - reg = <0x30000 0x10000>; - reg-names = "regs"; - interrupts = <22>; - marvell,crypto-srams = <&crypto_sram>; - marvell,crypto-sram-size = <0x600>; - }; diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml index 3f35122f7873..e009cb712fb8 100644 --- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml @@ -45,6 +45,7 @@ properties: - items: - enum: + - qcom,qcs615-qce - qcom,qcs8300-qce - qcom,sa8775p-qce - qcom,sc7280-qce diff --git a/Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml new file mode 100644 index 000000000000..5536319c49c3 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/econet,en751221-intc.yaml @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/econet,en751221-intc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: EcoNet EN751221 Interrupt Controller + +maintainers: + - Caleb James DeLisle <cjd@cjdns.fr> + +description: + The EcoNet EN751221 Interrupt Controller is a simple interrupt controller + designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can + be routed to either VPE but not both, so to support per-CPU interrupts, a + secondary IRQ number is allocated to control masking/unmasking on VPE#1. For + lack of a better term we call these "shadow interrupts". The assignment of + shadow interrupts is defined by the SoC integrator when wiring the interrupt + lines, so they are configurable in the device tree. + +allOf: + - $ref: /schemas/interrupt-controller.yaml# + +properties: + compatible: + const: econet,en751221-intc + + reg: + maxItems: 1 + + "#interrupt-cells": + const: 1 + + interrupt-controller: true + + interrupts: + maxItems: 1 + description: Interrupt line connecting this controller to its parent. + + econet,shadow-interrupts: + $ref: /schemas/types.yaml#/definitions/uint32-matrix + description: + An array of interrupt number pairs where each pair represents a shadow + interrupt relationship. The first number in each pair is the primary IRQ, + and the second is its shadow IRQ used for VPE#1 control. For example, + <8 3> means IRQ 8 is shadowed by IRQ 3, so IRQ 3 cannot be mapped, but + when VPE#1 requests IRQ 8, it will manipulate the IRQ 3 mask bit. + minItems: 1 + maxItems: 20 + items: + items: + - description: primary per-CPU IRQ + - description: shadow IRQ number + +required: + - compatible + - reg + - interrupt-controller + - "#interrupt-cells" + - interrupts + +additionalProperties: false + +examples: + - | + interrupt-controller@1fb40000 { + compatible = "econet,en751221-intc"; + reg = <0x1fb40000 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + + econet,shadow-interrupts = <7 2>, <8 3>, <13 12>, <30 29>; + }; +... 
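As a hedged illustration of how the ``econet,shadow-interrupts`` pairs described in the binding above could be consumed, the sketch below walks the property with the generic OF helpers; it is not the actual driver, and the ``demo_*`` bookkeeping hook is hypothetical.

.. code-block:: c

	#include <linux/errno.h>
	#include <linux/of.h>

	static int demo_parse_shadow(struct device_node *np)
	{
		int i, len;

		len = of_property_count_u32_elems(np, "econet,shadow-interrupts");
		if (len < 0)
			return 0;	/* property is optional */
		if (len % 2)
			return -EINVAL;	/* must be (primary, shadow) pairs */

		for (i = 0; i < len; i += 2) {
			u32 primary, shadow;

			if (of_property_read_u32_index(np, "econet,shadow-interrupts",
						       i, &primary) ||
			    of_property_read_u32_index(np, "econet,shadow-interrupts",
						       i + 1, &shadow))
				return -EINVAL;

			/* demo_mark_shadow(primary, shadow): hypothetical bookkeeping */
		}
		return 0;
	}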
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml b/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml index e1ffd55fa7bf..f6b8b1d92f79 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml @@ -18,7 +18,9 @@ allOf: properties: compatible: - const: sophgo,sg2042-msi + enum: + - sophgo,sg2042-msi + - sophgo,sg2044-msi reg: items: diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml index e0ec53bc10c6..1525a50ded47 100644 --- a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/can/microchip,mcp2510.yaml# +$id: http://devicetree.org/schemas/net/can/microchip,mcp2510.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Microchip MCP251X stand-alone CAN controller diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml index f75000e3093d..214caa4ec3d5 100644 --- a/Documentation/devicetree/bindings/pci/pci-ep.yaml +++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml @@ -17,6 +17,24 @@ properties: $nodename: pattern: "^pcie-ep@" + iommu-map: + $ref: /schemas/types.yaml#/definitions/uint32-matrix + items: + items: + - description: Device ID (see msi-map) base + maximum: 0x7ffff + - description: phandle to IOMMU + - description: IOMMU specifier base (currently always 1 cell) + - description: Number of Device IDs + maximum: 0x80000 + + iommu-map-mask: + description: + A mask to be applied to each Device ID prior to being mapped to an + IOMMU specifier per the iommu-map property. + $ref: /schemas/types.yaml#/definitions/uint32 + maximum: 0x7ffff + max-functions: description: Maximum number of functions that can be configured $ref: /schemas/types.yaml#/definitions/uint8 @@ -35,6 +53,56 @@ properties: $ref: /schemas/types.yaml#/definitions/uint32 enum: [ 1, 2, 3, 4 ] + msi-map: + description: | + Maps a Device ID to an MSI and associated MSI specifier data. + + A PCI Endpoint (EP) can use MSI as a doorbell function. This is achieved by + mapping the MSI controller's address into PCI BAR<n>. The PCI Root Complex + can write to this BAR<n>, triggering the EP to generate IRQ. This notifies + the EP-side driver of an event, eliminating the need for the driver to + continuously poll for status changes. + + However, the EP cannot rely on Requester ID (RID) because the RID is + determined by the PCI topology of the host system. Since the EP may be + connected to different PCI hosts, the RID can vary between systems and is + therefore not a reliable identifier. + + Each EP can support up to 8 physical functions and up to 65,536 virtual + functions. To uniquely identify each child device, a device ID is defined + as + - Bits [2:0] for the function number (func) + - Bits [18:3] for the virtual function index (vfunc) + + The resulting device ID is computed as: + + (func & 0x7) | (vfunc << 3) + + The property is an arbitrary number of tuples of + (device-id-base, msi, msi-base,length). + + Any Device ID id in the interval [id-base, id-base + length) is + associated with the listed MSI, with the MSI specifier + (id - id-base + msi-base). 
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix + items: + items: + - description: The Device ID base matched by the entry + maximum: 0x7ffff + - description: phandle to msi-controller node + - description: (optional) The msi-specifier produced for the first + Device ID matched by the entry. Currently, msi-specifier is 0 or + 1 cells. + - description: The length of consecutive Device IDs following the + Device ID base + maximum: 0x80000 + + msi-map-mask: + description: A mask to be applied to each Device ID prior to being + mapped to an msi-specifier per the msi-map property. + $ref: /schemas/types.yaml#/definitions/uint32 + maximum: 0x7ffff + num-lanes: description: maximum number of lanes $ref: /schemas/types.yaml#/definitions/uint32 diff --git a/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml b/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml index ca71b400bcae..fcc5be80142d 100644 --- a/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml +++ b/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml @@ -4,9 +4,9 @@ $id: http://devicetree.org/schemas/rng/rockchip,rk3588-rng.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Rockchip RK3588 TRNG +title: Rockchip RK3576/RK3588 TRNG -description: True Random Number Generator on Rockchip RK3588 SoC +description: True Random Number Generator on Rockchip RK3576/RK3588 SoCs maintainers: - Nicolas Frattaroli <nicolas.frattaroli@collabora.com> @@ -14,6 +14,7 @@ maintainers: properties: compatible: enum: + - rockchip,rk3576-rng - rockchip,rk3588-rng reg: diff --git a/Documentation/driver-api/early-userspace/buffer-format.rst b/Documentation/driver-api/early-userspace/buffer-format.rst index 7f74e301fdf3..726bfa2fe70d 100644 --- a/Documentation/driver-api/early-userspace/buffer-format.rst +++ b/Documentation/driver-api/early-userspace/buffer-format.rst @@ -4,20 +4,18 @@ initramfs buffer format Al Viro, H. Peter Anvin -Last revision: 2002-01-13 - -Starting with kernel 2.5.x, the old "initial ramdisk" protocol is -getting {replaced/complemented} with the new "initial ramfs" -(initramfs) protocol. The initramfs contents is passed using the same -memory buffer protocol used by the initrd protocol, but the contents +With kernel 2.5.x, the old "initial ramdisk" protocol was complemented +with an "initial ramfs" protocol. The initramfs content is passed +using the same memory buffer protocol used by initrd, but the content is different. The initramfs buffer contains an archive which is -expanded into a ramfs filesystem; this document details the format of -the initramfs buffer format. +expanded into a ramfs filesystem; this document details the initramfs +buffer format. The initramfs buffer format is based around the "newc" or "crc" CPIO formats, and can be created with the cpio(1) utility. The cpio -archive can be compressed using gzip(1). One valid version of an -initramfs buffer is thus a single .cpio.gz file. +archive can be compressed using gzip(1), or any other algorithm provided +via CONFIG_DECOMPRESS_*. One valid version of an initramfs buffer is +thus a single .cpio.gz file. 
The full format of the initramfs buffer is defined by the following grammar, where:: @@ -25,12 +23,20 @@ grammar, where:: * is used to indicate "0 or more occurrences of" (|) indicates alternatives + indicates concatenation - GZIP() indicates the gzip(1) of the operand + GZIP() indicates gzip compression of the operand + BZIP2() indicates bzip2 compression of the operand + LZMA() indicates lzma compression of the operand + XZ() indicates xz compression of the operand + LZO() indicates lzo compression of the operand + LZ4() indicates lz4 compression of the operand + ZSTD() indicates zstd compression of the operand ALGN(n) means padding with null bytes to an n-byte boundary - initramfs := ("\0" | cpio_archive | cpio_gzip_archive)* + initramfs := ("\0" | cpio_archive | cpio_compressed_archive)* - cpio_gzip_archive := GZIP(cpio_archive) + cpio_compressed_archive := (GZIP(cpio_archive) | BZIP2(cpio_archive) + | LZMA(cpio_archive) | XZ(cpio_archive) | LZO(cpio_archive) + | LZ4(cpio_archive) | ZSTD(cpio_archive)) cpio_archive := cpio_file* + (<nothing> | cpio_trailer) @@ -75,6 +81,8 @@ c_chksum 8 bytes Checksum of data field if c_magic is 070702; The c_mode field matches the contents of st_mode returned by stat(2) on Linux, and encodes the file type and file permissions. +c_mtime is ignored unless CONFIG_INITRAMFS_PRESERVE_MTIME=y is set. + The c_filesize should be zero for any file which is not a regular file or symlink. diff --git a/Documentation/filesystems/bcachefs/casefolding.rst b/Documentation/filesystems/bcachefs/casefolding.rst index ba5de97d155f..871a38f557e8 100644 --- a/Documentation/filesystems/bcachefs/casefolding.rst +++ b/Documentation/filesystems/bcachefs/casefolding.rst @@ -88,3 +88,21 @@ This would fail if negative dentry's were cached. This is slightly suboptimal, but could be fixed in future with some vfs work. + +References +---------- + +(from Peter Anvin, on the list) + +It is worth noting that Microsoft has basically declared their +"recommended" case folding (upcase) table to be permanently frozen (for +new filesystem instances in the case where they use an on-disk +translation table created at format time.) As far as I know they have +never supported anything other than 1:1 conversion of BMP code points, +nor normalization. + +The exFAT specification enumerates the full recommended upcase table, +although in a somewhat annoying format (basically a hex dump of +compressed data): + +https://learn.microsoft.com/en-us/windows/win32/fileio/exfat-specification diff --git a/Documentation/filesystems/bcachefs/future/idle_work.rst b/Documentation/filesystems/bcachefs/future/idle_work.rst new file mode 100644 index 000000000000..59a332509dcd --- /dev/null +++ b/Documentation/filesystems/bcachefs/future/idle_work.rst @@ -0,0 +1,78 @@ +Idle/background work classes design doc: + +Right now, our behaviour at idle isn't ideal, it was designed for servers that +would be under sustained load, to keep pending work at a "medium" level, to +let work build up so we can process it in more efficient batches, while also +giving headroom for bursts in load. + +But for desktops or mobile - scenarios where work is less sustained and power +usage is more important - we want to operate differently, with a "rush to +idle" so the system can go to sleep. We don't want to be dribbling out +background work while the system should be idle. 
+ +The complicating factor is that there are a number of background tasks, which +form a hierarchy (or a digraph, depending on how you divide it up) - one +background task may generate work for another. + +Thus proper idle detection needs to model this hierarchy. + +- Foreground writes +- Page cache writeback +- Copygc, rebalance +- Journal reclaim + +When we implement idle detection and rush to idle, we need to be careful not +to disturb too much the existing behaviour that works reasonably well when the +system is under sustained load (or perhaps improve it in the case of +rebalance, which currently does not actively attempt to let work batch up). + +SUSTAINED LOAD REGIME +--------------------- + +When the system is under continuous load, we want these jobs to run +continuously - this is perhaps best modelled with a P/D controller, where +they'll be trying to keep a target value (i.e. fragmented disk space, +available journal space) roughly in the middle of some range. + +The goal under sustained load is to balance our ability to handle load spikes +without running out of x resource (free disk space, free space in the +journal), while also letting some work accumulate to be batched (or become +unnecessary). + +For example, we don't want to run copygc too aggressively, because then it +will be evacuating buckets that would have become empty (been overwritten or +deleted) anyways, and we don't want to wait until we're almost out of free +space because then the system will behave unpredictably - suddenly we're doing +a lot more work to service each write and the system becomes much slower. + +IDLE REGIME +----------- + +When the system becomes idle, we should start flushing our pending work +quicker so the system can go to sleep. + +Note that the definition of "idle" depends on where in the hierarchy a task +is - a task should start flushing work more quickly when the task above it has +stopped generating new work. + +e.g. rebalance should start flushing more quickly when page cache writeback is +idle, and journal reclaim should only start flushing more quickly when both +copygc and rebalance are idle. + +It's important to let work accumulate when more work is still incoming and we +still have room, because flushing is always more efficient if we let it batch +up. New writes may overwrite data before rebalance moves it, and tasks may be +generating more updates for the btree nodes that journal reclaim needs to flush. + +On idle, how much work we do at each interval should be proportional to the +length of time we have been idle for. If we're idle only for a short duration, +we shouldn't flush everything right away; the system might wake up and start +generating new work soon, and flushing immediately might end up doing a lot of +work that would have been unnecessary if we'd allowed things to batch more. + +To summarize, we will need: + + - A list of classes for background tasks that generate work, which will + include one "foreground" class. + - Tracking for each class - "Am I doing work, or have I gone to sleep?" + - And each class should check the class above it when deciding how much work to issue. diff --git a/Documentation/filesystems/bcachefs/index.rst b/Documentation/filesystems/bcachefs/index.rst index 3864d0ae89c1..e5c4c2120b93 100644 --- a/Documentation/filesystems/bcachefs/index.rst +++ b/Documentation/filesystems/bcachefs/index.rst @@ -29,3 +29,10 @@ At this moment, only a few of these are described here. casefolding errorcodes + +Future design +------------- +..
toctree:: + :maxdepth: 1 + + future/idle_work diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst index c293f8e37468..7ddb235aee9d 100644 --- a/Documentation/filesystems/erofs.rst +++ b/Documentation/filesystems/erofs.rst @@ -128,6 +128,7 @@ device=%s Specify a path to an extra device to be used together. fsid=%s Specify a filesystem image ID for Fscache back-end. domain_id=%s Specify a domain ID in fscache mode so that different images with the same blobs under a given domain ID can share storage. +fsoffset=%llu Specify block-aligned filesystem offset for the primary device. =================== ========================================================= Sysfs Entries diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index e80329908549..29e84d125e02 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -70,7 +70,7 @@ Online attacks -------------- fscrypt (and storage encryption in general) can only provide limited -protection, if any at all, against online attacks. In detail: +protection against online attacks. In detail: Side-channel attacks ~~~~~~~~~~~~~~~~~~~~ @@ -99,16 +99,23 @@ Therefore, any encryption-specific access control checks would merely be enforced by kernel *code* and therefore would be largely redundant with the wide variety of access control mechanisms already available.) -Kernel memory compromise -~~~~~~~~~~~~~~~~~~~~~~~~ +Read-only kernel memory compromise +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Unless `hardware-wrapped keys`_ are used, an attacker who gains the +ability to read from arbitrary kernel memory, e.g. by mounting a +physical attack or by exploiting a kernel security vulnerability, can +compromise all fscrypt keys that are currently in-use. This also +extends to cold boot attacks; if the system is suddenly powered off, +keys the system was using may remain in memory for a short time. -An attacker who compromises the system enough to read from arbitrary -memory, e.g. by mounting a physical attack or by exploiting a kernel -security vulnerability, can compromise all encryption keys that are -currently in use. +However, if hardware-wrapped keys are used, then the fscrypt master +keys and file contents encryption keys (but not other types of fscrypt +subkeys such as filenames encryption keys) are protected from +compromises of arbitrary kernel memory. -However, fscrypt allows encryption keys to be removed from the kernel, -which may protect them from later compromise. +In addition, fscrypt allows encryption keys to be removed from the +kernel, which may protect them from later compromise. In more detail, the FS_IOC_REMOVE_ENCRYPTION_KEY ioctl (or the FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS ioctl) can wipe a master @@ -144,6 +151,24 @@ However, these ioctls have some limitations: accelerator hardware (if used by the crypto API to implement any of the algorithms), or in other places not explicitly considered here. +Full system compromise +~~~~~~~~~~~~~~~~~~~~~~ + +An attacker who gains "root" access and/or the ability to execute +arbitrary kernel code can freely exfiltrate data that is protected by +any in-use fscrypt keys. Thus, usually fscrypt provides no meaningful +protection in this scenario. (Data that is protected by a key that is +absent throughout the entire attack remains protected, modulo the +limitations of key removal mentioned above in the case where the key +was removed prior to the attack.) 
+ +However, if `hardware-wrapped keys`_ are used, such attackers will be +unable to exfiltrate the master keys or file contents keys in a form +that will be usable after the system is powered off. This may be +useful if the attacker is significantly time-limited and/or +bandwidth-limited, so they can only exfiltrate some data and need to +rely on a later offline attack to exfiltrate the rest of it. + Limitations of v1 policies ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -170,6 +195,10 @@ policies on all new encrypted directories. Key hierarchy ============= +Note: this section assumes the use of raw keys rather than +hardware-wrapped keys. The use of hardware-wrapped keys modifies the +key hierarchy slightly. For details, see `Hardware-wrapped keys`_. + Master Keys ----------- @@ -832,7 +861,9 @@ a pointer to struct fscrypt_add_key_arg, defined as follows:: struct fscrypt_key_specifier key_spec; __u32 raw_size; __u32 key_id; - __u32 __reserved[8]; + #define FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001 + __u32 flags; + __u32 __reserved[7]; __u8 raw[]; }; @@ -851,7 +882,7 @@ a pointer to struct fscrypt_add_key_arg, defined as follows:: struct fscrypt_provisioning_key_payload { __u32 type; - __u32 __reserved; + __u32 flags; __u8 raw[]; }; @@ -879,24 +910,32 @@ as follows: Alternatively, if ``key_id`` is nonzero, this field must be 0, since in that case the size is implied by the specified Linux keyring key. -- ``key_id`` is 0 if the raw key is given directly in the ``raw`` - field. Otherwise ``key_id`` is the ID of a Linux keyring key of - type "fscrypt-provisioning" whose payload is - struct fscrypt_provisioning_key_payload whose ``raw`` field contains - the raw key and whose ``type`` field matches ``key_spec.type``. - Since ``raw`` is variable-length, the total size of this key's - payload must be ``sizeof(struct fscrypt_provisioning_key_payload)`` - plus the raw key size. The process must have Search permission on - this key. - - Most users should leave this 0 and specify the raw key directly. - The support for specifying a Linux keyring key is intended mainly to +- ``key_id`` is 0 if the key is given directly in the ``raw`` field. + Otherwise ``key_id`` is the ID of a Linux keyring key of type + "fscrypt-provisioning" whose payload is struct + fscrypt_provisioning_key_payload whose ``raw`` field contains the + key, whose ``type`` field matches ``key_spec.type``, and whose + ``flags`` field matches ``flags``. Since ``raw`` is + variable-length, the total size of this key's payload must be + ``sizeof(struct fscrypt_provisioning_key_payload)`` plus the number + of key bytes. The process must have Search permission on this key. + + Most users should leave this 0 and specify the key directly. The + support for specifying a Linux keyring key is intended mainly to allow re-adding keys after a filesystem is unmounted and re-mounted, - without having to store the raw keys in userspace memory. + without having to store the keys in userspace memory. + +- ``flags`` contains optional flags from ``<linux/fscrypt.h>``: + + - FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED: This denotes that the key is a + hardware-wrapped key. See `Hardware-wrapped keys`_. This flag + can't be used if FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR is used. - ``raw`` is a variable-length field which must contain the actual key, ``raw_size`` bytes long. Alternatively, if ``key_id`` is - nonzero, then this field is unused. + nonzero, then this field is unused. 
Note that despite being named + ``raw``, if FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED is specified then it + will contain a wrapped key, not a raw key. For v2 policy keys, the kernel keeps track of which user (identified by effective user ID) added the key, and only allows the key to be @@ -908,8 +947,8 @@ prevent that other user from unexpectedly removing it. Therefore, FS_IOC_ADD_ENCRYPTION_KEY may also be used to add a v2 policy key *again*, even if it's already added by other user(s). In this case, FS_IOC_ADD_ENCRYPTION_KEY will just install a claim to the key for the -current user, rather than actually add the key again (but the raw key -must still be provided, as a proof of knowledge). +current user, rather than actually add the key again (but the key must +still be provided, as a proof of knowledge). FS_IOC_ADD_ENCRYPTION_KEY returns 0 if either the key or a claim to the key was either added or already exists. @@ -918,20 +957,23 @@ FS_IOC_ADD_ENCRYPTION_KEY can fail with the following errors: - ``EACCES``: FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR was specified, but the caller does not have the CAP_SYS_ADMIN capability in the initial - user namespace; or the raw key was specified by Linux key ID but the + user namespace; or the key was specified by Linux key ID but the process lacks Search permission on the key. +- ``EBADMSG``: invalid hardware-wrapped key - ``EDQUOT``: the key quota for this user would be exceeded by adding the key - ``EINVAL``: invalid key size or key specifier type, or reserved bits were set -- ``EKEYREJECTED``: the raw key was specified by Linux key ID, but the - key has the wrong type -- ``ENOKEY``: the raw key was specified by Linux key ID, but no key - exists with that ID +- ``EKEYREJECTED``: the key was specified by Linux key ID, but the key + has the wrong type +- ``ENOKEY``: the key was specified by Linux key ID, but no key exists + with that ID - ``ENOTTY``: this type of filesystem does not implement encryption - ``EOPNOTSUPP``: the kernel was not configured with encryption support for this filesystem, or the filesystem superblock has not - had encryption enabled on it + had encryption enabled on it; or a hardware wrapped key was specified + but the filesystem does not support inline encryption or the hardware + does not support hardware-wrapped keys Legacy method ~~~~~~~~~~~~~ @@ -994,9 +1036,8 @@ or removed by non-root users. These ioctls don't work on keys that were added via the legacy process-subscribed keyrings mechanism. -Before using these ioctls, read the `Kernel memory compromise`_ -section for a discussion of the security goals and limitations of -these ioctls. +Before using these ioctls, read the `Online attacks`_ section for a +discussion of the security goals and limitations of these ioctls. FS_IOC_REMOVE_ENCRYPTION_KEY ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1316,7 +1357,8 @@ inline encryption hardware doesn't have the needed crypto capabilities (e.g. support for the needed encryption algorithm and data unit size) and where blk-crypto-fallback is unusable. (For blk-crypto-fallback to be usable, it must be enabled in the kernel configuration with -CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y.) +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y, and the file must be +protected by a raw key rather than a hardware-wrapped key.) Currently fscrypt always uses the filesystem block size (which is usually 4096 bytes) as the data unit size. Therefore, it can only use @@ -1324,7 +1366,76 @@ inline encryption hardware that supports that data unit size. 
Inline encryption doesn't affect the ciphertext or other aspects of the on-disk format, so users may freely switch back and forth between -using "inlinecrypt" and not using "inlinecrypt". +using "inlinecrypt" and not using "inlinecrypt". An exception is that +files that are protected by a hardware-wrapped key can only be +encrypted/decrypted by the inline encryption hardware and therefore +can only be accessed when the "inlinecrypt" mount option is used. For +more information about hardware-wrapped keys, see below. + +Hardware-wrapped keys +--------------------- + +fscrypt supports using *hardware-wrapped keys* when the inline +encryption hardware supports it. Such keys are only present in kernel +memory in wrapped (encrypted) form; they can only be unwrapped +(decrypted) by the inline encryption hardware and are temporally bound +to the current boot. This prevents the keys from being compromised if +kernel memory is leaked. This is done without limiting the number of +keys that can be used and while still allowing the execution of +cryptographic tasks that are tied to the same key but can't use inline +encryption hardware, e.g. filenames encryption. + +Note that hardware-wrapped keys aren't specific to fscrypt; they are a +block layer feature (part of *blk-crypto*). For more details about +hardware-wrapped keys, see the block layer documentation at +:ref:`Documentation/block/inline-encryption.rst +<hardware_wrapped_keys>`. The rest of this section just focuses on +the details of how fscrypt can use hardware-wrapped keys. + +fscrypt supports hardware-wrapped keys by allowing the fscrypt master +keys to be hardware-wrapped keys as an alternative to raw keys. To +add a hardware-wrapped key with `FS_IOC_ADD_ENCRYPTION_KEY`_, +userspace must specify FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED in the +``flags`` field of struct fscrypt_add_key_arg and also in the +``flags`` field of struct fscrypt_provisioning_key_payload when +applicable. The key must be in ephemerally-wrapped form, not +long-term wrapped form. + +Some limitations apply. First, files protected by a hardware-wrapped +key are tied to the system's inline encryption hardware. Therefore +they can only be accessed when the "inlinecrypt" mount option is used, +and they can't be included in portable filesystem images. Second, +currently the hardware-wrapped key support is only compatible with +`IV_INO_LBLK_64 policies`_ and `IV_INO_LBLK_32 policies`_, as it +assumes that there is just one file contents encryption key per +fscrypt master key rather than one per file. Future work may address +this limitation by passing per-file nonces down the storage stack to +allow the hardware to derive per-file keys. + +Implementation-wise, to encrypt/decrypt the contents of files that are +protected by a hardware-wrapped key, fscrypt uses blk-crypto, +attaching the hardware-wrapped key to the bio crypt contexts. As is +the case with raw keys, the block layer will program the key into a +keyslot when it isn't already in one. However, when programming a +hardware-wrapped key, the hardware doesn't program the given key +directly into a keyslot but rather unwraps it (using the hardware's +ephemeral wrapping key) and derives the inline encryption key from it. +The inline encryption key is the key that actually gets programmed +into a keyslot, and it is never exposed to software. 
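To make the ioctl usage described above concrete, here is a hedged userspace sketch of adding a hardware-wrapped key with FS_IOC_ADD_ENCRYPTION_KEY. It assumes an ephemerally-wrapped key blob has already been obtained from the inline encryption hardware; the helper name is hypothetical and error handling is minimal::

    #include <linux/fscrypt.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* mnt_fd: any fd on the filesystem; blob/blob_len: the wrapped key blob. */
    static int add_hw_wrapped_key(int mnt_fd, const void *blob, size_t blob_len)
    {
            struct fscrypt_add_key_arg *arg;
            int ret;

            arg = calloc(1, sizeof(*arg) + blob_len);
            if (!arg)
                    return -1;

            /* Hardware-wrapped keys require a v2 (identifier-based) key_spec. */
            arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
            arg->raw_size = blob_len;
            arg->flags = FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
            memcpy(arg->raw, blob, blob_len);

            ret = ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
            /* On success the kernel fills in arg->key_spec.u.identifier. */
            free(arg);
            return ret;
    }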
+ +However, fscrypt doesn't just do file contents encryption; it also +uses its master keys to derive filenames encryption keys, key +identifiers, and sometimes some more obscure types of subkeys such as +dirhash keys. So even with file contents encryption out of the +picture, fscrypt still needs a raw key to work with. To get such a +key from a hardware-wrapped key, fscrypt asks the inline encryption +hardware to derive a cryptographically isolated "software secret" from +the hardware-wrapped key. fscrypt uses this "software secret" to key +its KDF to derive all subkeys other than file contents keys. + +Note that this implies that the hardware-wrapped key feature only +protects the file contents encryption keys. It doesn't protect other +fscrypt subkeys such as filenames encryption keys. Direct I/O support ================== @@ -1409,7 +1520,7 @@ read the ciphertext into the page cache and decrypt it in-place. The folio lock must be held until decryption has finished, to prevent the folio from becoming visible to userspace prematurely. -For the write path (->writepage()) of regular files, filesystems +For the write path (->writepages()) of regular files, filesystems cannot encrypt data in-place in the page cache, since the cached plaintext must be preserved. Instead, filesystems must encrypt into a temporary buffer or "bounce page", then write out the temporary diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst index e29651a42eec..f2df9b6df988 100644 --- a/Documentation/filesystems/iomap/design.rst +++ b/Documentation/filesystems/iomap/design.rst @@ -243,13 +243,25 @@ The fields are as follows: regular file data. This is only useful for FIEMAP. - * **IOMAP_F_PRIVATE**: Starting with this value, the upper bits can - be set by the filesystem for its own purposes. + * **IOMAP_F_BOUNDARY**: This indicates I/O and its completion must not be + merged with any other I/O or completion. Filesystems must use this when + submitting I/O to devices that cannot handle I/O crossing certain LBAs + (e.g. ZNS devices). This flag applies only to buffered I/O writeback; all + other functions ignore it. + + * **IOMAP_F_PRIVATE**: This flag is reserved for filesystem private use. * **IOMAP_F_ANON_WRITE**: Indicates that (write) I/O does not have a target block assigned to it yet and the file system will do that in the bio submission handler, splitting the I/O as needed. + * **IOMAP_F_ATOMIC_BIO**: This indicates write I/O must be submitted with the + ``REQ_ATOMIC`` flag set in the bio. Filesystems need to set this flag to + inform iomap that the write I/O operation requires torn-write protection + based on HW-offload mechanism. They must also ensure that mapping updates + upon the completion of the I/O must be performed in a single metadata + update. + These flags can be set by iomap itself during file operations. 
The filesystem should supply an ``->iomap_end`` function if it needs to observe these flags: diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 0ec0bb6eb0fb..2e567e341c3b 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -249,7 +249,6 @@ address_space_operations ======================== prototypes:: - int (*writepage)(struct page *page, struct writeback_control *wbc); int (*read_folio)(struct file *, struct folio *); int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *folio); @@ -280,7 +279,6 @@ locking rules: ====================== ======================== ========= =============== ops folio locked i_rwsem invalidate_lock ====================== ======================== ========= =============== -writepage: yes, unlocks (see below) read_folio: yes, unlocks shared writepages: dirty_folio: maybe @@ -309,54 +307,6 @@ completion. ->readahead() unlocks the folios that I/O is attempted on like ->read_folio(). -->writepage() is used for two purposes: for "memory cleansing" and for -"sync". These are quite different operations and the behaviour may differ -depending upon the mode. - -If writepage is called for sync (wbc->sync_mode != WBC_SYNC_NONE) then -it *must* start I/O against the page, even if that would involve -blocking on in-progress I/O. - -If writepage is called for memory cleansing (sync_mode == -WBC_SYNC_NONE) then its role is to get as much writeout underway as -possible. So writepage should try to avoid blocking against -currently-in-progress I/O. - -If the filesystem is not called for "sync" and it determines that it -would need to block against in-progress I/O to be able to start new I/O -against the page the filesystem should redirty the page with -redirty_page_for_writepage(), then unlock the page and return zero. -This may also be done to avoid internal deadlocks, but rarely. - -If the filesystem is called for sync then it must wait on any -in-progress I/O and then start new I/O. - -The filesystem should unlock the page synchronously, before returning to the -caller, unless ->writepage() returns special WRITEPAGE_ACTIVATE -value. WRITEPAGE_ACTIVATE means that page cannot really be written out -currently, and VM should stop calling ->writepage() on this page for some -time. VM does this by moving page to the head of the active list, hence the -name. - -Unless the filesystem is going to redirty_page_for_writepage(), unlock the page -and return zero, writepage *must* run set_page_writeback() against the page, -followed by unlocking it. Once set_page_writeback() has been run against the -page, write I/O can be submitted and the write I/O completion handler must run -end_page_writeback() once the I/O is complete. If no I/O is submitted, the -filesystem must run end_page_writeback() against the page before returning from -writepage. - -That is: after 2.5.12, pages which are under writeout are *not* locked. Note, -if the filesystem needs the page to be locked during writeout, that is ok, too, -the page is allowed to be unlocked at any point in time between the calls to -set_page_writeback() and end_page_writeback(). - -Note, failure to run either redirty_page_for_writepage() or the combination of -set_page_writeback()/end_page_writeback() on a page submitted to writepage -will leave the page itself marked clean but it will be tagged as dirty in the -radix tree. 
This incoherency can lead to all sorts of hard-to-debug problems -in the filesystem like having dirty inodes at umount and losing written data. - ->writepages() is used for periodic writeback and for syscall-initiated sync operations. The address_space should start I/O against at least ``*nr_to_write`` pages. ``*nr_to_write`` must be decremented for each page @@ -364,8 +314,8 @@ which is written. The address_space implementation may write more (or less) pages than ``*nr_to_write`` asks for, but it should try to be reasonably close. If nr_to_write is NULL, all dirty pages must be written. -writepages should _only_ write pages which are present on -mapping->io_pages. +writepages should _only_ write pages which are present in +mapping->i_pages. ->dirty_folio() is called from various places in the kernel when the target folio is marked as needing writeback. The folio cannot be diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst index d92c276f1575..e149b89118c8 100644 --- a/Documentation/filesystems/mount_api.rst +++ b/Documentation/filesystems/mount_api.rst @@ -671,7 +671,6 @@ The members are as follows: fsparam_bool() fs_param_is_bool fsparam_u32() fs_param_is_u32 fsparam_u32oct() fs_param_is_u32_octal - fsparam_u32hex() fs_param_is_u32_hex fsparam_s32() fs_param_is_s32 fsparam_u64() fs_param_is_u64 fsparam_enum() fs_param_is_enum @@ -755,21 +754,6 @@ process the parameters it is given. * :: - bool validate_constant_table(const struct constant_table *tbl, - size_t tbl_size, - int low, int high, int special); - - Validate a constant table. Checks that all the elements are appropriately - ordered, that there are no duplicates and that the values are between low - and high inclusive, though provision is made for one allowable special - value outside of that range. If no special value is required, special - should just be set to lie inside the low-to-high range. - - If all is good, true is returned. If the table is invalid, errors are - logged to the kernel log buffer and false is returned. - - * :: - bool fs_validate_description(const char *name, const struct fs_parameter_description *desc); diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 3886c14f89f4..939b4b624fad 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -1,33 +1,187 @@ .. SPDX-License-Identifier: GPL-2.0 -================================= -Network Filesystem Helper Library -================================= +=================================== +Network Filesystem Services Library +=================================== .. Contents: - Overview. + - Requests and streams. + - Subrequests. + - Result collection and retry. + - Local caching. + - Content encryption (fscrypt). - Per-inode context. - Inode context helper functions. - - Buffered read helpers. - - Read helper functions. - - Read helper structures. - - Read helper operations. - - Read helper procedure. - - Read helper cache API. + - Inode locking. + - Inode writeback. + - High-level VFS API. + - Unlocked read/write iter. + - Pre-locked read/write iter. + - Monolithic files API. + - Memory-mapped I/O API. + - High-level VM API. + - Deprecated PG_private2 API. + - I/O request API. + - Request structure. + - Stream structure. + - Subrequest structure. + - Filesystem methods. + - Terminating a subrequest. + - Local cache API. + - API function reference. 
Overview ======== -The network filesystem helper library is a set of functions designed to aid a -network filesystem in implementing VM/VFS operations. For the moment, that -just includes turning various VM buffered read operations into requests to read -from the server. The helper library, however, can also interpose other -services, such as local caching or local data encryption. +The network filesystem services library, netfslib, is a set of functions +designed to aid a network filesystem in implementing VM/VFS API operations. It +takes over the normal buffered read, readahead, write and writeback and also +handles unbuffered and direct I/O. -Note that the library module doesn't link against local caching directly, so -access must be provided by the netfs. +The library provides support for (re-)negotiation of I/O sizes and retrying +failed I/O as well as local caching and will, in the future, provide content +encryption. + +It insulates the filesystem from VM interface changes as much as possible and +handles VM features such as large multipage folios. The filesystem basically +just has to provide a way to perform read and write RPC calls. + +The way I/O is organised inside netfslib consists of a number of objects: + + * A *request*. A request is used to track the progress of the I/O overall and + to hold on to resources. The collection of results is done at the request + level. The I/O within a request is divided into a number of parallel + streams of subrequests. + + * A *stream*. A non-overlapping series of subrequests. The subrequests + within a stream do not have to be contiguous. + + * A *subrequest*. This is the basic unit of I/O. It represents a single RPC + call or a single cache I/O operation. The library passes these to the + filesystem and the cache to perform. + +Requests and Streams +-------------------- + +When actually performing I/O (as opposed to just copying into the pagecache), +netfslib will create one or more requests to track the progress of the I/O and +to hold resources. + +A read operation will have a single stream and the subrequests within that +stream may be of mixed origins, for instance mixing RPC subrequests and cache +subrequests. + +On the other hand, a write operation may have multiple streams, where each +stream targets a different destination. For instance, there may be one stream +writing to the local cache and one to the server. Currently, only two streams +are allowed, but this could be increased if parallel writes to multiple servers +is desired. + +The subrequests within a write stream do not need to match alignment or size +with the subrequests in another write stream and netfslib performs the tiling +of subrequests in each stream over the source buffer independently. Further, +each stream may contain holes that don't correspond to holes in the other +stream. + +In addition, the subrequests do not need to correspond to the boundaries of the +folios or vectors in the source/destination buffer. The library handles the +collection of results and the wrangling of folio flags and references. + +Subrequests +----------- + +Subrequests are at the heart of the interaction between netfslib and the +filesystem using it. Each subrequest is expected to correspond to a single +read or write RPC or cache operation. The library will stitch together the +results from a set of subrequests to provide a higher level operation. + +Netfslib has two interactions with the filesystem or the cache when setting up +a subrequest. 
First, there's an optional preparatory step that allows the +filesystem to negotiate the limits on the subrequest, both in terms of maximum +number of bytes and maximum number of vectors (e.g. for RDMA). This may +involve negotiating with the server (e.g. cifs needing to acquire credits). + +And, secondly, there's the issuing step in which the subrequest is handed off +to the filesystem to perform. + +Note that these two steps are done slightly differently between read and write: + + * For reads, the VM/VFS tells us how much is being requested up front, so the + library can preset maximum values that the cache and then the filesystem can + reduce. The cache also gets consulted first on whether it wants to do + a read before the filesystem is consulted. + + * For writeback, it is unknown how much there will be to write until the + pagecache is walked, so no limit is set by the library. + +Once a subrequest is completed, the filesystem or cache informs the library of +the completion and then collection is invoked. Depending on whether the +request is synchronous or asynchronous, the collection of results will be done +in either the application thread or in a work queue. + +Result Collection and Retry +--------------------------- + +As subrequests complete, the results are collected and collated by the library +and folio unlocking is performed progressively (if appropriate). Once the +request is complete, async completion will be invoked (again, if appropriate). +It is possible for the filesystem to provide interim progress reports to the +library to cause folio unlocking to happen earlier if possible. + +If any subrequests fail, netfslib can retry them. It will wait until all +subrequests are completed, offer the filesystem the opportunity to fiddle with +the resources/state held by the request and poke at the subrequests before +re-preparing and re-issuing the subrequests. + +This allows the tiling of contiguous sets of failed subrequests within a stream +to be changed, adding more subrequests or ditching excess as necessary (for +instance, if the network sizes change or the server decides it wants smaller +chunks). + +Further, if one or more contiguous cache-read subrequests fail, the library +will pass them to the filesystem to perform instead, renegotiating and retiling +them as necessary to fit with the filesystem's parameters rather than those of +the cache. + +Local Caching +------------- + +One of the services netfslib provides, via ``fscache``, is the option to cache +on local disk a copy of the data obtained from/written to a network filesystem. +The library will manage the storing, retrieval and some invalidation of data +automatically on behalf of the filesystem if a cookie is attached to the +``netfs_inode``. + +Note that local caching used to use the PG_private_2 flag (aliased as PG_fscache) to +keep track of a page that was being written to the cache, but this is now +deprecated as PG_private_2 will be removed. + +Instead, folios that are read from the server for which there was no data in +the cache will be marked as dirty and will have ``folio->private`` set to a +special value (``NETFS_FOLIO_COPY_TO_CACHE``) and left to writeback to write. +If the folio is modified before that happens, the special value will be +cleared and the folio will be written back as a normal dirty folio. + +When writeback occurs, folios that are so marked will only be written to the +cache and not to the server.
Writeback handles mixed cache-only writes and +server-and-cache writes by using two streams, sending one to the cache and one +to the server. The server stream will have gaps in it corresponding to those +folios. + +Content Encryption (fscrypt) +---------------------------- + +Though it does not do so yet, at some point netfslib will acquire the ability +to do client-side content encryption on behalf of the network filesystem (Ceph, +for example). fscrypt can be used for this if appropriate (it may not be - +cifs, for example). + +The data will be stored encrypted in the local cache using the same manner of +encryption as the data written to the server and the library will impose bounce +buffering and RMW cycles as necessary. Per-Inode Context @@ -40,10 +194,13 @@ structure is defined:: struct netfs_inode { struct inode inode; const struct netfs_request_ops *ops; - struct fscache_cookie *cache; + struct fscache_cookie * cache; + loff_t remote_i_size; + unsigned long flags; + ... }; -A network filesystem that wants to use netfs lib must place one of these in its +A network filesystem that wants to use netfslib must place one of these in its inode wrapper struct instead of the VFS ``struct inode``. This can be done in a way similar to the following:: @@ -56,7 +213,8 @@ This allows netfslib to find its state by using ``container_of()`` from the inode pointer, thereby allowing the netfslib helper functions to be pointed to directly by the VFS/VM operation tables. -The structure contains the following fields: +The structure contains the following fields that are of interest to the +filesystem: * ``inode`` @@ -71,6 +229,37 @@ The structure contains the following fields: Local caching cookie, or NULL if no caching is enabled. This field does not exist if fscache is disabled. + * ``remote_i_size`` + + The size of the file on the server. This differs from inode->i_size if + local modifications have been made but not yet written back. + + * ``flags`` + + A set of flags, some of which the filesystem might be interested in: + + * ``NETFS_ICTX_MODIFIED_ATTR`` + + Set if netfslib modifies mtime/ctime. The filesystem is free to ignore + this or clear it. + + * ``NETFS_ICTX_UNBUFFERED`` + + Do unbuffered I/O upon the file. Like direct I/O but without the + alignment limitations. RMW will be performed if necessary. The pagecache + will not be used unless mmap() is also used. + + * ``NETFS_ICTX_WRITETHROUGH`` + + Do writethrough caching upon the file. I/O will be set up and dispatched + as buffered writes are made to the page cache. mmap() does the normal + writeback thing. + + * ``NETFS_ICTX_SINGLE_NO_UPLOAD`` + + Set if the file has a monolithic content that must be read entirely in a + single go and must not be written back to the server, though it can be + cached (e.g. AFS directories). 
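For illustration, the wrapper arrangement described above might look like the following hedged sketch; the ``myfs`` names are hypothetical, and the context would normally be initialised (operations table, ``NETFS_ICTX_*`` flags) from the filesystem's inode set-up path, e.g. via netfs_inode_init()::

    #include <linux/netfs.h>

    /* Hypothetical inode wrapper for a netfslib-backed filesystem. */
    struct myfs_inode {
            struct netfs_inode netfs;       /* netfs context; embeds the VFS inode */
            unsigned long my_private;       /* filesystem-private state */
    };

    static inline struct myfs_inode *MYFS_I(struct inode *inode)
    {
            return container_of(inode, struct myfs_inode, netfs.inode);
    }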
Inode Context Helper Functions ------------------------------ @@ -84,117 +273,250 @@ set the operations table pointer:: then a function to cast from the VFS inode structure to the netfs context:: - struct netfs_inode *netfs_node(struct inode *inode); + struct netfs_inode *netfs_inode(struct inode *inode); and finally, a function to get the cache cookie pointer from the context attached to an inode (or NULL if fscache is disabled):: struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx); +Inode Locking +------------- + +A number of functions are provided to manage the locking of i_rwsem for I/O and +to effectively extend it to provide more separate classes of exclusion:: + + int netfs_start_io_read(struct inode *inode); + void netfs_end_io_read(struct inode *inode); + int netfs_start_io_write(struct inode *inode); + void netfs_end_io_write(struct inode *inode); + int netfs_start_io_direct(struct inode *inode); + void netfs_end_io_direct(struct inode *inode); + +The exclusion breaks down into four separate classes: + + 1) Buffered reads and writes. + + Buffered reads can run concurrently with each other and with buffered writes, + but buffered writes cannot run concurrently with each other. + + 2) Direct reads and writes. + + Direct (and unbuffered) reads and writes can run concurrently since they do + not share local buffering (i.e. the pagecache) and, in a network + filesystem, are expected to have exclusion managed on the server (though + this may not be the case for, say, Ceph). + + 3) Other major inode modifying operations (e.g. truncate, fallocate). + + These should just access i_rwsem directly. + + 4) mmap(). + + mmap'd accesses might operate concurrently with any of the other classes. + They might form the buffer for an intra-file loopback DIO read/write. They + might be permitted on unbuffered files. + +Inode Writeback +--------------- + +Netfslib will pin resources on an inode for future writeback (such as pinning +use of an fscache cookie) when an inode is dirtied. However, this pinning +needs careful management. To manage the pinning, the following sequence +occurs: + + 1) An inode state flag ``I_PINNING_NETFS_WB`` is set by netfslib when the + pinning begins (when a folio is dirtied, for example) if the cache is + active to stop the cache structures from being discarded and the cache + space from being culled. This also prevents re-getting of cache resources + if the flag is already set. + + 2) This flag is then cleared inside the inode lock during inode writeback in the + VM - and the fact that it was set is transferred to ``->unpinned_netfs_wb`` + in ``struct writeback_control``. + + 3) If ``->unpinned_netfs_wb`` is now set, the write_inode procedure is forced. + + 4) The filesystem's ``->write_inode()`` function is invoked to do the cleanup. + + 5) The filesystem invokes netfs to do its cleanup. + +To do the cleanup, netfslib provides a function to do the resource unpinning:: + + int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); + +If the filesystem doesn't need to do anything else, this may be set as its +``.write_inode`` method. + +Further, if an inode is deleted, the filesystem's write_inode method may not +get called, so:: + + void netfs_clear_inode_writeback(struct inode *inode, const void *aux); -Buffered Read Helpers -===================== +must be called from ``->evict_inode()`` *before* ``clear_inode()`` is called.
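Putting the two cleanup hooks together, a filesystem with no other per-inode writeback state of its own might wire them up as in this hedged sketch (the ``myfs`` names are hypothetical and the NULL auxiliary-data argument is a simplification)::

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/netfs.h>

    static void myfs_evict_inode(struct inode *inode)
    {
            truncate_inode_pages_final(&inode->i_data);
            /* Must run before clear_inode(), as noted above. */
            netfs_clear_inode_writeback(inode, NULL);
            clear_inode(inode);
    }

    static const struct super_operations myfs_super_ops = {
            /* netfs_unpin_writeback() drops the pinned cache resources. */
            .write_inode    = netfs_unpin_writeback,
            .evict_inode    = myfs_evict_inode,
    };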
-The library provides a set of read helpers that handle the ->read_folio(), -->readahead() and much of the ->write_begin() VM operations and translate them -into a common call framework. -The following services are provided: +High-Level VFS API +================== - * Handle folios that span multiple pages. +Netfslib provides a number of sets of API calls for the filesystem to delegate +VFS operations to. Netfslib, in turn, will call out to the filesystem and the +cache to negotiate I/O sizes, issue RPCs and provide places for it to intervene +at various times. - * Insulate the netfs from VM interface changes. +Unlocked Read/Write Iter +------------------------ - * Allow the netfs to arbitrarily split reads up into pieces, even ones that - don't match folio sizes or folio alignments and that may cross folios. +The first API set is for the delegation of operations to netfslib when the +filesystem is called through the standard VFS read/write_iter methods:: - * Allow the netfs to expand a readahead request in both directions to meet its - needs. + ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); + ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from); + ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter); + ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter); + ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from); - * Allow the netfs to partially fulfil a read, which will then be resubmitted. +They can be assigned directly to ``.read_iter`` and ``.write_iter``. They +perform the inode locking themselves and the first two will switch between +buffered I/O and DIO as appropriate. - * Handle local caching, allowing cached data and server-read data to be - interleaved for a single request. +Pre-Locked Read/Write Iter +-------------------------- - * Handle clearing of bufferage that isn't on the server. +The second API set is for the delegation of operations to netfslib when the +filesystem is called through the standard VFS methods, but needs to do some +other stuff before or after calling netfslib whilst still inside locked section +(e.g. Ceph negotiating caps). The unbuffered read function is:: - * Handle retrying of reads that failed, switching reads from the cache to the - server as necessary. + ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter); - * In the future, this is a place that other services can be performed, such as - local encryption of data to be stored remotely or in the cache. +This must not be assigned directly to ``.read_iter`` and the filesystem is +responsible for performing the inode locking before calling it. In the case of +buffered read, the filesystem should use ``filemap_read()``. -From the network filesystem, the helpers require a table of operations. This -includes a mandatory method to issue a read operation along with a number of -optional methods. +There are three functions for writes:: + ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from, + struct netfs_group *netfs_group); + ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, + struct netfs_group *netfs_group); + ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter, + struct netfs_group *netfs_group); -Read Helper Functions +These must not be assigned directly to ``.write_iter`` and the filesystem is +responsible for performing the inode locking before calling them. 
+ +The first two functions are for buffered writes; the first just adds some +standard write checks and jumps to the second, but if the filesystem wants to +do the checks itself, it can use the second directly. The third function is +for unbuffered or DIO writes. + +On all three write functions, there is a writeback group pointer (which should +be NULL if the filesystem doesn't use this). Writeback groups are set on +folios when they're modified. If a folio to-be-modified is already marked with +a different group, it is flushed first. The writeback API allows writing back +of a specific group. + +Memory-Mapped I/O API --------------------- -Three read helpers are provided:: +An API for support of mmap()'d I/O is provided:: + + vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); + +This allows the filesystem to delegate ``.page_mkwrite`` to netfslib. The +filesystem should not take the inode lock before calling it, but, as with the +locked write functions above, this does take a writeback group pointer. If the +page to be made writable is in a different group, it will be flushed first. + +Monolithic Files API +-------------------- + +There is also a special API set for files for which the content must be read in +a single RPC (and not written back) and is maintained as a monolithic blob +(e.g. an AFS directory), though it can be stored and updated in the local cache:: + + ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter); + void netfs_single_mark_inode_dirty(struct inode *inode); + int netfs_writeback_single(struct address_space *mapping, + struct writeback_control *wbc, + struct iov_iter *iter); + +The first function reads from a file into the given buffer, reading from the +cache in preference if the data is cached there; the second function allows the +inode to be marked dirty, causing a later writeback; and the third function can +be called from the writeback code to write the data to the cache, if there is +one. - void netfs_readahead(struct readahead_control *ractl); - int netfs_read_folio(struct file *file, - struct folio *folio); - int netfs_write_begin(struct netfs_inode *ctx, - struct file *file, - struct address_space *mapping, - loff_t pos, - unsigned int len, - struct folio **_folio, - void **_fsdata); +The inode should be marked ``NETFS_ICTX_SINGLE_NO_UPLOAD`` if this API is to be +used. The writeback function requires the buffer to be of ITER_FOLIOQ type. -Each corresponds to a VM address space operation. These operations use the -state in the per-inode context. +High-Level VM API +================== -For ->readahead() and ->read_folio(), the network filesystem just point directly -at the corresponding read helper; whereas for ->write_begin(), it may be a -little more complicated as the network filesystem might want to flush -conflicting writes or track dirty data and needs to put the acquired folio if -an error occurs after calling the helper. +Netfslib also provides a number of sets of API calls for the filesystem to +delegate VM operations to. Again, netfslib, in turn, will call out to the +filesystem and the cache to negotiate I/O sizes, issue RPCs and provide places +for it to intervene at various times:: -The helpers manage the read request, calling back into the network filesystem -through the supplied table of operations. Waits will be performed as -necessary before returning for helpers that are meant to be synchronous. 
+ void netfs_readahead(struct readahead_control *); + int netfs_read_folio(struct file *, struct folio *); + int netfs_writepages(struct address_space *mapping, + struct writeback_control *wbc); + bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio); + void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length); + bool netfs_release_folio(struct folio *folio, gfp_t gfp); -If an error occurs, the ->free_request() will be called to clean up the -netfs_io_request struct allocated. If some parts of the request are in -progress when an error occurs, the request will get partially completed if -sufficient data is read. +These are ``address_space_operations`` methods and can be set directly in the +operations table. -Additionally, there is:: +Deprecated PG_private_2 API +--------------------------- - * void netfs_subreq_terminated(struct netfs_io_subrequest *subreq, - ssize_t transferred_or_error, - bool was_async); +There is also a deprecated function for filesystems that still use the +``->write_begin`` method:: -which should be called to complete a read subrequest. This is given the number -of bytes transferred or a negative error code, plus a flag indicating whether -the operation was asynchronous (ie. whether the follow-on processing can be -done in the current context, given this may involve sleeping). + int netfs_write_begin(struct netfs_inode *inode, struct file *file, + struct address_space *mapping, loff_t pos, unsigned int len, + struct folio **_folio, void **_fsdata); +It uses the deprecated PG_private_2 flag and so should not be used. -Read Helper Structures ----------------------- -The read helpers make use of a couple of structures to maintain the state of -the read. The first is a structure that manages a read request as a whole:: +I/O Request API +=============== + +The I/O request API comprises a number of structures and a number of functions +that the filesystem may need to use. + +Request Structure +----------------- + +The request structure manages the request as a whole, holding some resources +and state on behalf of the filesystem and tracking the collection of results:: struct netfs_io_request { + enum netfs_io_origin origin; struct inode *inode; struct address_space *mapping; - struct netfs_cache_resources cache_resources; + struct netfs_group *group; + struct netfs_io_stream io_streams[]; void *netfs_priv; - loff_t start; - size_t len; - loff_t i_size; - const struct netfs_request_ops *netfs_ops; + void *netfs_priv2; + unsigned long long start; + unsigned long long len; + unsigned long long i_size; unsigned int debug_id; + unsigned long flags; ... }; -The above fields are the ones the netfs can use. They are: +Many of the fields are for internal use, but the fields shown here are of +interest to the filesystem: + + * ``origin`` + + The origin of the request (readahead, read_folio, DIO read, writeback, ...). * ``inode`` * ``mapping`` @@ -202,11 +524,19 @@ The above fields are the ones the netfs can use. They are: The inode and the address space of the file being read from. The mapping may or may not point to inode->i_data. - * ``cache_resources`` + * ``group`` + + The writeback group this request is dealing with or NULL. This holds a ref + on the group. + + * ``io_streams`` - Resources for the local cache to use, if present. + The parallel streams of subrequests available to the request. Currently two + are available, but this may be made extensible in future. ``NR_IO_STREAMS`` + indicates the size of the array. 
* ``netfs_priv`` + * ``netfs_priv2`` The network filesystem's private data. The value for this can be passed in to the helper functions or set during the request. @@ -221,37 +551,121 @@ The above fields are the ones the netfs can use. They are: The size of the file at the start of the request. - * ``netfs_ops`` - - A pointer to the operation table. The value for this is passed into the - helper functions. - * ``debug_id`` A number allocated to this operation that can be displayed in trace lines for reference. + * ``flags`` + + Flags for managing and controlling the operation of the request. Some of + these may be of interest to the filesystem: + + * ``NETFS_RREQ_RETRYING`` + + Netfslib sets this when generating retries. + + * ``NETFS_RREQ_PAUSE`` + + The filesystem can set this to request to pause the library's subrequest + issuing loop - but care needs to be taken as netfslib may also set it. + + * ``NETFS_RREQ_NONBLOCK`` + * ``NETFS_RREQ_BLOCKED`` + + Netfslib sets the first to indicate that non-blocking mode was set by the + caller and the filesystem can set the second to indicate that it would + have had to block. + + * ``NETFS_RREQ_USE_PGPRIV2`` + + The filesystem can set this if it wants to use PG_private_2 to track + whether a folio is being written to the cache. This is deprecated as + PG_private_2 is going to go away. + +If the filesystem wants more private data than is afforded by this structure, +then it should wrap it and provide its own allocator. + +Stream Structure +---------------- + +A request is comprised of one or more parallel streams and each stream may be +aimed at a different target. + +For read requests, only stream 0 is used. This can contain a mixture of +subrequests aimed at different sources. For write requests, stream 0 is used +for the server and stream 1 is used for the cache. For buffered writeback, +stream 0 is not enabled unless a normal dirty folio is encountered, at which +point ->begin_writeback() will be invoked and the filesystem can mark the +stream available. + +The stream struct looks like:: + + struct netfs_io_stream { + unsigned char stream_nr; + bool avail; + size_t sreq_max_len; + unsigned int sreq_max_segs; + unsigned int submit_extendable_to; + ... + }; + +A number of members are available for access/use by the filesystem: + + * ``stream_nr`` + + The number of the stream within the request. + + * ``avail`` + + True if the stream is available for use. The filesystem should set this on + stream zero if in ->begin_writeback(). + + * ``sreq_max_len`` + * ``sreq_max_segs`` + + These are set by the filesystem or the cache in ->prepare_read() or + ->prepare_write() for each subrequest to indicate the maximum number of + bytes and, optionally, the maximum number of segments (if not 0) that that + subrequest can support. + + * ``submit_extendable_to`` -The second structure is used to manage individual slices of the overall read -request:: + The size that a subrequest can be rounded up to beyond the EOF, given the + available buffer. This allows the cache to work out if it can do a DIO read + or write that straddles the EOF marker. + +Subrequest Structure +-------------------- + +Individual units of I/O are managed by the subrequest structure. 
These +represent slices of the overall request and run independently:: struct netfs_io_subrequest { struct netfs_io_request *rreq; - loff_t start; + struct iov_iter io_iter; + unsigned long long start; size_t len; size_t transferred; unsigned long flags; + short error; unsigned short debug_index; + unsigned char stream_nr; ... }; -Each subrequest is expected to access a single source, though the helpers will +Each subrequest is expected to access a single source, though the library will handle falling back from one source type to another. The members are: * ``rreq`` A pointer to the read request. + * ``io_iter`` + + An I/O iterator representing a slice of the buffer to be read into or + written from. + * ``start`` * ``len`` @@ -260,241 +674,300 @@ handle falling back from one source type to another. The members are: * ``transferred`` - The amount of data transferred so far of the length of this slice. The - network filesystem or cache should start the operation this far into the - slice. If a short read occurs, the helpers will call again, having updated - this to reflect the amount read so far. + The amount of data transferred so far for this subrequest. This should be + added to with the length of the transfer made by this issuance of the + subrequest. If this is less than ``len`` then the subrequest may be + reissued to continue. * ``flags`` - Flags pertaining to the read. There are two of interest to the filesystem - or cache: + Flags for managing the subrequest. There are a number that are of interest to the + filesystem or cache: + + * ``NETFS_SREQ_MADE_PROGRESS`` + + Set by the filesystem to indicate that at least one byte of data was read + or written. + + * ``NETFS_SREQ_HIT_EOF`` + + The filesystem should set this if a read hit the EOF on the file (in which + case ``transferred`` should stop at the EOF). Netfslib may expand the + subrequest out to the size of the folio containing the EOF on the off + chance that a third party change happened or a DIO read may have asked for + more than is available. The library will clear any excess pagecache. * ``NETFS_SREQ_CLEAR_TAIL`` - This can be set to indicate that the remainder of the slice, from - transferred to len, should be cleared. + The filesystem can set this to indicate that the remainder of the slice, + from transferred to len, should be cleared. Do not set if HIT_EOF is set. + + * ``NETFS_SREQ_NEED_RETRY`` + + The filesystem can set this to tell netfslib to retry the subrequest. + + * ``NETFS_SREQ_BOUNDARY`` + + This can be set by the filesystem on a subrequest to indicate that it ends + at a boundary with the filesystem structure (e.g. at the end of a Ceph + object). It tells netfslib not to retile subrequests across it. * ``NETFS_SREQ_SEEK_DATA_READ`` - This is a hint to the cache that it might want to try skipping ahead to - the next data (ie. using SEEK_DATA). + This is a hint from netfslib to the cache that it might want to try + skipping ahead to the next data (ie. using SEEK_DATA). + + * ``error`` + + This is for the filesystem to store the result of the subrequest. It should be + set to 0 if successful and a negative error code otherwise. * ``debug_index`` + * ``stream_nr`` A number allocated to this slice that can be displayed in trace lines for - reference. + reference and the number of the request stream that it belongs to.
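To make the interplay of these members concrete, here is a minimal sketch of how a filesystem might fold the result of one transfer into a subrequest. It is not taken from any in-tree filesystem; the retry policy shown is illustrative and the flags word is assumed to be manipulated with the usual ``__set_bit()`` helpers::

    #include <linux/netfs.h>

    /* Sketch: record the outcome of one receive in the subrequest. */
    static void my_fs_note_read_result(struct netfs_io_subrequest *subreq,
                                       ssize_t ret, bool hit_eof)
    {
            if (ret < 0) {
                    subreq->error = ret;            /* 0 or a negative errno */
                    return;
            }

            subreq->transferred += ret;             /* add to, don't overwrite */
            if (ret > 0)
                    __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);

            if (hit_eof)
                    __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
            else if (subreq->transferred < subreq->len)
                    __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
    }
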
+If necessary, the filesystem can get and put extra refs on the subrequest it is +given:: -Read Helper Operations ----------------------- + void netfs_get_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what); + void netfs_put_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what); -The network filesystem must provide the read helpers with a table of operations -through which it can issue requests and negotiate:: +using netfs trace codes to indicate the reason. Care must be taken, however, +as once control of the subrequest is returned to netfslib, the same subrequest +can be reissued/retried. + +Filesystem Methods +------------------ + +The filesystem sets a table of operations in ``netfs_inode`` for netfslib to +use:: struct netfs_request_ops { - void (*init_request)(struct netfs_io_request *rreq, struct file *file); + mempool_t *request_pool; + mempool_t *subrequest_pool; + int (*init_request)(struct netfs_io_request *rreq, struct file *file); void (*free_request)(struct netfs_io_request *rreq); + void (*free_subrequest)(struct netfs_io_subrequest *rreq); void (*expand_readahead)(struct netfs_io_request *rreq); - bool (*clamp_length)(struct netfs_io_subrequest *subreq); + int (*prepare_read)(struct netfs_io_subrequest *subreq); void (*issue_read)(struct netfs_io_subrequest *subreq); - bool (*is_still_valid)(struct netfs_io_request *rreq); - int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, - struct folio **foliop, void **_fsdata); void (*done)(struct netfs_io_request *rreq); + void (*update_i_size)(struct inode *inode, loff_t i_size); + void (*post_modify)(struct inode *inode); + void (*begin_writeback)(struct netfs_io_request *wreq); + void (*prepare_write)(struct netfs_io_subrequest *subreq); + void (*issue_write)(struct netfs_io_subrequest *subreq); + void (*retry_request)(struct netfs_io_request *wreq, + struct netfs_io_stream *stream); + void (*invalidate_cache)(struct netfs_io_request *wreq); }; -The operations are as follows: - - * ``init_request()`` +The table starts with a pair of optional pointers to memory pools from which +requests and subrequests can be allocated. If these are not given, netfslib +has default pools that it will use instead. If the filesystem wraps the netfs +structs in its own larger structs, then it will need to use its own pools. +Netfslib will allocate directly from the pools. - [Optional] This is called to initialise the request structure. It is given - the file for reference. +The methods defined in the table are: + * ``init_request()`` * ``free_request()`` + * ``free_subrequest()`` - [Optional] This is called as the request is being deallocated so that the - filesystem can clean up any state it has attached there. + [Optional] A filesystem may implement these to initialise or clean up any + resources that it attaches to the request or subrequest. * ``expand_readahead()`` [Optional] This is called to allow the filesystem to expand the size of a - readahead read request. The filesystem gets to expand the request in both - directions, though it's not permitted to reduce it as the numbers may - represent an allocation already made. If local caching is enabled, it gets - to expand the request first. + readahead request. The filesystem gets to expand the request in both + directions, though it must retain the initial region as that may represent + an allocation already made. If local caching is enabled, it gets to expand + the request first. 
Expansion is communicated by changing ->start and ->len in the request structure. Note that if any change is made, ->len must be increased by at least as much as ->start is reduced. - * ``clamp_length()`` - - [Optional] This is called to allow the filesystem to reduce the size of a - subrequest. The filesystem can use this, for example, to chop up a request - that has to be split across multiple servers or to put multiple reads in - flight. - - This should return 0 on success and an error code on error. - - * ``issue_read()`` + * ``prepare_read()`` - [Required] The helpers use this to dispatch a subrequest to the server for - reading. In the subrequest, ->start, ->len and ->transferred indicate what - data should be read from the server. + [Optional] This is called to allow the filesystem to limit the size of a + subrequest. It may also limit the number of individual regions in iterator, + such as required by RDMA. This information should be set on stream zero in:: - There is no return value; the netfs_subreq_terminated() function should be - called to indicate whether or not the operation succeeded and how much data - it transferred. The filesystem also should not deal with setting folios - uptodate, unlocking them or dropping their refs - the helpers need to deal - with this as they have to coordinate with copying to the local cache. + rreq->io_streams[0].sreq_max_len + rreq->io_streams[0].sreq_max_segs - Note that the helpers have the folios locked, but not pinned. It is - possible to use the ITER_XARRAY iov iterator to refer to the range of the - inode that is being operated upon without the need to allocate large bvec - tables. + The filesystem can use this, for example, to chop up a request that has to + be split across multiple servers or to put multiple reads in flight. - * ``is_still_valid()`` + Zero should be returned on success and an error code otherwise. - [Optional] This is called to find out if the data just read from the local - cache is still valid. It should return true if it is still valid and false - if not. If it's not still valid, it will be reread from the server. + * ``issue_read()`` - * ``check_write_begin()`` + [Required] Netfslib calls this to dispatch a subrequest to the server for + reading. In the subrequest, ->start, ->len and ->transferred indicate what + data should be read from the server and ->io_iter indicates the buffer to be + used. - [Optional] This is called from the netfs_write_begin() helper once it has - allocated/grabbed the folio to be modified to allow the filesystem to flush - conflicting state before allowing it to be modified. + There is no return value; the ``netfs_read_subreq_terminated()`` function + should be called to indicate that the subrequest completed either way. + ->error, ->transferred and ->flags should be updated before completing. The + termination can be done asynchronously. - It may unlock and discard the folio it was given and set the caller's folio - pointer to NULL. It should return 0 if everything is now fine (``*foliop`` - left set) or the op should be retried (``*foliop`` cleared) and any other - error code to abort the operation. + Note: the filesystem must not deal with setting folios uptodate, unlocking + them or dropping their refs - the library deals with this as it may have to + stitch together the results of multiple subrequests that variously overlap + the set of folios. 
- * ``done`` + * ``done()`` - [Optional] This is called after the folios in the request have all been + [Optional] This is called after the folios in a read request have all been unlocked (and marked uptodate if applicable). + * ``update_i_size()`` + + [Optional] This is invoked by netfslib at various points during the write + paths to ask the filesystem to update its idea of the file size. If not + given, netfslib will set i_size and i_blocks and update the local cache + cookie. + + * ``post_modify()`` + + [Optional] This is called after netfslib writes to the pagecache or when it + allows an mmap'd page to be marked as writable. + + * ``begin_writeback()`` + + [Optional] Netfslib calls this when processing a writeback request if it + finds a dirty page that isn't simply marked NETFS_FOLIO_COPY_TO_CACHE, + indicating it must be written to the server. This allows the filesystem to + only set up writeback resources when it knows it's going to have to perform + a write. + + * ``prepare_write()`` + [Optional] This is called to allow the filesystem to limit the size of a + subrequest. It may also limit the number of individual regions in iterator, + such as required by RDMA. This information should be set on stream to which + the subrequest belongs:: -Read Helper Procedure ---------------------- - -The read helpers work by the following general procedure: - - * Set up the request. - - * For readahead, allow the local cache and then the network filesystem to - propose expansions to the read request. This is then proposed to the VM. - If the VM cannot fully perform the expansion, a partially expanded read will - be performed, though this may not get written to the cache in its entirety. - - * Loop around slicing chunks off of the request to form subrequests: - - * If a local cache is present, it gets to do the slicing, otherwise the - helpers just try to generate maximal slices. - - * The network filesystem gets to clamp the size of each slice if it is to be - the source. This allows rsize and chunking to be implemented. + rreq->io_streams[subreq->stream_nr].sreq_max_len + rreq->io_streams[subreq->stream_nr].sreq_max_segs - * The helpers issue a read from the cache or a read from the server or just - clears the slice as appropriate. + The filesystem can use this, for example, to chop up a request that has to + be split across multiple servers or to put multiple writes in flight. - * The next slice begins at the end of the last one. + This is not permitted to return an error. Instead, in the event of failure, + ``netfs_prepare_write_failed()`` must be called. - * As slices finish being read, they terminate. + * ``issue_write()`` - * When all the subrequests have terminated, the subrequests are assessed and - any that are short or have failed are reissued: + [Required] This is used to dispatch a subrequest to the server for writing. + In the subrequest, ->start, ->len and ->transferred indicate what data + should be written to the server and ->io_iter indicates the buffer to be + used. - * Failed cache requests are issued against the server instead. + There is no return value; the ``netfs_write_subreq_terminated()`` function + should be called to indicate that the subrequest completed either way. + ->error, ->transferred and ->flags should be updated before completing. The + termination can be done asynchronously. - * Failed server requests just fail. 
+ Note: the filesystem must not deal with removing the dirty or writeback + marks on folios involved in the operation and should not take refs or pins + on them, but should leave retention to netfslib. - * Short reads against either source will be reissued against that source - provided they have transferred some more data: + * ``retry_request()`` - * The cache may need to skip holes that it can't do DIO from. + [Optional] Netfslib calls this at the beginning of a retry cycle. This + allows the filesystem to examine the state of the request, the subrequests + in the indicated stream and of its own data and make adjustments or + renegotiate resources. + + * ``invalidate_cache()`` - * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the - end of the slice instead of reissuing. + [Optional] This is called by netfslib to invalidate data stored in the local + cache in the event that writing to the local cache fails, providing updated + coherency data that netfs can't provide. - * Once the data is read, the folios that have been fully read/cleared: +Terminating a subrequest +------------------------ - * Will be marked uptodate. +When a subrequest completes, there are a number of functions that the cache or +subrequest can call to inform netfslib of the status change. One function is +provided to terminate a write subrequest at the preparation stage and acts +synchronously: - * If a cache is present, will be marked with PG_fscache. + * ``void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);`` - * Unlocked + Indicate that the ->prepare_write() call failed. The ``error`` field should + have been updated. - * Any folios that need writing to the cache will then have DIO writes issued. +Note that ->prepare_read() can return an error as a read can simply be aborted. +Dealing with writeback failure is trickier. - * Synchronous operations will wait for reading to be complete. +The other functions are used for subrequests that got as far as being issued: - * Writes to the cache will proceed asynchronously and the folios will have the - PG_fscache mark removed when that completes. + * ``void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);`` - * The request structures will be cleaned up when everything has completed. + Tell netfslib that a read subrequest has terminated. The ``error``, + ``flags`` and ``transferred`` fields should have been updated. + * ``void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error);`` -Read Helper Cache API ---------------------- + Tell netfslib that a write subrequest has terminated. Either the amount of + data processed or the negative error code can be passed in. This is + can be used as a kiocb completion function. -When implementing a local cache to be used by the read helpers, two things are -required: some way for the network filesystem to initialise the caching for a -read request and a table of operations for the helpers to call. + * ``void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq);`` -To begin a cache operation on an fscache object, the following function is -called:: + This is provided to optionally update netfslib on the incremental progress + of a read, allowing some folios to be unlocked early and does not actually + terminate the subrequest. The ``transferred`` field should have been + updated. 
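Putting the issuing method and the termination call together, a bare-bones ->issue_read() could be shaped like the sketch below. ``my_fs_fetch()`` stands in for whatever RPC the filesystem actually issues and is purely hypothetical; the termination could equally be deferred to a work item or an RPC completion handler::

    #include <linux/netfs.h>

    /* Hypothetical transport call: fills the iterator, returns bytes or -errno. */
    ssize_t my_fs_fetch(struct inode *inode, unsigned long long pos,
                        struct iov_iter *iter);

    static void my_fs_issue_read(struct netfs_io_subrequest *subreq)
    {
            struct netfs_io_request *rreq = subreq->rreq;
            ssize_t ret;

            /* ->start, ->len and ->io_iter say what to fetch and where to put
             * it; resume from ->transferred in case this is a reissue.
             */
            ret = my_fs_fetch(rreq->inode,
                              subreq->start + subreq->transferred,
                              &subreq->io_iter);
            if (ret < 0) {
                    subreq->error = ret;
            } else {
                    subreq->transferred += ret;
                    if (ret > 0)
                            __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
            }

            netfs_read_subreq_terminated(subreq);
    }
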
- int fscache_begin_read_operation(struct netfs_io_request *rreq, - struct fscache_cookie *cookie); +Local Cache API +--------------- -passing in the request pointer and the cookie corresponding to the file. This -fills in the cache resources mentioned below. +Netfslib provides a separate API for a local cache to implement, though it +provides some somewhat similar routines to the filesystem request API. -The netfs_io_request object contains a place for the cache to hang its +Firstly, the netfs_io_request object contains a place for the cache to hang its state:: struct netfs_cache_resources { const struct netfs_cache_ops *ops; void *cache_priv; void *cache_priv2; + unsigned int debug_id; + unsigned int inval_counter; }; -This contains an operations table pointer and two private pointers. The -operation table looks like the following:: +This contains an operations table pointer and two private pointers plus the +debug ID of the fscache cookie for tracing purposes and an invalidation counter +that is cranked by calls to ``fscache_invalidate()`` allowing cache subrequests +to be invalidated after completion. + +The cache operation table looks like the following:: struct netfs_cache_ops { void (*end_operation)(struct netfs_cache_resources *cres); - void (*expand_readahead)(struct netfs_cache_resources *cres, loff_t *_start, size_t *_len, loff_t i_size); - enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, - loff_t i_size); - + loff_t i_size); int (*read)(struct netfs_cache_resources *cres, loff_t start_pos, struct iov_iter *iter, bool seek_data, netfs_io_terminated_t term_func, void *term_func_priv); - - int (*prepare_write)(struct netfs_cache_resources *cres, - loff_t *_start, size_t *_len, loff_t i_size, - bool no_space_allocated_yet); - - int (*write)(struct netfs_cache_resources *cres, - loff_t start_pos, - struct iov_iter *iter, - netfs_io_terminated_t term_func, - void *term_func_priv); - - int (*query_occupancy)(struct netfs_cache_resources *cres, - loff_t start, size_t len, size_t granularity, - loff_t *_data_start, size_t *_data_len); + void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq); + void (*issue_write)(struct netfs_io_subrequest *subreq); }; With a termination handler function pointer:: @@ -511,10 +984,16 @@ The methods defined in the table are: * ``expand_readahead()`` - [Optional] Called at the beginning of a netfs_readahead() operation to allow - the cache to expand a request in either direction. This allows the cache to + [Optional] Called at the beginning of a readahead operation to allow the + cache to expand a request in either direction. This allows the cache to size the request appropriately for the cache granularity. + * ``prepare_read()`` + + [Required] Called to configure the next slice of a request. ->start and + ->len in the subrequest indicate where and how big the next slice can be; + the cache gets to reduce the length to match its granularity requirements. + The function is passed pointers to the start and length in its parameters, plus the size of the file for reference, and adjusts the start and length appropriately. It should return one of: @@ -528,12 +1007,6 @@ The methods defined in the table are: downloaded from the server or read from the cache - or whether slicing should be given up at the current point. - * ``prepare_read()`` - - [Required] Called to configure the next slice of a request. 
->start and - ->len in the subrequest indicate where and how big the next slice can be; - the cache gets to reduce the length to match its granularity requirements. - * ``read()`` [Required] Called to read from the cache. The start file offset is given @@ -547,44 +1020,33 @@ The methods defined in the table are: indicating whether the termination is definitely happening in the caller's context. - * ``prepare_write()`` + * ``prepare_write_subreq()`` - [Required] Called to prepare a write to the cache to take place. This - involves checking to see whether the cache has sufficient space to honour - the write. ``*_start`` and ``*_len`` indicate the region to be written; the - region can be shrunk or it can be expanded to a page boundary either way as - necessary to align for direct I/O. i_size holds the size of the object and - is provided for reference. no_space_allocated_yet is set to true if the - caller is certain that no data has been written to that region - for example - if it tried to do a read from there already. + [Required] This is called to allow the cache to limit the size of a + subrequest. It may also limit the number of individual regions in iterator, + such as required by DIO/DMA. This information should be set on stream to + which the subrequest belongs:: - * ``write()`` + rreq->io_streams[subreq->stream_nr].sreq_max_len + rreq->io_streams[subreq->stream_nr].sreq_max_segs - [Required] Called to write to the cache. The start file offset is given - along with an iterator to write from, which gives the length also. - - Also provided is a pointer to a termination handler function and private - data to pass to that function. The termination function should be called - with the number of bytes transferred or an error code, plus a flag - indicating whether the termination is definitely happening in the caller's - context. + The filesystem can use this, for example, to chop up a request that has to + be split across multiple servers or to put multiple writes in flight. - * ``query_occupancy()`` + This is not permitted to return an error. In the event of failure, + ``netfs_prepare_write_failed()`` must be called. - [Required] Called to find out where the next piece of data is within a - particular region of the cache. The start and length of the region to be - queried are passed in, along with the granularity to which the answer needs - to be aligned. The function passes back the start and length of the data, - if any, available within that region. Note that there may be a hole at the - front. + * ``issue_write()`` - It returns 0 if some data was found, -ENODATA if there was no usable data - within the region or -ENOBUFS if there is no caching on this file. + [Required] This is used to dispatch a subrequest to the cache for writing. + In the subrequest, ->start, ->len and ->transferred indicate what data + should be written to the cache and ->io_iter indicates the buffer to be + used. -Note that these methods are passed a pointer to the cache resource structure, -not the read request structure as they could be used in other situations where -there isn't a read request structure as well, such as writing dirty data to the -cache. + There is no return value; the ``netfs_write_subreq_terminated()`` function + should be called to indicate that the subrequest completed either way. + ->error, ->transferred and ->flags should be updated before completing. The + termination can be done asynchronously. 
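As a sketch of the clamping that ->prepare_write_subreq() (or a filesystem's ->prepare_write()) is expected to perform, consider the following; the byte and segment limits are invented for illustration and are not values netfslib mandates::

    #include <linux/netfs.h>

    #define MY_CACHE_MAX_IO_LEN   (256 * 1024)    /* assumed per-RPC byte limit */
    #define MY_CACHE_MAX_IO_SEGS  16              /* assumed DMA segment limit */

    static void my_cache_prepare_write_subreq(struct netfs_io_subrequest *subreq)
    {
            struct netfs_io_stream *stream =
                    &subreq->rreq->io_streams[subreq->stream_nr];

            /* Clamp each subrequest to what one write to the cache can carry. */
            stream->sreq_max_len  = MY_CACHE_MAX_IO_LEN;
            stream->sreq_max_segs = MY_CACHE_MAX_IO_SEGS;
    }

With the limits set this way, netfslib does the actual splitting of oversized writes into multiple subrequests, rather than the cache having to split the I/O itself.
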
API Function Reference diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst index 767b2927c762..3111ef5592f3 100644 --- a/Documentation/filesystems/porting.rst +++ b/Documentation/filesystems/porting.rst @@ -1203,3 +1203,43 @@ should use d_drop();d_splice_alias() and return the result of the latter. If a positive dentry cannot be returned for some reason, in-kernel clients such as cachefiles, nfsd, smb/server may not perform ideally but will fail-safe. + +--- + +** mandatory** + +lookup_one(), lookup_one_unlocked(), lookup_one_positive_unlocked() now +take a qstr instead of a name and len. These, not the "one_len" +versions, should be used whenever accessing a filesystem from outside +that filesysmtem, through a mount point - which will have a mnt_idmap. + +--- + +** mandatory** + +Functions try_lookup_one_len(), lookup_one_len(), +lookup_one_len_unlocked() and lookup_positive_unlocked() have been +renamed to try_lookup_noperm(), lookup_noperm(), +lookup_noperm_unlocked(), lookup_noperm_positive_unlocked(). They now +take a qstr instead of separate name and length. QSTR() can be used +when strlen() is needed for the length. + +For try_lookup_noperm() a reference to the qstr is passed in case the +hash might subsequently be needed. + +These function no longer do any permission checking - they previously +checked that the caller has 'X' permission on the parent. They must +ONLY be used internally by a filesystem on itself when it knows that +permissions are irrelevant or in a context where permission checks have +already been performed such as after vfs_path_parent_lookup() + +--- + +** mandatory** + +d_hash_and_lookup() is no longer exported or available outside the VFS. +Use try_lookup_noperm() instead. This adds name validation and takes +arguments in the opposite order but is otherwise identical. + +Using try_lookup_noperm() will require linux/namei.h to be included. + diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index ae79c30b6c0c..bf051c7da6b8 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -716,9 +716,8 @@ page lookup by address, and keeping track of pages tagged as Dirty or Writeback. The first can be used independently to the others. The VM can try to -either write dirty pages in order to clean them, or release clean pages -in order to reuse them. To do this it can call the ->writepage method -on dirty pages, and ->release_folio on clean folios with the private +release clean pages in order to reuse them. To do this it can call +->release_folio on clean folios with the private flag set. Clean pages without PagePrivate and with no external references will be released without notice being given to the address_space. @@ -731,8 +730,8 @@ maintains information about the PG_Dirty and PG_Writeback status of each page, so that pages with either of these flags can be found quickly. The Dirty tag is primarily used by mpage_writepages - the default -->writepages method. It uses the tag to find dirty pages to call -->writepage on. If mpage_writepages is not used (i.e. the address +->writepages method. It uses the tag to find dirty pages to +write back. If mpage_writepages is not used (i.e. the address provides its own ->writepages) , the PAGECACHE_TAG_DIRTY tag is almost unused. 
write_inode_now and sync_inode do use it (through __sync_single_inode) to check if ->writepages has been successful in @@ -756,23 +755,23 @@ pages, however the address_space has finer control of write sizes. The read process essentially only requires 'read_folio'. The write process is more complicated and uses write_begin/write_end or -dirty_folio to write data into the address_space, and writepage and +dirty_folio to write data into the address_space, and writepages to writeback data to storage. Adding and removing pages to/from an address_space is protected by the inode's i_mutex. When data is written to a page, the PG_Dirty flag should be set. It -typically remains set until writepage asks for it to be written. This +typically remains set until writepages asks for it to be written. This should clear PG_Dirty and set PG_Writeback. It can be actually written at any point after PG_Dirty is clear. Once it is known to be safe, PG_Writeback is cleared. Writeback makes use of a writeback_control structure to direct the -operations. This gives the writepage and writepages operations some +operations. This gives the writepages operation some information about the nature of and reason for the writeback request, and the constraints under which it is being done. It is also used to -return information back to the caller about the result of a writepage or +return information back to the caller about the result of a writepages request. @@ -819,7 +818,6 @@ cache in your filesystem. The following members are defined: .. code-block:: c struct address_space_operations { - int (*writepage)(struct page *page, struct writeback_control *wbc); int (*read_folio)(struct file *, struct folio *); int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *); @@ -848,25 +846,6 @@ cache in your filesystem. The following members are defined: int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter); }; -``writepage`` - called by the VM to write a dirty page to backing store. This - may happen for data integrity reasons (i.e. 'sync'), or to free - up memory (flush). The difference can be seen in - wbc->sync_mode. The PG_Dirty flag has been cleared and - PageLocked is true. writepage should start writeout, should set - PG_Writeback, and should make sure the page is unlocked, either - synchronously or asynchronously when the write operation - completes. - - If wbc->sync_mode is WB_SYNC_NONE, ->writepage doesn't have to - try too hard if there are problems, and may choose to write out - other pages from the mapping if that is easier (e.g. due to - internal dependencies). If it chooses not to start writeout, it - should return AOP_WRITEPAGE_ACTIVATE so that the VM will not - keep calling ->writepage on that page. - - See the file "Locking" for more details. - ``read_folio`` Called by the page cache to read a folio from the backing store. The 'file' argument supplies authentication information to network @@ -909,7 +888,7 @@ cache in your filesystem. The following members are defined: given and that many pages should be written if possible. If no ->writepages is given, then mpage_writepages is used instead. This will choose pages from the address space that are tagged as - DIRTY and will pass them to ->writepage. + DIRTY and will write them back. ``dirty_folio`` called by the VM to mark a folio as dirty. 
This is particularly diff --git a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst index 9174fce12c1b..4a2d3b27aa4d 100644 --- a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst +++ b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst @@ -60,8 +60,6 @@ irq_domain和一个hwirqå·ä½œä¸ºå‚数。 如果hwirqçš„æ˜ å°„è¿˜ä¸å˜åœ¨ï¼Œé‚ - irq_find_mapping()返回给定域和hwirqçš„Linux IRQå·ï¼Œå¦‚æžœæ²¡æœ‰æ˜ å°„åˆ™è¿”å›ž0。 -- irq_linear_revmap()现与irq_find_mapping()相åŒï¼Œå·²è¢«åºŸå¼ƒã€‚ - - generic_handle_domain_irq()处ç†ä¸€ä¸ªç”±åŸŸå’Œhwirqå·æè¿°çš„ä¸æ–。 请注æ„,irq域的查找必须å‘生在与RCU读临界区兼容的上下文ä¸ã€‚ @@ -83,7 +81,6 @@ irq_domainæ˜ å°„çš„ç±»åž‹ :: - irq_domain_add_linear() irq_domain_create_linear() 线性å呿˜ 射维护了一个固定大å°çš„表,该表以hwirqå·ä¸ºç´¢å¼•。 当一个hwirqè¢«æ˜ å°„ @@ -104,7 +101,6 @@ irq_domain_add_linear()å’Œirq_domain_create_linear()在功能上是ç‰ä»·çš„, :: - irq_domain_add_tree() irq_domain_create_tree() irq_domain维护ç€ä»Žhwirqå·åˆ°Linux IRQçš„radixçš„æ ‘çŠ¶æ˜ å°„ã€‚ 当一个hwirqè¢«æ˜ å°„æ—¶ï¼Œ @@ -124,7 +120,7 @@ irq_domain_add_tree()å’Œirq_domain_create_tree()在功能上是ç‰ä»·çš„ï¼Œé™¤äº :: - irq_domain_add_nomap() + irq_domain_create_nomap() 当硬件ä¸çš„hwirqå·æ˜¯å¯ç¼–程的时候,就å¯ä»¥é‡‡ç”¨æ— æ˜ å°„ç±»åž‹ã€‚ åœ¨è¿™ç§æƒ…况下,最好将 Linux IRQå·ç¼–å…¥ç¡¬ä»¶æœ¬èº«ï¼Œè¿™æ ·å°±ä¸éœ€è¦æ˜ 射了。 调用irq_create_direct_mapping() @@ -138,8 +134,6 @@ Linux IRQå·ç¼–å…¥ç¡¬ä»¶æœ¬èº«ï¼Œè¿™æ ·å°±ä¸éœ€è¦æ˜ 射了。 调用irq_create :: - irq_domain_add_simple() - irq_domain_add_legacy() irq_domain_create_simple() irq_domain_create_legacy() diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 7a1409ecc238..fee5c4731501 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -366,6 +366,12 @@ Code Seq# Include File Comments <mailto:linuxppc-dev> 0xB2 01-02 arch/powerpc/include/uapi/asm/papr-sysparm.h powerpc/pseries system parameter API <mailto:linuxppc-dev> +0xB2 03-05 arch/powerpc/include/uapi/asm/papr-indices.h powerpc/pseries indices API + <mailto:linuxppc-dev> +0xB2 06-07 arch/powerpc/include/uapi/asm/papr-platform-dump.h powerpc/pseries Platform Dump API + <mailto:linuxppc-dev> +0xB2 08 powerpc/include/uapi/asm/papr-physical-attestation.h powerpc/pseries Physical Attestation API + <mailto:linuxppc-dev> 0xB3 00 linux/mmc/ioctl.h 0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org> 0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org> diff --git a/MAINTAINERS b/MAINTAINERS index d48dd6726fe6..e45559690b28 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1097,7 +1097,7 @@ R: Carlos Bilbao <carlos.bilbao@kernel.org> L: platform-driver-x86@vger.kernel.org S: Maintained F: Documentation/arch/x86/amd_hsmp.rst -F: arch/x86/include/asm/amd_hsmp.h +F: arch/x86/include/asm/amd/hsmp.h F: arch/x86/include/uapi/asm/amd_hsmp.h F: drivers/platform/x86/amd/hsmp/ @@ -1142,7 +1142,7 @@ M: Mario Limonciello <mario.limonciello@amd.com> M: Yazen Ghannam <yazen.ghannam@amd.com> L: linux-kernel@vger.kernel.org S: Supported -F: arch/x86/include/asm/amd_node.h +F: arch/x86/include/asm/amd/node.h F: arch/x86/kernel/amd_node.c AMD PDS CORE DRIVER @@ -1682,7 +1682,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> M: Arve HjønnevÃ¥g <arve@android.com> M: Todd Kjos <tkjos@android.com> M: Martijn Coenen <maco@android.com> -M: Joel Fernandes <joel@joelfernandes.org> +M: Joel Fernandes <joelagnelf@nvidia.com> M: Christian Brauner <christian@brauner.io> M: Carlos 
Llamas <cmllamas@google.com> M: Suren Baghdasaryan <surenb@google.com> @@ -5983,7 +5983,9 @@ S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/a.hindborg/linux.git configfs-next F: fs/configfs/ F: include/linux/configfs.h +F: rust/kernel/configfs.rs F: samples/configfs/ +F: samples/rust/rust_configfs.rs CONGATEC BOARD CONTROLLER MFD DRIVER M: Thomas Richard <thomas.richard@bootlin.com> @@ -6261,6 +6263,7 @@ F: Documentation/staging/crc* F: arch/*/lib/crc* F: include/linux/crc* F: lib/crc* +F: lib/tests/crc_kunit.c F: scripts/gen-crc-consts.py CREATIVE SB0540 @@ -6296,6 +6299,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git F: Documentation/crypto/ F: Documentation/devicetree/bindings/crypto/ F: arch/*/crypto/ +F: arch/*/lib/crypto/ F: crypto/ F: drivers/crypto/ F: include/crypto/ @@ -11106,6 +11110,14 @@ L: linuxppc-dev@lists.ozlabs.org S: Odd Fixes F: drivers/tty/hvc/ +HUNG TASK DETECTOR +M: Andrew Morton <akpm@linux-foundation.org> +R: Lance Yang <lance.yang@linux.dev> +L: linux-kernel@vger.kernel.org +S: Maintained +F: include/linux/hung_task.h +F: kernel/hung_task.c + I2C ACPI SUPPORT M: Mika Westerberg <westeri@kernel.org> L: linux-i2c@vger.kernel.org @@ -13653,7 +13665,6 @@ M: Madhavan Srinivasan <maddy@linux.ibm.com> M: Michael Ellerman <mpe@ellerman.id.au> R: Nicholas Piggin <npiggin@gmail.com> R: Christophe Leroy <christophe.leroy@csgroup.eu> -R: Naveen N Rao <naveen@kernel.org> L: linuxppc-dev@lists.ozlabs.org S: Supported W: https://github.com/linuxppc/wiki/wiki @@ -13734,7 +13745,7 @@ M: Luc Maranget <luc.maranget@inria.fr> M: "Paul E. McKenney" <paulmck@kernel.org> R: Akira Yokosawa <akiyks@gmail.com> R: Daniel Lustig <dlustig@nvidia.com> -R: Joel Fernandes <joel@joelfernandes.org> +R: Joel Fernandes <joelagnelf@nvidia.com> L: linux-kernel@vger.kernel.org L: linux-arch@vger.kernel.org L: lkmm@lists.linux.dev @@ -14296,9 +14307,8 @@ F: drivers/gpu/drm/armada/ F: include/uapi/drm/armada_drm.h MARVELL CRYPTO DRIVER -M: Boris Brezillon <bbrezillon@kernel.org> -M: Arnaud Ebalard <arno@natisbad.org> M: Srujana Challa <schalla@marvell.com> +M: Bharat Bhushan <bbhushan2@marvell.com> L: linux-crypto@vger.kernel.org S: Maintained F: drivers/crypto/marvell/ @@ -15561,6 +15571,41 @@ W: http://www.linux-mm.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm F: mm/gup.c +MEMORY MANAGEMENT - KSM (Kernel Samepage Merging) +M: Andrew Morton <akpm@linux-foundation.org> +M: David Hildenbrand <david@redhat.com> +R: Xu Xin <xu.xin16@zte.com.cn> +R: Chengming Zhou <chengming.zhou@linux.dev> +L: linux-mm@kvack.org +S: Maintained +W: http://www.linux-mm.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: Documentation/admin-guide/mm/ksm.rst +F: Documentation/mm/ksm.rst +F: include/linux/ksm.h +F: include/trace/events/ksm.h +F: mm/ksm.c + +MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION +M: Andrew Morton <akpm@linux-foundation.org> +M: David Hildenbrand <david@redhat.com> +R: Zi Yan <ziy@nvidia.com> +R: Matthew Brost <matthew.brost@intel.com> +R: Joshua Hahn <joshua.hahnjy@gmail.com> +R: Rakie Kim <rakie.kim@sk.com> +R: Byungchul Park <byungchul@sk.com> +R: Gregory Price <gourry@gourry.net> +R: Ying Huang <ying.huang@linux.alibaba.com> +L: linux-mm@kvack.org +S: Maintained +W: http://www.linux-mm.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: include/linux/mempolicy.h +F: include/linux/migrate.h +F: mm/mempolicy.c +F: mm/migrate.c +F: mm/migrate_device.c + MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA 
EMULATION M: Andrew Morton <akpm@linux-foundation.org> M: Mike Rapoport <rppt@kernel.org> @@ -15573,7 +15618,7 @@ F: mm/numa_memblks.c MEMORY MANAGEMENT - PAGE ALLOCATOR M: Andrew Morton <akpm@linux-foundation.org> -R: Vlastimil Babka <vbabka@suse.cz> +M: Vlastimil Babka <vbabka@suse.cz> R: Suren Baghdasaryan <surenb@google.com> R: Michal Hocko <mhocko@suse.com> R: Brendan Jackman <jackmanb@google.com> @@ -15581,10 +15626,25 @@ R: Johannes Weiner <hannes@cmpxchg.org> R: Zi Yan <ziy@nvidia.com> L: linux-mm@kvack.org S: Maintained +F: include/linux/compaction.h +F: include/linux/gfp.h +F: include/linux/page-isolation.h F: mm/compaction.c F: mm/page_alloc.c -F: include/linux/gfp.h -F: include/linux/compaction.h +F: mm/page_isolation.c + +MEMORY MANAGEMENT - RECLAIM +M: Andrew Morton <akpm@linux-foundation.org> +M: Johannes Weiner <hannes@cmpxchg.org> +R: David Hildenbrand <david@redhat.com> +R: Michal Hocko <mhocko@kernel.org> +R: Qi Zheng <zhengqi.arch@bytedance.com> +R: Shakeel Butt <shakeel.butt@linux.dev> +R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> +L: linux-mm@kvack.org +S: Maintained +F: mm/pt_reclaim.c +F: mm/vmscan.c MEMORY MANAGEMENT - RMAP (REVERSE MAPPING) M: Andrew Morton <akpm@linux-foundation.org> @@ -20453,14 +20513,14 @@ READ-COPY UPDATE (RCU) M: "Paul E. McKenney" <paulmck@kernel.org> M: Frederic Weisbecker <frederic@kernel.org> (kernel/rcu/tree_nocb.h) M: Neeraj Upadhyay <neeraj.upadhyay@kernel.org> (kernel/rcu/tasks.h) -M: Joel Fernandes <joel@joelfernandes.org> +M: Joel Fernandes <joelagnelf@nvidia.com> M: Josh Triplett <josh@joshtriplett.org> M: Boqun Feng <boqun.feng@gmail.com> M: Uladzislau Rezki <urezki@gmail.com> R: Steven Rostedt <rostedt@goodmis.org> R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> R: Lai Jiangshan <jiangshanlai@gmail.com> -R: Zqiang <qiang.zhang1211@gmail.com> +R: Zqiang <qiang.zhang@linux.dev> L: rcu@vger.kernel.org S: Supported W: http://www.rdrop.com/users/paulmck/RCU/ @@ -20613,8 +20673,8 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml F: drivers/i2c/busses/i2c-emev2.c RENESAS ETHERNET AVB DRIVER -M: Paul Barker <paul.barker.ct@bp.renesas.com> M: Niklas Söderlund <niklas.soderlund@ragnatech.se> +R: Paul Barker <paul@pbarker.dev> L: netdev@vger.kernel.org L: linux-renesas-soc@vger.kernel.org S: Maintained @@ -25912,7 +25972,7 @@ F: tools/testing/vsock/ VMALLOC M: Andrew Morton <akpm@linux-foundation.org> -R: Uladzislau Rezki <urezki@gmail.com> +M: Uladzislau Rezki <urezki@gmail.com> L: linux-mm@kvack.org S: Maintained W: http://www.linux-mm.org @@ -26308,7 +26368,7 @@ R: Ahmed S. 
Darwish <darwi@linutronix.de> L: x86-cpuid@lists.linux.dev S: Maintained W: https://x86-cpuid.org -F: tools/arch/x86/kcpuid/cpuid.csv +F: tools/arch/x86/kcpuid/ X86 ENTRY CODE M: Andy Lutomirski <luto@kernel.org> @@ -26836,6 +26896,14 @@ L: linux-kernel@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/zhaoxin.c +ZONED LOOP DEVICE +M: Damien Le Moal <dlemoal@kernel.org> +R: Christoph Hellwig <hch@lst.de> +L: linux-block@vger.kernel.org +S: Maintained +F: Documentation/admin-guide/blockdev/zoned_loop.rst +F: drivers/block/zloop.c + ZONEFS FILESYSTEM M: Damien Le Moal <dlemoal@kernel.org> M: Naohiro Aota <naohiro.aota@wdc.com> @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 15 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 1f0eb4f25c0f..a3eaab094ece 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -852,14 +852,9 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr, alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); perf_sample_data_init(&data, 0, hwc->last_period); - if (alpha_perf_event_set_period(event, hwc, idx)) { - if (perf_event_overflow(event, &data, regs)) { - /* Interrupts coming too quickly; "throttle" the - * counter, i.e., disable it for a little while. - */ - alpha_pmu_stop(event, 0); - } - } + if (alpha_perf_event_set_period(event, hwc, idx)) + perf_event_overflow(event, &data, regs); + wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index fea29d9d18d6..809edc59af25 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -170,7 +170,7 @@ init_onchip_IRQ(struct device_node *intc, struct device_node *parent) if (parent) panic("DeviceTree incore intc not a root irq controller\n"); - root_domain = irq_domain_add_linear(intc, nr_cpu_irqs, &arcv2_irq_ops, NULL); + root_domain = irq_domain_create_linear(of_fwnode_handle(intc), nr_cpu_irqs, &arcv2_irq_ops, NULL); if (!root_domain) panic("root irq domain not avail\n"); diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index 1d2ff1c6a61b..1b159e9e0234 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c @@ -112,8 +112,9 @@ init_onchip_IRQ(struct device_node *intc, struct device_node *parent) if (parent) panic("DeviceTree incore intc not a root irq controller\n"); - root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS, - &arc_intc_domain_ops, NULL); + root_domain = irq_domain_create_linear(of_fwnode_handle(intc), + NR_CPU_IRQS, + &arc_intc_domain_ops, NULL); if (!root_domain) panic("root irq domain not avail\n"); diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index cdd370ec9280..02b28a9324f4 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -391,7 +391,8 @@ idu_of_init(struct device_node *intc, struct device_node *parent) pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs); - domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(intc), nr_irqs, + &idu_irq_ops, NULL); /* Parent interrupts (core-intc) are already mapped */ diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 6e5a651cd75c..ed6d4f0cd621 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -599,10 +599,8 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) 
arc_perf_event_update(event, &event->hw, event->hw.idx); perf_sample_data_init(&data, 0, hwc->last_period); - if (arc_pmu_event_set_period(event)) { - if (perf_event_overflow(event, &data, regs)) - arc_pmu_stop(event, 0); - } + if (arc_pmu_event_set_period(event)) + perf_event_overflow(event, &data, regs); active_ints &= ~BIT(idx); } while (active_ints); diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 9846f30990f7..02eda44a6faa 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -416,9 +416,9 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) writel_relaxed(~0, irqbase + SA1111_INTSTATCLR0); writel_relaxed(~0, irqbase + SA1111_INTSTATCLR1); - sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR, - &sa1111_irqdomain_ops, - sachip); + sachip->irqdomain = irq_domain_create_linear(NULL, SA1111_IRQ_NR, + &sa1111_irqdomain_ops, + sachip); if (!sachip->irqdomain) { irq_free_descs(sachip->irq_base, SA1111_IRQ_NR); return -ENOMEM; diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index e81a5d6c1c20..e81964cce516 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -349,7 +349,7 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_XTS=m @@ -364,7 +364,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_SHA1_ARM_NEON=m -CONFIG_CRYPTO_SHA256_ARM=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m CONFIG_CRYPTO_CHACHA20_NEON=m diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig index 275ddf7a3a14..242e7d5a3f68 100644 --- a/arch/arm/configs/milbeaut_m10v_defconfig +++ b/arch/arm/configs/milbeaut_m10v_defconfig @@ -93,15 +93,13 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_KEYS=y -CONFIG_CRYPTO_MANAGER=y -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SELFTESTS=y # CONFIG_CRYPTO_ECHAINIV is not set CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_SEQIV=m CONFIG_CRYPTO_GHASH_ARM_CE=m CONFIG_CRYPTO_SHA1_ARM_NEON=m CONFIG_CRYPTO_SHA1_ARM_CE=m -CONFIG_CRYPTO_SHA2_ARM_CE=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index ad037c175fdb..96178acedad0 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -1301,7 +1301,6 @@ CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_GHASH_ARM_CE=m CONFIG_CRYPTO_SHA1_ARM_NEON=m CONFIG_CRYPTO_SHA1_ARM_CE=m -CONFIG_CRYPTO_SHA2_ARM_CE=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 75b326bc7830..317f977e509e 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -697,7 +697,6 @@ CONFIG_SECURITY=y CONFIG_CRYPTO_MICHAEL_MIC=y CONFIG_CRYPTO_GHASH_ARM_CE=m CONFIG_CRYPTO_SHA1_ARM_NEON=m -CONFIG_CRYPTO_SHA256_ARM=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index 24fca8608554..ded4b9a5accf 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -636,10 +636,9 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m 
CONFIG_TIMER_STATS=y CONFIG_SECURITY=y -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m @@ -660,7 +659,6 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_DEFLATE=y CONFIG_CRYPTO_LZO=y CONFIG_CRYPTO_SHA1_ARM=m -CONFIG_CRYPTO_SHA256_ARM=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM=m CONFIG_FONTS=y diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig index ffec59e3f49c..ac2a0f998c73 100644 --- a/arch/arm/configs/spitz_defconfig +++ b/arch/arm/configs/spitz_defconfig @@ -215,7 +215,7 @@ CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_DEBUG_KERNEL=y CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_AES=m diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 23e4ea067ddb..7efb9a8596e4 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -46,30 +46,6 @@ config CRYPTO_NHPOLY1305_NEON Architecture: arm using: - NEON (Advanced SIMD) extensions -config CRYPTO_POLY1305_ARM - tristate - select CRYPTO_HASH - select CRYPTO_ARCH_HAVE_LIB_POLY1305 - default CRYPTO_LIB_POLY1305_INTERNAL - help - Poly1305 authenticator algorithm (RFC7539) - - Architecture: arm optionally using - - NEON (Advanced SIMD) extensions - -config CRYPTO_BLAKE2S_ARM - bool "Hash functions: BLAKE2s" - select CRYPTO_ARCH_HAVE_LIB_BLAKE2S - help - BLAKE2s cryptographic hash function (RFC 7693) - - Architecture: arm - - This is faster than the generic implementations of BLAKE2s and - BLAKE2b, but slower than the NEON implementation of BLAKE2b. - There is no NEON implementation of BLAKE2s, since NEON doesn't - really help with it. 
- config CRYPTO_BLAKE2B_NEON tristate "Hash functions: BLAKE2b (NEON)" depends on KERNEL_MODE_NEON @@ -117,27 +93,6 @@ config CRYPTO_SHA1_ARM_CE Architecture: arm using ARMv8 Crypto Extensions -config CRYPTO_SHA2_ARM_CE - tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)" - depends on KERNEL_MODE_NEON - select CRYPTO_SHA256_ARM - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: arm using - - ARMv8 Crypto Extensions - -config CRYPTO_SHA256_ARM - tristate "Hash functions: SHA-224 and SHA-256 (NEON)" - select CRYPTO_HASH - depends on !CPU_V7M - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: arm using - - NEON (Advanced SIMD) extensions - config CRYPTO_SHA512_ARM tristate "Hash functions: SHA-384 and SHA-512 (NEON)" select CRYPTO_HASH @@ -172,7 +127,6 @@ config CRYPTO_AES_ARM_BS select CRYPTO_AES_ARM select CRYPTO_SKCIPHER select CRYPTO_LIB_AES - select CRYPTO_SIMD help Length-preserving ciphers: AES cipher algorithms (FIPS-197) with block cipher modes: @@ -200,7 +154,6 @@ config CRYPTO_AES_ARM_CE depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_LIB_AES - select CRYPTO_SIMD help Length-preserving ciphers: AES cipher algorithms (FIPS-197) with block cipher modes: @@ -214,17 +167,5 @@ config CRYPTO_AES_ARM_CE Architecture: arm using: - ARMv8 Crypto Extensions -config CRYPTO_CHACHA20_NEON - tristate - select CRYPTO_SKCIPHER - select CRYPTO_ARCH_HAVE_LIB_CHACHA - default CRYPTO_LIB_CHACHA_INTERNAL - help - Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 - stream cipher algorithms - - Architecture: arm using: - - NEON (Advanced SIMD) extensions - endmenu diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 3d0e23ff9e74..8479137c6e80 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -7,37 +7,25 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o -obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o -obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o -obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o -obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o -obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o aes-arm-y := aes-cipher-core.o aes-cipher-glue.o aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o sha1-arm-y := sha1-armv4-large.o sha1_glue.o sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o -sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o -sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y) sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y) -libblake2s-arm-y:= blake2s-core.o blake2s-glue.o blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o -sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o -chacha-neon-y := chacha-scalar-core.o chacha-glue.o 
-chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o -poly1305-arm-y := poly1305-core.o poly1305-glue.o nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o curve25519-neon-y := curve25519-core.o curve25519-glue.o @@ -47,14 +35,8 @@ quiet_cmd_perl = PERL $@ $(obj)/%-core.S: $(src)/%-armv4.pl $(call cmd,perl) -clean-files += poly1305-core.S sha256-core.S sha512-core.S +clean-files += sha512-core.S aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1 -AFLAGS_sha256-core.o += $(aflags-thumb2-y) AFLAGS_sha512-core.o += $(aflags-thumb2-y) - -# massage the perlasm code a bit so we only get the NEON routine if we need it -poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5 -poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7 -AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y) diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index 1cf61f51e766..00591895d540 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -10,8 +10,6 @@ #include <asm/simd.h> #include <linux/unaligned.h> #include <crypto/aes.h> -#include <crypto/ctr.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/cpufeature.h> @@ -418,29 +416,6 @@ static int ctr_encrypt(struct skcipher_request *req) return err; } -static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) -{ - struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - unsigned long flags; - - /* - * Temporarily disable interrupts to avoid races where - * cachelines are evicted when the CPU is interrupted - * to do something else. - */ - local_irq_save(flags); - aes_encrypt(ctx, dst, src); - local_irq_restore(flags); -} - -static int ctr_encrypt_sync(struct skcipher_request *req) -{ - if (!crypto_simd_usable()) - return crypto_ctr_encrypt_walk(req, ctr_encrypt_one); - - return ctr_encrypt(req); -} - static int xts_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -586,10 +561,9 @@ static int xts_decrypt(struct skcipher_request *req) } static struct skcipher_alg aes_algs[] = { { - .base.cra_name = "__ecb(aes)", - .base.cra_driver_name = "__ecb-aes-ce", + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-ce", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, @@ -600,10 +574,9 @@ static struct skcipher_alg aes_algs[] = { { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(aes)", - .base.cra_driver_name = "__cbc-aes-ce", + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-ce", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, @@ -615,10 +588,9 @@ static struct skcipher_alg aes_algs[] = { { .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, { - .base.cra_name = "__cts(cbc(aes))", - .base.cra_driver_name = "__cts-cbc-aes-ce", + .base.cra_name = "cts(cbc(aes))", + .base.cra_driver_name = "cts-cbc-aes-ce", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, @@ -631,10 +603,9 @@ static struct skcipher_alg aes_algs[] = 
{ { .encrypt = cts_cbc_encrypt, .decrypt = cts_cbc_decrypt, }, { - .base.cra_name = "__ctr(aes)", - .base.cra_driver_name = "__ctr-aes-ce", + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "ctr-aes-ce", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), .base.cra_module = THIS_MODULE, @@ -647,25 +618,9 @@ static struct skcipher_alg aes_algs[] = { { .encrypt = ctr_encrypt, .decrypt = ctr_encrypt, }, { - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "ctr-aes-ce-sync", - .base.cra_priority = 300 - 1, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct crypto_aes_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .chunksize = AES_BLOCK_SIZE, - .setkey = ce_aes_setkey, - .encrypt = ctr_encrypt_sync, - .decrypt = ctr_encrypt_sync, -}, { - .base.cra_name = "__xts(aes)", - .base.cra_driver_name = "__xts-aes-ce", + .base.cra_name = "xts(aes)", + .base.cra_driver_name = "xts-aes-ce", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx), .base.cra_module = THIS_MODULE, @@ -679,51 +634,14 @@ static struct skcipher_alg aes_algs[] = { { .decrypt = xts_decrypt, } }; -static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; - static void aes_exit(void) { - int i; - - for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++) - simd_skcipher_free(aes_simd_algs[i]); - crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); } static int __init aes_init(void) { - struct simd_skcipher_alg *simd; - const char *basename; - const char *algname; - const char *drvname; - int err; - int i; - - err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); - if (err) - return err; - - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { - if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL)) - continue; - - algname = aes_algs[i].base.cra_name + 2; - drvname = aes_algs[i].base.cra_driver_name + 2; - basename = aes_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); - err = PTR_ERR(simd); - if (IS_ERR(simd)) - goto unregister_simds; - - aes_simd_algs[i] = simd; - } - - return 0; - -unregister_simds: - aes_exit(); - return err; + return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); } module_cpu_feature_match(AES, aes_init); diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c index f6be80b5938b..c60104dc1585 100644 --- a/arch/arm/crypto/aes-neonbs-glue.c +++ b/arch/arm/crypto/aes-neonbs-glue.c @@ -8,8 +8,6 @@ #include <asm/neon.h> #include <asm/simd.h> #include <crypto/aes.h> -#include <crypto/ctr.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <crypto/xts.h> @@ -59,11 +57,6 @@ struct aesbs_xts_ctx { struct crypto_aes_ctx tweak_key; }; -struct aesbs_ctr_ctx { - struct aesbs_ctx key; /* must be first member */ - struct crypto_aes_ctx fallback; -}; - static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { @@ -200,25 +193,6 @@ static int cbc_decrypt(struct skcipher_request *req) return err; } -static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) -{ - struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = 
aes_expandkey(&ctx->fallback, in_key, key_len); - if (err) - return err; - - ctx->key.rounds = 6 + key_len / 4; - - kernel_neon_begin(); - aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds); - kernel_neon_end(); - - return 0; -} - static int ctr_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -254,21 +228,6 @@ static int ctr_encrypt(struct skcipher_request *req) return err; } -static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) -{ - struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); - - __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst); -} - -static int ctr_encrypt_sync(struct skcipher_request *req) -{ - if (!crypto_simd_usable()) - return crypto_ctr_encrypt_walk(req, ctr_encrypt_one); - - return ctr_encrypt(req); -} - static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { @@ -374,13 +333,12 @@ static int xts_decrypt(struct skcipher_request *req) } static struct skcipher_alg aes_algs[] = { { - .base.cra_name = "__ecb(aes)", - .base.cra_driver_name = "__ecb-aes-neonbs", + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_ctx), .base.cra_module = THIS_MODULE, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -389,13 +347,12 @@ static struct skcipher_alg aes_algs[] = { { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(aes)", - .base.cra_driver_name = "__cbc-aes-neonbs", + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx), .base.cra_module = THIS_MODULE, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -405,13 +362,12 @@ static struct skcipher_alg aes_algs[] = { { .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, { - .base.cra_name = "__ctr(aes)", - .base.cra_driver_name = "__ctr-aes-neonbs", + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "ctr-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aesbs_ctx), .base.cra_module = THIS_MODULE, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -422,29 +378,12 @@ static struct skcipher_alg aes_algs[] = { { .encrypt = ctr_encrypt, .decrypt = ctr_encrypt, }, { - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "ctr-aes-neonbs-sync", - .base.cra_priority = 250 - 1, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .chunksize = AES_BLOCK_SIZE, - .walksize = 8 * AES_BLOCK_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = aesbs_ctr_setkey_sync, - .encrypt = ctr_encrypt_sync, - .decrypt = ctr_encrypt_sync, -}, { - .base.cra_name = "__xts(aes)", - .base.cra_driver_name = "__xts-aes-neonbs", + .base.cra_name = "xts(aes)", + .base.cra_driver_name = "xts-aes-neonbs", .base.cra_priority = 250, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aesbs_xts_ctx), .base.cra_module = THIS_MODULE, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, @@ -455,55 
+394,18 @@ static struct skcipher_alg aes_algs[] = { { .decrypt = xts_decrypt, } }; -static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)]; - static void aes_exit(void) { - int i; - - for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++) - if (aes_simd_algs[i]) - simd_skcipher_free(aes_simd_algs[i]); - crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); } static int __init aes_init(void) { - struct simd_skcipher_alg *simd; - const char *basename; - const char *algname; - const char *drvname; - int err; - int i; - if (!(elf_hwcap & HWCAP_NEON)) return -ENODEV; - err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); - if (err) - return err; - - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { - if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL)) - continue; - - algname = aes_algs[i].base.cra_name + 2; - drvname = aes_algs[i].base.cra_driver_name + 2; - basename = aes_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); - err = PTR_ERR(simd); - if (IS_ERR(simd)) - goto unregister_simds; - - aes_simd_algs[i] = simd; - } - return 0; - -unregister_simds: - aes_exit(); - return err; + return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); } -late_initcall(aes_init); +module_init(aes_init); module_exit(aes_exit); diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c index 4b59d027ba4a..2ff443a91724 100644 --- a/arch/arm/crypto/blake2b-neon-glue.c +++ b/arch/arm/crypto/blake2b-neon-glue.c @@ -7,7 +7,6 @@ #include <crypto/internal/blake2b.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <linux/module.h> #include <linux/sizes.h> @@ -21,11 +20,6 @@ asmlinkage void blake2b_compress_neon(struct blake2b_state *state, static void blake2b_compress_arch(struct blake2b_state *state, const u8 *block, size_t nblocks, u32 inc) { - if (!crypto_simd_usable()) { - blake2b_compress_generic(state, block, nblocks, inc); - return; - } - do { const size_t blocks = min_t(size_t, nblocks, SZ_4K / BLAKE2B_BLOCK_SIZE); @@ -42,12 +36,14 @@ static void blake2b_compress_arch(struct blake2b_state *state, static int crypto_blake2b_update_neon(struct shash_desc *desc, const u8 *in, unsigned int inlen) { - return crypto_blake2b_update(desc, in, inlen, blake2b_compress_arch); + return crypto_blake2b_update_bo(desc, in, inlen, blake2b_compress_arch); } -static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out) +static int crypto_blake2b_finup_neon(struct shash_desc *desc, const u8 *in, + unsigned int inlen, u8 *out) { - return crypto_blake2b_final(desc, out, blake2b_compress_arch); + return crypto_blake2b_finup(desc, in, inlen, out, + blake2b_compress_arch); } #define BLAKE2B_ALG(name, driver_name, digest_size) \ @@ -55,7 +51,9 @@ static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out) .base.cra_name = name, \ .base.cra_driver_name = driver_name, \ .base.cra_priority = 200, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \ + CRYPTO_AHASH_ALG_BLOCK_ONLY | \ + CRYPTO_AHASH_ALG_FINAL_NONZERO, \ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ .base.cra_module = THIS_MODULE, \ @@ -63,8 +61,9 @@ static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out) .setkey = crypto_blake2b_setkey, \ .init = crypto_blake2b_init, \ .update = crypto_blake2b_update_neon, \ - .final = crypto_blake2b_final_neon, \ + .finup = crypto_blake2b_finup_neon, \ 
.descsize = sizeof(struct blake2b_state), \ + .statesize = BLAKE2B_STATE_SIZE, \ } static struct shash_alg blake2b_neon_algs[] = { diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c deleted file mode 100644 index 50e635512046..000000000000 --- a/arch/arm/crypto/chacha-glue.c +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ARM NEON accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> - * Copyright (C) 2015 Martin Willi - */ - -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/simd.h> -#include <crypto/internal/skcipher.h> -#include <linux/jump_label.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include <asm/cputype.h> -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> - -asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src, - int nrounds); -asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src, - int nrounds, unsigned int nbytes); -asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds); -asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); - -asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes, - const u32 *state, int nrounds); - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon); - -static inline bool neon_usable(void) -{ - return static_branch_likely(&use_neon) && crypto_simd_usable(); -} - -static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds) -{ - u8 buf[CHACHA_BLOCK_SIZE]; - - while (bytes > CHACHA_BLOCK_SIZE) { - unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U); - - chacha_4block_xor_neon(state, dst, src, nrounds, l); - bytes -= l; - src += l; - dst += l; - state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); - } - if (bytes) { - const u8 *s = src; - u8 *d = dst; - - if (bytes != CHACHA_BLOCK_SIZE) - s = d = memcpy(buf, src, bytes); - chacha_block_xor_neon(state, d, s, nrounds); - if (d != dst) - memcpy(dst, buf, bytes); - state[12]++; - } -} - -void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -{ - if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) { - hchacha_block_arm(state, stream, nrounds); - } else { - kernel_neon_begin(); - hchacha_block_neon(state, stream, nrounds); - kernel_neon_end(); - } -} -EXPORT_SYMBOL(hchacha_block_arch); - -void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, - int nrounds) -{ - if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() || - bytes <= CHACHA_BLOCK_SIZE) { - chacha_doarm(dst, src, bytes, state, nrounds); - state[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE); - return; - } - - do { - unsigned int todo = min_t(unsigned int, bytes, SZ_4K); - - kernel_neon_begin(); - chacha_doneon(state, dst, src, todo, nrounds); - kernel_neon_end(); - - bytes -= todo; - src += todo; - dst += todo; - } while (bytes); -} -EXPORT_SYMBOL(chacha_crypt_arch); - -static int chacha_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv, - bool neon) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - chacha_init(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) { - 
chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr, - nbytes, state, ctx->nrounds); - state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE); - } else { - kernel_neon_begin(); - chacha_doneon(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, ctx->nrounds); - kernel_neon_end(); - } - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static int do_chacha(struct skcipher_request *req, bool neon) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_stream_xor(req, ctx, req->iv, neon); -} - -static int chacha_arm(struct skcipher_request *req) -{ - return do_chacha(req, false); -} - -static int chacha_neon(struct skcipher_request *req) -{ - return do_chacha(req, neon_usable()); -} - -static int do_xchacha(struct skcipher_request *req, bool neon) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - u32 state[16]; - u8 real_iv[16]; - - chacha_init(state, ctx->key, req->iv); - - if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) { - hchacha_block_arm(state, subctx.key, ctx->nrounds); - } else { - kernel_neon_begin(); - hchacha_block_neon(state, subctx.key, ctx->nrounds); - kernel_neon_end(); - } - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); - return chacha_stream_xor(req, &subctx, real_iv, neon); -} - -static int xchacha_arm(struct skcipher_request *req) -{ - return do_xchacha(req, false); -} - -static int xchacha_neon(struct skcipher_request *req) -{ - return do_xchacha(req, neon_usable()); -} - -static struct skcipher_alg arm_algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-arm", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_arm, - .decrypt = chacha_arm, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-arm", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = xchacha_arm, - .decrypt = xchacha_arm, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-arm", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_arm, - .decrypt = xchacha_arm, - }, -}; - -static struct skcipher_alg neon_algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 4 * CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_neon, - .decrypt = chacha_neon, - 
}, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 4 * CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = xchacha_neon, - .decrypt = xchacha_neon, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 4 * CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_neon, - .decrypt = xchacha_neon, - } -}; - -static int __init chacha_simd_mod_init(void) -{ - int err = 0; - - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) { - err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); - if (err) - return err; - } - - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) { - int i; - - switch (read_cpuid_part()) { - case ARM_CPU_PART_CORTEX_A7: - case ARM_CPU_PART_CORTEX_A5: - /* - * The Cortex-A7 and Cortex-A5 do not perform well with - * the NEON implementation but do incredibly with the - * scalar one and use less power. - */ - for (i = 0; i < ARRAY_SIZE(neon_algs); i++) - neon_algs[i].base.cra_priority = 0; - break; - default: - static_branch_enable(&use_neon); - } - - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) { - err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); - if (err) - crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); - } - } - return err; -} - -static void __exit chacha_simd_mod_fini(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) { - crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) - crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); - } -} - -module_init(chacha_simd_mod_init); -module_exit(chacha_simd_mod_fini); - -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-arm"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-arm"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-arm"); -#ifdef CONFIG_KERNEL_MODE_NEON -MODULE_ALIAS_CRYPTO("chacha20-neon"); -MODULE_ALIAS_CRYPTO("xchacha20-neon"); -MODULE_ALIAS_CRYPTO("xchacha12-neon"); -#endif diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index aabfcf522a2c..a52dcc8c1e33 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -8,22 +8,22 @@ #include <asm/hwcap.h> #include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> #include <crypto/aes.h> -#include <crypto/gcm.h> #include <crypto/b128ops.h> -#include <crypto/cryptd.h> +#include <crypto/gcm.h> +#include <crypto/gf128mul.h> +#include <crypto/ghash.h> #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> -#include <crypto/gf128mul.h> #include <crypto/scatterwalk.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> 
+#include <linux/errno.h> #include <linux/jump_label.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); @@ -32,9 +32,6 @@ MODULE_ALIAS_CRYPTO("ghash"); MODULE_ALIAS_CRYPTO("gcm(aes)"); MODULE_ALIAS_CRYPTO("rfc4106(gcm(aes))"); -#define GHASH_BLOCK_SIZE 16 -#define GHASH_DIGEST_SIZE 16 - #define RFC4106_NONCE_SIZE 4 struct ghash_key { @@ -49,10 +46,8 @@ struct gcm_key { u8 nonce[]; // for RFC4106 nonce }; -struct ghash_desc_ctx { +struct arm_ghash_desc_ctx { u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)]; - u8 buf[GHASH_BLOCK_SIZE]; - u32 count; }; asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, @@ -65,9 +60,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64); static int ghash_init(struct shash_desc *desc) { - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); - *ctx = (struct ghash_desc_ctx){}; + *ctx = (struct arm_ghash_desc_ctx){}; return 0; } @@ -85,52 +80,49 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int len) { - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); - unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; - - ctx->count += len; + struct ghash_key *key = crypto_shash_ctx(desc->tfm); + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); + int blocks; - if ((partial + len) >= GHASH_BLOCK_SIZE) { - struct ghash_key *key = crypto_shash_ctx(desc->tfm); - int blocks; + blocks = len / GHASH_BLOCK_SIZE; + ghash_do_update(blocks, ctx->digest, src, key, NULL); + return len - blocks * GHASH_BLOCK_SIZE; +} - if (partial) { - int p = GHASH_BLOCK_SIZE - partial; +static int ghash_export(struct shash_desc *desc, void *out) +{ + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); + u8 *dst = out; - memcpy(ctx->buf + partial, src, p); - src += p; - len -= p; - } + put_unaligned_be64(ctx->digest[1], dst); + put_unaligned_be64(ctx->digest[0], dst + 8); + return 0; +} - blocks = len / GHASH_BLOCK_SIZE; - len %= GHASH_BLOCK_SIZE; +static int ghash_import(struct shash_desc *desc, const void *in) +{ + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); + const u8 *src = in; - ghash_do_update(blocks, ctx->digest, src, key, - partial ? 
ctx->buf : NULL); - src += blocks * GHASH_BLOCK_SIZE; - partial = 0; - } - if (len) - memcpy(ctx->buf + partial, src, len); + ctx->digest[1] = get_unaligned_be64(src); + ctx->digest[0] = get_unaligned_be64(src + 8); return 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); - unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; + struct ghash_key *key = crypto_shash_ctx(desc->tfm); + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); - if (partial) { - struct ghash_key *key = crypto_shash_ctx(desc->tfm); + if (len) { + u8 buf[GHASH_BLOCK_SIZE] = {}; - memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL); + memcpy(buf, src, len); + ghash_do_update(1, ctx->digest, buf, key, NULL); + memzero_explicit(buf, sizeof(buf)); } - put_unaligned_be64(ctx->digest[1], dst); - put_unaligned_be64(ctx->digest[0], dst + 8); - - *ctx = (struct ghash_desc_ctx){}; - return 0; + return ghash_export(desc, dst); } static void ghash_reflect(u64 h[], const be128 *k) @@ -175,13 +167,17 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, - .final = ghash_final, + .finup = ghash_finup, .setkey = ghash_setkey, - .descsize = sizeof(struct ghash_desc_ctx), + .export = ghash_export, + .import = ghash_import, + .descsize = sizeof(struct arm_ghash_desc_ctx), + .statesize = sizeof(struct ghash_desc_ctx), .base.cra_name = "ghash", .base.cra_driver_name = "ghash-ce", .base.cra_priority = 300, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = GHASH_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, @@ -317,9 +313,6 @@ static int gcm_encrypt(struct aead_request *req, const u8 *iv, u32 assoclen) u8 *tag, *dst; int tail, err; - if (WARN_ON_ONCE(!may_use_simd())) - return -EBUSY; - err = skcipher_walk_aead_encrypt(&walk, req, false); kernel_neon_begin(); @@ -409,9 +402,6 @@ static int gcm_decrypt(struct aead_request *req, const u8 *iv, u32 assoclen) u8 *tag, *dst; int tail, err, ret; - if (WARN_ON_ONCE(!may_use_simd())) - return -EBUSY; - scatterwalk_map_and_copy(otag, req->src, req->assoclen + req->cryptlen - authsize, authsize, 0); diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c deleted file mode 100644 index 4464ffbf8fd1..000000000000 --- a/arch/arm/crypto/poly1305-glue.c +++ /dev/null @@ -1,274 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM - * - * Copyright (C) 2019 Linaro Ltd. 
<ard.biesheuvel@linaro.org> - */ - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <crypto/internal/simd.h> -#include <linux/cpufeature.h> -#include <linux/crypto.h> -#include <linux/jump_label.h> -#include <linux/module.h> - -void poly1305_init_arm(void *state, const u8 *key); -void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit); -void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit); -void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce); - -void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) -{ -} - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); - -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) -{ - poly1305_init_arm(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(key + 16); - dctx->s[1] = get_unaligned_le32(key + 20); - dctx->s[2] = get_unaligned_le32(key + 24); - dctx->s[3] = get_unaligned_le32(key + 28); - dctx->buflen = 0; -} -EXPORT_SYMBOL(poly1305_init_arch); - -static int arm_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static void arm_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - u32 len, u32 hibit, bool do_neon) -{ - if (unlikely(!dctx->sset)) { - if (!dctx->rset) { - poly1305_init_arm(&dctx->h, src); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (len >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - if (len < POLY1305_BLOCK_SIZE) - return; - } - - len &= ~(POLY1305_BLOCK_SIZE - 1); - - if (static_branch_likely(&have_neon) && likely(do_neon)) - poly1305_blocks_neon(&dctx->h, src, len, hibit); - else - poly1305_blocks_arm(&dctx->h, src, len, hibit); -} - -static void arm_poly1305_do_update(struct poly1305_desc_ctx *dctx, - const u8 *src, u32 len, bool do_neon) -{ - if (unlikely(dctx->buflen)) { - u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - len -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - arm_poly1305_blocks(dctx, dctx->buf, - POLY1305_BLOCK_SIZE, 1, false); - dctx->buflen = 0; - } - } - - if (likely(len >= POLY1305_BLOCK_SIZE)) { - arm_poly1305_blocks(dctx, src, len, 1, do_neon); - src += round_down(len, POLY1305_BLOCK_SIZE); - len %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(len)) { - dctx->buflen = len; - memcpy(dctx->buf, src, len); - } -} - -static int arm_poly1305_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - arm_poly1305_do_update(dctx, src, srclen, false); - return 0; -} - -static int __maybe_unused arm_poly1305_update_neon(struct shash_desc *desc, - const u8 *src, - unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - bool do_neon = crypto_simd_usable() && srclen > 128; - - if (static_branch_likely(&have_neon) && do_neon) - kernel_neon_begin(); - arm_poly1305_do_update(dctx, src, srclen, do_neon); - if 
(static_branch_likely(&have_neon) && do_neon) - kernel_neon_end(); - return 0; -} - -void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int nbytes) -{ - bool do_neon = IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && - crypto_simd_usable(); - - if (unlikely(dctx->buflen)) { - u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - nbytes -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks_arm(&dctx->h, dctx->buf, - POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { - unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); - - if (static_branch_likely(&have_neon) && do_neon) { - do { - unsigned int todo = min_t(unsigned int, len, SZ_4K); - - kernel_neon_begin(); - poly1305_blocks_neon(&dctx->h, src, todo, 1); - kernel_neon_end(); - - len -= todo; - src += todo; - } while (len); - } else { - poly1305_blocks_arm(&dctx->h, src, len, 1); - src += len; - } - nbytes %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(nbytes)) { - dctx->buflen = nbytes; - memcpy(dctx->buf, src, nbytes); - } -} -EXPORT_SYMBOL(poly1305_update_arch); - -void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -{ - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); - poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - - poly1305_emit_arm(&dctx->h, dst, dctx->s); - *dctx = (struct poly1305_desc_ctx){}; -} -EXPORT_SYMBOL(poly1305_final_arch); - -static int arm_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_arch(dctx, dst); - return 0; -} - -static struct shash_alg arm_poly1305_algs[] = {{ - .init = arm_poly1305_init, - .update = arm_poly1305_update, - .final = arm_poly1305_final, - .digestsize = POLY1305_DIGEST_SIZE, - .descsize = sizeof(struct poly1305_desc_ctx), - - .base.cra_name = "poly1305", - .base.cra_driver_name = "poly1305-arm", - .base.cra_priority = 150, - .base.cra_blocksize = POLY1305_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -#ifdef CONFIG_KERNEL_MODE_NEON -}, { - .init = arm_poly1305_init, - .update = arm_poly1305_update_neon, - .final = arm_poly1305_final, - .digestsize = POLY1305_DIGEST_SIZE, - .descsize = sizeof(struct poly1305_desc_ctx), - - .base.cra_name = "poly1305", - .base.cra_driver_name = "poly1305-neon", - .base.cra_priority = 200, - .base.cra_blocksize = POLY1305_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -#endif -}}; - -static int __init arm_poly1305_mod_init(void) -{ - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && - (elf_hwcap & HWCAP_NEON)) - static_branch_enable(&have_neon); - else if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) - /* register only the first entry */ - return crypto_register_shash(&arm_poly1305_algs[0]); - - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? 
- crypto_register_shashes(arm_poly1305_algs, - ARRAY_SIZE(arm_poly1305_algs)) : 0; -} - -static void __exit arm_poly1305_mod_exit(void) -{ - if (!IS_REACHABLE(CONFIG_CRYPTO_HASH)) - return; - if (!static_branch_likely(&have_neon)) { - crypto_unregister_shash(&arm_poly1305_algs[0]); - return; - } - crypto_unregister_shashes(arm_poly1305_algs, - ARRAY_SIZE(arm_poly1305_algs)); -} - -module_init(arm_poly1305_mod_init); -module_exit(arm_poly1305_mod_exit); - -MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-arm"); -MODULE_ALIAS_CRYPTO("poly1305-neon"); diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c index de9100c67b37..fac07a4799de 100644 --- a/arch/arm/crypto/sha1-ce-glue.c +++ b/arch/arm/crypto/sha1-ce-glue.c @@ -5,20 +5,14 @@ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> */ +#include <asm/neon.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> - -#include "sha1.h" - MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); @@ -29,50 +23,36 @@ asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src, static int sha1_ce_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) - return sha1_update_arm(desc, data, len); + int remain; kernel_neon_begin(); - sha1_base_do_update(desc, data, len, sha1_ce_transform); + remain = sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform); kernel_neon_end(); - return 0; + return remain; } static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) - return sha1_finup_arm(desc, data, len, out); - kernel_neon_begin(); - if (len) - sha1_base_do_update(desc, data, len, sha1_ce_transform); - sha1_base_do_finalize(desc, sha1_ce_transform); + sha1_base_do_finup(desc, data, len, sha1_ce_transform); kernel_neon_end(); return sha1_base_finish(desc, out); } -static int sha1_ce_final(struct shash_desc *desc, u8 *out) -{ - return sha1_ce_finup(desc, NULL, 0, out); -} - static struct shash_alg alg = { .init = sha1_base_init, .update = sha1_ce_update, - .final = sha1_ce_final, .finup = sha1_ce_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .digestsize = SHA1_DIGEST_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-ce", .cra_priority = 200, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm/crypto/sha1.h b/arch/arm/crypto/sha1.h deleted file mode 100644 index b1b7e21da2c3..000000000000 --- a/arch/arm/crypto/sha1.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_ARM_CRYPTO_SHA1_H -#define ASM_ARM_CRYPTO_SHA1_H - -#include <linux/crypto.h> -#include <crypto/sha1.h> - -extern int sha1_update_arm(struct shash_desc *desc, const u8 *data, - unsigned int len); - -extern int sha1_finup_arm(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out); - -#endif diff --git 
a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c index 95a727bcd664..255da00c7d98 100644 --- a/arch/arm/crypto/sha1_glue.c +++ b/arch/arm/crypto/sha1_glue.c @@ -12,53 +12,42 @@ */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/byteorder.h> - -#include "sha1.h" +#include <linux/kernel.h> +#include <linux/module.h> asmlinkage void sha1_block_data_order(struct sha1_state *digest, const u8 *data, int rounds); -int sha1_update_arm(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int sha1_update_arm(struct shash_desc *desc, const u8 *data, + unsigned int len) { /* make sure signature matches sha1_block_fn() */ BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0); - return sha1_base_do_update(desc, data, len, sha1_block_data_order); + return sha1_base_do_update_blocks(desc, data, len, + sha1_block_data_order); } -EXPORT_SYMBOL_GPL(sha1_update_arm); -static int sha1_final(struct shash_desc *desc, u8 *out) +static int sha1_finup_arm(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - sha1_base_do_finalize(desc, sha1_block_data_order); + sha1_base_do_finup(desc, data, len, sha1_block_data_order); return sha1_base_finish(desc, out); } -int sha1_finup_arm(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha1_base_do_update(desc, data, len, sha1_block_data_order); - return sha1_final(desc, out); -} -EXPORT_SYMBOL_GPL(sha1_finup_arm); - static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_update_arm, - .final = sha1_final, .finup = sha1_finup_arm, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-asm", .cra_priority = 150, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c index 9c70b87e69f7..d321850f22a6 100644 --- a/arch/arm/crypto/sha1_neon_glue.c +++ b/arch/arm/crypto/sha1_neon_glue.c @@ -13,18 +13,12 @@ * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com> */ +#include <asm/neon.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/neon.h> -#include <asm/simd.h> - -#include "sha1.h" +#include <linux/kernel.h> +#include <linux/module.h> asmlinkage void sha1_transform_neon(struct sha1_state *state_h, const u8 *data, int rounds); @@ -32,50 +26,37 @@ asmlinkage void sha1_transform_neon(struct sha1_state *state_h, static int sha1_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) - return sha1_update_arm(desc, data, len); + int remain; kernel_neon_begin(); - sha1_base_do_update(desc, data, len, sha1_transform_neon); + remain = sha1_base_do_update_blocks(desc, data, len, + sha1_transform_neon); kernel_neon_end(); - return 0; + return remain; } static int sha1_neon_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) - return sha1_finup_arm(desc, data, len, out); - kernel_neon_begin(); - if (len) - sha1_base_do_update(desc, data, 
len, sha1_transform_neon); - sha1_base_do_finalize(desc, sha1_transform_neon); + sha1_base_do_finup(desc, data, len, sha1_transform_neon); kernel_neon_end(); return sha1_base_finish(desc, out); } -static int sha1_neon_final(struct shash_desc *desc, u8 *out) -{ - return sha1_neon_finup(desc, NULL, 0, out); -} - static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_neon_update, - .final = sha1_neon_final, .finup = sha1_neon_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-neon", .cra_priority = 250, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c deleted file mode 100644 index aeac45bfbf9f..000000000000 --- a/arch/arm/crypto/sha2-ce-glue.c +++ /dev/null @@ -1,109 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions - * - * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> - */ - -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <linux/cpufeature.h> -#include <linux/crypto.h> -#include <linux/module.h> - -#include <asm/hwcap.h> -#include <asm/simd.h> -#include <asm/neon.h> -#include <linux/unaligned.h> - -#include "sha256_glue.h" - -MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); - -asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src, - int blocks); - -static int sha2_ce_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) - return crypto_sha256_arm_update(desc, data, len); - - kernel_neon_begin(); - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha2_ce_transform); - kernel_neon_end(); - - return 0; -} - -static int sha2_ce_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - if (!crypto_simd_usable()) - return crypto_sha256_arm_finup(desc, data, len, out); - - kernel_neon_begin(); - if (len) - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha2_ce_transform); - sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); - kernel_neon_end(); - - return sha256_base_finish(desc, out); -} - -static int sha2_ce_final(struct shash_desc *desc, u8 *out) -{ - return sha2_ce_finup(desc, NULL, 0, out); -} - -static struct shash_alg algs[] = { { - .init = sha224_base_init, - .update = sha2_ce_update, - .final = sha2_ce_final, - .finup = sha2_ce_finup, - .descsize = sizeof(struct sha256_state), - .digestsize = SHA224_DIGEST_SIZE, - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-ce", - .cra_priority = 300, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .init = sha256_base_init, - .update = sha2_ce_update, - .final = sha2_ce_final, - .finup = sha2_ce_finup, - .descsize = sizeof(struct sha256_state), - .digestsize = SHA256_DIGEST_SIZE, - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-ce", - .cra_priority = 300, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha2_ce_mod_init(void) -{ - return 
crypto_register_shashes(algs, ARRAY_SIZE(algs)); -} - -static void __exit sha2_ce_mod_fini(void) -{ - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); -} - -module_cpu_feature_match(SHA2, sha2_ce_mod_init); -module_exit(sha2_ce_mod_fini); diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c deleted file mode 100644 index f85933fdec75..000000000000 --- a/arch/arm/crypto/sha256_glue.c +++ /dev/null @@ -1,117 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Glue code for the SHA256 Secure Hash Algorithm assembly implementation - * using optimized ARM assembler and NEON instructions. - * - * Copyright © 2015 Google Inc. - * - * This file is based on sha256_ssse3_glue.c: - * Copyright (C) 2013 Intel Corporation - * Author: Tim Chen <tim.c.chen@linux.intel.com> - */ - -#include <crypto/internal/hash.h> -#include <linux/crypto.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <linux/string.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <asm/simd.h> -#include <asm/neon.h> - -#include "sha256_glue.h" - -asmlinkage void sha256_block_data_order(struct sha256_state *state, - const u8 *data, int num_blks); - -int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - /* make sure casting to sha256_block_fn() is safe */ - BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); - - return sha256_base_do_update(desc, data, len, sha256_block_data_order); -} -EXPORT_SYMBOL(crypto_sha256_arm_update); - -static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out) -{ - sha256_base_do_finalize(desc, sha256_block_data_order); - return sha256_base_finish(desc, out); -} - -int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha256_base_do_update(desc, data, len, sha256_block_data_order); - return crypto_sha256_arm_final(desc, out); -} -EXPORT_SYMBOL(crypto_sha256_arm_finup); - -static struct shash_alg algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = crypto_sha256_arm_update, - .final = crypto_sha256_arm_final, - .finup = crypto_sha256_arm_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-asm", - .cra_priority = 150, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = crypto_sha256_arm_update, - .final = crypto_sha256_arm_final, - .finup = crypto_sha256_arm_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-asm", - .cra_priority = 150, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha256_mod_init(void) -{ - int res = crypto_register_shashes(algs, ARRAY_SIZE(algs)); - - if (res < 0) - return res; - - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) { - res = crypto_register_shashes(sha256_neon_algs, - ARRAY_SIZE(sha256_neon_algs)); - - if (res < 0) - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); - } - - return res; -} - -static void __exit sha256_mod_fini(void) -{ - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); - - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) - crypto_unregister_shashes(sha256_neon_algs, - ARRAY_SIZE(sha256_neon_algs)); -} - -module_init(sha256_mod_init); -module_exit(sha256_mod_fini); - -MODULE_LICENSE("GPL"); 
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON"); - -MODULE_ALIAS_CRYPTO("sha256"); diff --git a/arch/arm/crypto/sha256_glue.h b/arch/arm/crypto/sha256_glue.h deleted file mode 100644 index 9f0d578bab5f..000000000000 --- a/arch/arm/crypto/sha256_glue.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _CRYPTO_SHA256_GLUE_H -#define _CRYPTO_SHA256_GLUE_H - -#include <linux/crypto.h> - -extern struct shash_alg sha256_neon_algs[2]; - -int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, - unsigned int len); - -int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash); - -#endif /* _CRYPTO_SHA256_GLUE_H */ diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c deleted file mode 100644 index ccdcfff71910..000000000000 --- a/arch/arm/crypto/sha256_neon_glue.c +++ /dev/null @@ -1,92 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Glue code for the SHA256 Secure Hash Algorithm assembly implementation - * using NEON instructions. - * - * Copyright © 2015 Google Inc. - * - * This file is based on sha512_neon_glue.c: - * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> - */ - -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/types.h> -#include <linux/string.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <asm/byteorder.h> -#include <asm/simd.h> -#include <asm/neon.h> - -#include "sha256_glue.h" - -asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest, - const u8 *data, int num_blks); - -static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) - return crypto_sha256_arm_update(desc, data, len); - - kernel_neon_begin(); - sha256_base_do_update(desc, data, len, sha256_block_data_order_neon); - kernel_neon_end(); - - return 0; -} - -static int crypto_sha256_neon_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - if (!crypto_simd_usable()) - return crypto_sha256_arm_finup(desc, data, len, out); - - kernel_neon_begin(); - if (len) - sha256_base_do_update(desc, data, len, - sha256_block_data_order_neon); - sha256_base_do_finalize(desc, sha256_block_data_order_neon); - kernel_neon_end(); - - return sha256_base_finish(desc, out); -} - -static int crypto_sha256_neon_final(struct shash_desc *desc, u8 *out) -{ - return crypto_sha256_neon_finup(desc, NULL, 0, out); -} - -struct shash_alg sha256_neon_algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = crypto_sha256_neon_update, - .final = crypto_sha256_neon_final, - .finup = crypto_sha256_neon_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-neon", - .cra_priority = 250, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = crypto_sha256_neon_update, - .final = crypto_sha256_neon_final, - .finup = crypto_sha256_neon_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-neon", - .cra_priority = 250, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c 
index 1be5bd498af3..f8a6480889b1 100644 --- a/arch/arm/crypto/sha512-glue.c +++ b/arch/arm/crypto/sha512-glue.c @@ -5,15 +5,14 @@ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> */ +#include <asm/hwcap.h> +#include <asm/neon.h> #include <crypto/internal/hash.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/hwcap.h> -#include <asm/neon.h> - #include "sha512.h" MODULE_DESCRIPTION("Accelerated SHA-384/SHA-512 secure hash for ARM"); @@ -28,50 +27,47 @@ MODULE_ALIAS_CRYPTO("sha512-arm"); asmlinkage void sha512_block_data_order(struct sha512_state *state, u8 const *src, int blocks); -int sha512_arm_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int sha512_arm_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - return sha512_base_do_update(desc, data, len, sha512_block_data_order); + return sha512_base_do_update_blocks(desc, data, len, + sha512_block_data_order); } -static int sha512_arm_final(struct shash_desc *desc, u8 *out) +static int sha512_arm_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - sha512_base_do_finalize(desc, sha512_block_data_order); + sha512_base_do_finup(desc, data, len, sha512_block_data_order); return sha512_base_finish(desc, out); } -int sha512_arm_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha512_base_do_update(desc, data, len, sha512_block_data_order); - return sha512_arm_final(desc, out); -} - static struct shash_alg sha512_arm_algs[] = { { .init = sha384_base_init, .update = sha512_arm_update, - .final = sha512_arm_final, .finup = sha512_arm_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA384_DIGEST_SIZE, .base = { .cra_name = "sha384", .cra_driver_name = "sha384-arm", .cra_priority = 250, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }, { .init = sha512_base_init, .update = sha512_arm_update, - .final = sha512_arm_final, .finup = sha512_arm_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA512_DIGEST_SIZE, .base = { .cra_name = "sha512", .cra_driver_name = "sha512-arm", .cra_priority = 250, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c index c6e58fe475ac..bd528077fefb 100644 --- a/arch/arm/crypto/sha512-neon-glue.c +++ b/arch/arm/crypto/sha512-neon-glue.c @@ -5,16 +5,13 @@ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> */ +#include <asm/neon.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/simd.h> -#include <asm/neon.h> - #include "sha512.h" MODULE_ALIAS_CRYPTO("sha384-neon"); @@ -26,51 +23,36 @@ asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, static int sha512_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha512_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) - return sha512_arm_update(desc, data, len); + int remain; kernel_neon_begin(); 
- sha512_base_do_update(desc, data, len, sha512_block_data_order_neon); + remain = sha512_base_do_update_blocks(desc, data, len, + sha512_block_data_order_neon); kernel_neon_end(); - - return 0; + return remain; } static int sha512_neon_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) - return sha512_arm_finup(desc, data, len, out); - kernel_neon_begin(); - if (len) - sha512_base_do_update(desc, data, len, - sha512_block_data_order_neon); - sha512_base_do_finalize(desc, sha512_block_data_order_neon); + sha512_base_do_finup(desc, data, len, sha512_block_data_order_neon); kernel_neon_end(); - return sha512_base_finish(desc, out); } -static int sha512_neon_final(struct shash_desc *desc, u8 *out) -{ - return sha512_neon_finup(desc, NULL, 0, out); -} - struct shash_alg sha512_neon_algs[] = { { .init = sha384_base_init, .update = sha512_neon_update, - .final = sha512_neon_final, .finup = sha512_neon_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA384_DIGEST_SIZE, .base = { .cra_name = "sha384", .cra_driver_name = "sha384-neon", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, @@ -78,14 +60,15 @@ struct shash_alg sha512_neon_algs[] = { { }, { .init = sha512_base_init, .update = sha512_neon_update, - .final = sha512_neon_final, .finup = sha512_neon_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA512_DIGEST_SIZE, .base = { .cra_name = "sha512", .cra_driver_name = "sha512-neon", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm/crypto/sha512.h b/arch/arm/crypto/sha512.h index e14572be76d1..eeaee52cda69 100644 --- a/arch/arm/crypto/sha512.h +++ b/arch/arm/crypto/sha512.h @@ -1,9 +1,3 @@ /* SPDX-License-Identifier: GPL-2.0 */ -int sha512_arm_update(struct shash_desc *desc, const u8 *data, - unsigned int len); - -int sha512_arm_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out); - extern struct shash_alg sha512_neon_algs[2]; diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h index 82191dbd7e78..d37559762180 100644 --- a/arch/arm/include/asm/simd.h +++ b/arch/arm/include/asm/simd.h @@ -1,8 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SIMD_H +#define _ASM_SIMD_H -#include <linux/hardirq.h> +#include <linux/compiler_attributes.h> +#include <linux/preempt.h> +#include <linux/types.h> static __must_check inline bool may_use_simd(void) { return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && !in_hardirq(); } + +#endif /* _ASM_SIMD_H */ diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index 007874320937..91ea0e29107a 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -5,6 +5,8 @@ # Copyright (C) 1995-2000 Russell King # +obj-y += crypto/ + lib-y := changebit.o csumipv6.o csumpartial.o \ csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ delay.o delay-loop.o findbit.o memchr.o memcpy.o \ @@ -47,7 +49,7 @@ endif obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o obj-$(CONFIG_CRC32_ARCH) += crc32-arm.o -crc32-arm-y := crc32-glue.o crc32-core.o +crc32-arm-y := crc32.o crc32-core.o obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm.o -crc-t10dif-arm-y := crc-t10dif-glue.o crc-t10dif-core.o +crc-t10dif-arm-y := crc-t10dif.o 
crc-t10dif-core.o diff --git a/arch/arm/lib/crc-t10dif-glue.c b/arch/arm/lib/crc-t10dif.c index 6efad3d78284..1093f8ec13b0 100644 --- a/arch/arm/lib/crc-t10dif-glue.c +++ b/arch/arm/lib/crc-t10dif.c @@ -16,8 +16,8 @@ #include <asm/neon.h> #include <asm/simd.h> -static DEFINE_STATIC_KEY_FALSE(have_neon); -static DEFINE_STATIC_KEY_FALSE(have_pmull); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull); #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U @@ -60,7 +60,7 @@ static int __init crc_t10dif_arm_init(void) } return 0; } -arch_initcall(crc_t10dif_arm_init); +subsys_initcall(crc_t10dif_arm_init); static void __exit crc_t10dif_arm_exit(void) { diff --git a/arch/arm/lib/crc32-glue.c b/arch/arm/lib/crc32.c index 4340351dbde8..f2bef8849c7c 100644 --- a/arch/arm/lib/crc32-glue.c +++ b/arch/arm/lib/crc32.c @@ -18,8 +18,8 @@ #include <asm/neon.h> #include <asm/simd.h> -static DEFINE_STATIC_KEY_FALSE(have_crc32); -static DEFINE_STATIC_KEY_FALSE(have_pmull); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull); #define PMULL_MIN_LEN 64 /* min size of buffer for pmull functions */ @@ -103,7 +103,7 @@ static int __init crc32_arm_init(void) static_branch_enable(&have_pmull); return 0; } -arch_initcall(crc32_arm_init); +subsys_initcall(crc32_arm_init); static void __exit crc32_arm_exit(void) { diff --git a/arch/arm/lib/crypto/.gitignore b/arch/arm/lib/crypto/.gitignore new file mode 100644 index 000000000000..12d74d8b03d0 --- /dev/null +++ b/arch/arm/lib/crypto/.gitignore @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +poly1305-core.S +sha256-core.S diff --git a/arch/arm/lib/crypto/Kconfig b/arch/arm/lib/crypto/Kconfig new file mode 100644 index 000000000000..d1ad664f0c67 --- /dev/null +++ b/arch/arm/lib/crypto/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_BLAKE2S_ARM + bool "Hash functions: BLAKE2s" + select CRYPTO_ARCH_HAVE_LIB_BLAKE2S + help + BLAKE2s cryptographic hash function (RFC 7693) + + Architecture: arm + + This is faster than the generic implementations of BLAKE2s and + BLAKE2b, but slower than the NEON implementation of BLAKE2b. + There is no NEON implementation of BLAKE2s, since NEON doesn't + really help with it. 
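Note on how this option is consumed: CRYPTO_BLAKE2S_ARM only provides the arch backend for the BLAKE2s library code (it selects CRYPTO_ARCH_HAVE_LIB_BLAKE2S above), so callers use the <crypto/blake2s.h> library interface rather than a crypto_shash driver. A minimal sketch of such a caller follows; the function name and buffer handling are illustrative and not part of this patch.

/* Illustrative only: unkeyed one-shot BLAKE2s via the library interface. */
#include <crypto/blake2s.h>

static void example_blake2s_digest(const void *data, size_t len,
				   u8 out[BLAKE2S_HASH_SIZE])
{
	/* The accelerated compress routine is used automatically when
	 * CRYPTO_BLAKE2S_ARM is enabled; no algorithm lookup is needed. */
	blake2s(out, data, NULL, BLAKE2S_HASH_SIZE, len, 0);
}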
+ +config CRYPTO_CHACHA20_NEON + tristate + default CRYPTO_LIB_CHACHA + select CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_POLY1305_ARM + tristate + default CRYPTO_LIB_POLY1305 + select CRYPTO_ARCH_HAVE_LIB_POLY1305 + +config CRYPTO_SHA256_ARM + tristate + depends on !CPU_V7M + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD diff --git a/arch/arm/lib/crypto/Makefile b/arch/arm/lib/crypto/Makefile new file mode 100644 index 000000000000..431f77c3ff6f --- /dev/null +++ b/arch/arm/lib/crypto/Makefile @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o +libblake2s-arm-y := blake2s-core.o blake2s-glue.o + +obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o +chacha-neon-y := chacha-scalar-core.o chacha-glue.o +chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o + +obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o +poly1305-arm-y := poly1305-core.o poly1305-glue.o + +obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o +sha256-arm-y := sha256.o sha256-core.o +sha256-arm-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o + +quiet_cmd_perl = PERL $@ + cmd_perl = $(PERL) $(<) > $(@) + +$(obj)/%-core.S: $(src)/%-armv4.pl + $(call cmd,perl) + +clean-files += poly1305-core.S sha256-core.S + +aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1 + +# massage the perlasm code a bit so we only get the NEON routine if we need it +poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5 +poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7 +AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y) + +AFLAGS_sha256-core.o += $(aflags-thumb2-y) diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/lib/crypto/blake2s-core.S index df40e46601f1..df40e46601f1 100644 --- a/arch/arm/crypto/blake2s-core.S +++ b/arch/arm/lib/crypto/blake2s-core.S diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/lib/crypto/blake2s-glue.c index 0238a70d9581..0238a70d9581 100644 --- a/arch/arm/crypto/blake2s-glue.c +++ b/arch/arm/lib/crypto/blake2s-glue.c diff --git a/arch/arm/lib/crypto/chacha-glue.c b/arch/arm/lib/crypto/chacha-glue.c new file mode 100644 index 000000000000..88ec96415283 --- /dev/null +++ b/arch/arm/lib/crypto/chacha-glue.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ChaCha and HChaCha functions (ARM optimized) + * + * Copyright (C) 2016-2019 Linaro, Ltd. 
<ard.biesheuvel@linaro.org> + * Copyright (C) 2015 Martin Willi + */ + +#include <crypto/chacha.h> +#include <crypto/internal/simd.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <asm/cputype.h> +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <asm/simd.h> + +asmlinkage void chacha_block_xor_neon(const struct chacha_state *state, + u8 *dst, const u8 *src, int nrounds); +asmlinkage void chacha_4block_xor_neon(const struct chacha_state *state, + u8 *dst, const u8 *src, + int nrounds, unsigned int nbytes); +asmlinkage void hchacha_block_arm(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); +asmlinkage void hchacha_block_neon(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); + +asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes, + const struct chacha_state *state, int nrounds); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon); + +static inline bool neon_usable(void) +{ + return static_branch_likely(&use_neon) && crypto_simd_usable(); +} + +static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + u8 buf[CHACHA_BLOCK_SIZE]; + + while (bytes > CHACHA_BLOCK_SIZE) { + unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U); + + chacha_4block_xor_neon(state, dst, src, nrounds, l); + bytes -= l; + src += l; + dst += l; + state->x[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); + } + if (bytes) { + const u8 *s = src; + u8 *d = dst; + + if (bytes != CHACHA_BLOCK_SIZE) + s = d = memcpy(buf, src, bytes); + chacha_block_xor_neon(state, d, s, nrounds); + if (d != dst) + memcpy(dst, buf, bytes); + state->x[12]++; + } +} + +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) +{ + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) { + hchacha_block_arm(state, out, nrounds); + } else { + kernel_neon_begin(); + hchacha_block_neon(state, out, nrounds); + kernel_neon_end(); + } +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() || + bytes <= CHACHA_BLOCK_SIZE) { + chacha_doarm(dst, src, bytes, state, nrounds); + state->x[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE); + return; + } + + do { + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); + + kernel_neon_begin(); + chacha_doneon(state, dst, src, todo, nrounds); + kernel_neon_end(); + + bytes -= todo; + src += todo; + dst += todo; + } while (bytes); +} +EXPORT_SYMBOL(chacha_crypt_arch); + +bool chacha_is_arch_optimized(void) +{ + /* We always can use at least the ARM scalar implementation. */ + return true; +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +static int __init chacha_arm_mod_init(void) +{ + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) { + switch (read_cpuid_part()) { + case ARM_CPU_PART_CORTEX_A7: + case ARM_CPU_PART_CORTEX_A5: + /* + * The Cortex-A7 and Cortex-A5 do not perform well with + * the NEON implementation but do incredibly with the + * scalar one and use less power. 
+ */ + break; + default: + static_branch_enable(&use_neon); + } + } + return 0; +} +subsys_initcall(chacha_arm_mod_init); + +static void __exit chacha_arm_mod_exit(void) +{ +} +module_exit(chacha_arm_mod_exit); + +MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM optimized)"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/crypto/chacha-neon-core.S b/arch/arm/lib/crypto/chacha-neon-core.S index 13d12f672656..ddd62b6294a5 100644 --- a/arch/arm/crypto/chacha-neon-core.S +++ b/arch/arm/lib/crypto/chacha-neon-core.S @@ -1,5 +1,5 @@ /* - * ChaCha/XChaCha NEON helper functions + * ChaCha/HChaCha NEON helper functions * * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org> * diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/lib/crypto/chacha-scalar-core.S index 083fe1ab96d0..4951df05c158 100644 --- a/arch/arm/crypto/chacha-scalar-core.S +++ b/arch/arm/lib/crypto/chacha-scalar-core.S @@ -367,7 +367,7 @@ /* * void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes, - * const u32 *state, int nrounds); + * const struct chacha_state *state, int nrounds); */ ENTRY(chacha_doarm) cmp r2, #0 // len == 0? @@ -407,7 +407,8 @@ ENTRY(chacha_doarm) ENDPROC(chacha_doarm) /* - * void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds); + * void hchacha_block_arm(const struct chacha_state *state, + * u32 out[HCHACHA_OUT_WORDS], int nrounds); */ ENTRY(hchacha_block_arm) push {r1,r4-r11,lr} diff --git a/arch/arm/crypto/poly1305-armv4.pl b/arch/arm/lib/crypto/poly1305-armv4.pl index 6d79498d3115..d57c6e2fc84a 100644 --- a/arch/arm/crypto/poly1305-armv4.pl +++ b/arch/arm/lib/crypto/poly1305-armv4.pl @@ -43,9 +43,9 @@ $code.=<<___; #else # define __ARM_ARCH__ __LINUX_ARM_ARCH__ # define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ -# define poly1305_init poly1305_init_arm +# define poly1305_init poly1305_block_init_arch # define poly1305_blocks poly1305_blocks_arm -# define poly1305_emit poly1305_emit_arm +# define poly1305_emit poly1305_emit_arch .globl poly1305_blocks_neon #endif diff --git a/arch/arm/lib/crypto/poly1305-glue.c b/arch/arm/lib/crypto/poly1305-glue.c new file mode 100644 index 000000000000..2603b0771f2c --- /dev/null +++ b/arch/arm/lib/crypto/poly1305-glue.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM + * + * Copyright (C) 2019 Linaro Ltd. 
<ard.biesheuvel@linaro.org> + */ + +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <crypto/internal/poly1305.h> +#include <linux/cpufeature.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/unaligned.h> + +asmlinkage void poly1305_block_init_arch( + struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +EXPORT_SYMBOL_GPL(poly1305_block_init_arch); +asmlinkage void poly1305_blocks_arm(struct poly1305_block_state *state, + const u8 *src, u32 len, u32 hibit); +asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state, + const u8 *src, u32 len, u32 hibit); +asmlinkage void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]); +EXPORT_SYMBOL_GPL(poly1305_emit_arch); + +void __weak poly1305_blocks_neon(struct poly1305_block_state *state, + const u8 *src, u32 len, u32 hibit) +{ +} + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); + +void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src, + unsigned int len, u32 padbit) +{ + len = round_down(len, POLY1305_BLOCK_SIZE); + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + static_branch_likely(&have_neon)) { + do { + unsigned int todo = min_t(unsigned int, len, SZ_4K); + + kernel_neon_begin(); + poly1305_blocks_neon(state, src, todo, padbit); + kernel_neon_end(); + + len -= todo; + src += todo; + } while (len); + } else + poly1305_blocks_arm(state, src, len, padbit); +} +EXPORT_SYMBOL_GPL(poly1305_blocks_arch); + +bool poly1305_is_arch_optimized(void) +{ + /* We always can use at least the ARM scalar implementation. */ + return true; +} +EXPORT_SYMBOL(poly1305_is_arch_optimized); + +static int __init arm_poly1305_mod_init(void) +{ + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + (elf_hwcap & HWCAP_NEON)) + static_branch_enable(&have_neon); + return 0; +} +subsys_initcall(arm_poly1305_mod_init); + +static void __exit arm_poly1305_mod_exit(void) +{ +} +module_exit(arm_poly1305_mod_exit); + +MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/lib/crypto/sha256-armv4.pl index f3a2b54efd4e..8122db7fd599 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/lib/crypto/sha256-armv4.pl @@ -204,18 +204,18 @@ K256: .word 0 @ terminator #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) .LOPENSSL_armcap: -.word OPENSSL_armcap_P-sha256_block_data_order +.word OPENSSL_armcap_P-sha256_blocks_arch #endif .align 5 -.global sha256_block_data_order -.type sha256_block_data_order,%function -sha256_block_data_order: -.Lsha256_block_data_order: +.global sha256_blocks_arch +.type sha256_blocks_arch,%function +sha256_blocks_arch: +.Lsha256_blocks_arch: #if __ARM_ARCH__<7 - sub r3,pc,#8 @ sha256_block_data_order + sub r3,pc,#8 @ sha256_blocks_arch #else - adr r3,.Lsha256_block_data_order + adr r3,.Lsha256_blocks_arch #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap @@ -282,7 +282,7 @@ $code.=<<___; moveq pc,lr @ be binary compatible with V4, yet bx lr @ interoperable with Thumb ISA:-) #endif -.size sha256_block_data_order,.-sha256_block_data_order +.size sha256_blocks_arch,.-sha256_blocks_arch ___ ###################################################################### # NEON stuff @@ -470,8 +470,8 @@ sha256_block_data_order_neon: stmdb sp!,{r4-r12,lr} sub $H,sp,#16*4+16 - adr $Ktbl,.Lsha256_block_data_order - sub $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256 + adr 
$Ktbl,.Lsha256_blocks_arch + sub $Ktbl,$Ktbl,#.Lsha256_blocks_arch-K256 bic $H,$H,#15 @ align for 128-bit stores mov $t2,sp mov sp,$H @ alloca diff --git a/arch/arm/crypto/sha2-ce-core.S b/arch/arm/lib/crypto/sha256-ce.S index b6369d2440a1..ac2c9b01b22d 100644 --- a/arch/arm/crypto/sha2-ce-core.S +++ b/arch/arm/lib/crypto/sha256-ce.S @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * sha2-ce-core.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions + * sha256-ce.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions * * Copyright (C) 2015 Linaro Ltd. * Author: Ard Biesheuvel <ard.biesheuvel@linaro.org> @@ -67,10 +67,10 @@ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 /* - * void sha2_ce_transform(struct sha256_state *sst, u8 const *src, - int blocks); + * void sha256_ce_transform(u32 state[SHA256_STATE_WORDS], + * const u8 *data, size_t nblocks); */ -ENTRY(sha2_ce_transform) +ENTRY(sha256_ce_transform) /* load state */ vld1.32 {dga-dgb}, [r0] @@ -120,4 +120,4 @@ ENTRY(sha2_ce_transform) /* store new state */ vst1.32 {dga-dgb}, [r0] bx lr -ENDPROC(sha2_ce_transform) +ENDPROC(sha256_ce_transform) diff --git a/arch/arm/lib/crypto/sha256.c b/arch/arm/lib/crypto/sha256.c new file mode 100644 index 000000000000..109192e54b0f --- /dev/null +++ b/arch/arm/lib/crypto/sha256.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256 optimized for ARM + * + * Copyright 2025 Google LLC + */ +#include <asm/neon.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +EXPORT_SYMBOL_GPL(sha256_blocks_arch); +asmlinkage void sha256_block_data_order_neon(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce); + +void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + static_branch_likely(&have_neon)) { + kernel_neon_begin(); + if (static_branch_likely(&have_ce)) + sha256_ce_transform(state, data, nblocks); + else + sha256_block_data_order_neon(state, data, nblocks); + kernel_neon_end(); + } else { + sha256_blocks_arch(state, data, nblocks); + } +} +EXPORT_SYMBOL_GPL(sha256_blocks_simd); + +bool sha256_is_arch_optimized(void) +{ + /* We always can use at least the ARM scalar implementation. 
*/ + return true; +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +static int __init sha256_arm_mod_init(void) +{ + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) { + static_branch_enable(&have_neon); + if (elf_hwcap2 & HWCAP2_SHA2) + static_branch_enable(&have_ce); + } + return 0; +} +subsys_initcall(sha256_arm_mod_init); + +static void __exit sha256_arm_mod_exit(void) +{ +} +module_exit(sha256_arm_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 optimized for ARM"); diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index cac4e82f6c82..150a1e56dcae 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -209,9 +209,8 @@ static int __init exynos_pmu_irq_init(struct device_node *node, return -ENOMEM; } - domain = irq_domain_add_hierarchy(parent_domain, 0, 0, - node, &exynos_pmu_domain_ops, - NULL); + domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node), + &exynos_pmu_domain_ops, NULL); if (!domain) { iounmap(pmu_base_addr); pmu_base_addr = NULL; diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c index cf6546ddc7a3..3067c06b4b8e 100644 --- a/arch/arm/mach-imx/avic.c +++ b/arch/arm/mach-imx/avic.c @@ -201,8 +201,8 @@ static void __init mxc_init_irq(void __iomem *irqbase) WARN_ON(irq_base < 0); np = of_find_compatible_node(NULL, NULL, "fsl,avic"); - domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0, - &irq_domain_simple_ops, NULL); + domain = irq_domain_create_legacy(of_fwnode_handle(np), AVIC_NUM_IRQS, irq_base, 0, + &irq_domain_simple_ops, NULL); WARN_ON(!domain); for (i = 0; i < AVIC_NUM_IRQS / 32; i++, irq_base += 32) diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 5909088d5482..2e633569d2f8 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -245,9 +245,8 @@ static int __init imx_gpc_init(struct device_node *node, if (WARN_ON(!gpc_base)) return -ENOMEM; - domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, - node, &imx_gpc_domain_ops, - NULL); + domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS, of_fwnode_handle(node), + &imx_gpc_domain_ops, NULL); if (!domain) { iounmap(gpc_base); return -ENOMEM; diff --git a/arch/arm/mach-imx/tzic.c b/arch/arm/mach-imx/tzic.c index 8b3d98d288d9..50a5668e65d2 100644 --- a/arch/arm/mach-imx/tzic.c +++ b/arch/arm/mach-imx/tzic.c @@ -175,8 +175,8 @@ static int __init tzic_init_dt(struct device_node *np, struct device_node *p) irq_base = irq_alloc_descs(-1, 0, TZIC_NUM_IRQS, numa_node_id()); WARN_ON(irq_base < 0); - domain = irq_domain_add_legacy(np, TZIC_NUM_IRQS, irq_base, 0, - &irq_domain_simple_ops, NULL); + domain = irq_domain_create_legacy(of_fwnode_handle(np), TZIC_NUM_IRQS, irq_base, 0, + &irq_domain_simple_ops, NULL); WARN_ON(!domain); for (i = 0; i < 4; i++, irq_base += 32) diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c index 9b587ecebb1c..bb1bc060ecd8 100644 --- a/arch/arm/mach-omap1/irq.c +++ b/arch/arm/mach-omap1/irq.c @@ -220,8 +220,7 @@ void __init omap1_init_irq(void) omap_l2_irq = irq_base; omap_l2_irq -= NR_IRQS_LEGACY; - domain = irq_domain_add_legacy(NULL, nr_irqs, irq_base, 0, - &irq_domain_simple_ops, NULL); + domain = irq_domain_create_legacy(NULL, nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL); pr_info("Total of %lu interrupts in %i interrupt banks\n", nr_irqs, irq_bank_count); diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 
6f0d6120c174..a66b1dc61571 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -585,9 +585,8 @@ static int __init wakeupgen_init(struct device_node *node, wakeupgen_ops = &am43xx_wakeupgen_ops; } - domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs, - node, &wakeupgen_domain_ops, - NULL); + domain = irq_domain_create_hierarchy(parent_domain, 0, max_irqs, of_fwnode_handle(node), + &wakeupgen_domain_ops, NULL); if (!domain) { iounmap(wakeupgen_base); return -ENOMEM; diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index d9cadd97748a..5bfce8aa4102 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c @@ -147,9 +147,8 @@ pxa_init_irq_common(struct device_node *node, int irq_nr, int n; pxa_internal_irq_nr = irq_nr; - pxa_irq_domain = irq_domain_add_legacy(node, irq_nr, - PXA_IRQ(0), 0, - &pxa_irq_ops, NULL); + pxa_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), irq_nr, PXA_IRQ(0), 0, + &pxa_irq_ops, NULL); if (!pxa_irq_domain) panic("Unable to add PXA IRQ domain\n"); irq_set_default_domain(pxa_irq_domain); diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 595e9cb33c1d..326616fbdc44 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -496,11 +496,10 @@ static void orion_gpio_unmask_irq(struct irq_data *d) u32 reg_val; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); reg_val = irq_reg_readl(gc, ct->regs.mask); reg_val |= mask; irq_reg_writel(gc, reg_val, ct->regs.mask); - irq_gc_unlock(gc); } static void orion_gpio_mask_irq(struct irq_data *d) @@ -510,11 +509,10 @@ static void orion_gpio_mask_irq(struct irq_data *d) u32 mask = d->mask; u32 reg_val; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); reg_val = irq_reg_readl(gc, ct->regs.mask); reg_val &= ~mask; irq_reg_writel(gc, reg_val, ct->regs.mask); - irq_gc_unlock(gc); } void __init orion_gpio_init(int gpio_base, int ngpio, @@ -602,12 +600,12 @@ void __init orion_gpio_init(int gpio_base, int ngpio, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); /* Setup irq domain on top of the generic chip. */ - ochip->domain = irq_domain_add_legacy(NULL, - ochip->chip.ngpio, - ochip->secondary_irq_base, - ochip->secondary_irq_base, - &irq_domain_simple_ops, - ochip); + ochip->domain = irq_domain_create_legacy(NULL, + ochip->chip.ngpio, + ochip->secondary_irq_base, + ochip->secondary_irq_base, + &irq_domain_simple_ops, + ochip); if (!ochip->domain) panic("%s: couldn't allocate irq domain (DT).\n", ochip->chip.label); diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts index 13a0e63afeaf..2c64d834a2c4 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts @@ -152,28 +152,12 @@ vcc-pg-supply = <®_aldo1>; }; -&r_ir { - linux,rc-map-name = "rc-beelink-gs1"; - status = "okay"; -}; - -&r_pio { - /* - * FIXME: We can't add that supply for now since it would - * create a circular dependency between pinctrl, the regulator - * and the RSB Bus. 
- * - * vcc-pl-supply = <®_aldo1>; - */ - vcc-pm-supply = <®_aldo1>; -}; - -&r_rsb { +&r_i2c { status = "okay"; - axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; @@ -291,6 +275,22 @@ }; }; +&r_ir { + linux,rc-map-name = "rc-beelink-gs1"; + status = "okay"; +}; + +&r_pio { + /* + * PL0 and PL1 are used for PMIC I2C + * don't enable the pl-supply else + * it will fail at boot + * + * vcc-pl-supply = <®_aldo1>; + */ + vcc-pm-supply = <®_aldo1>; +}; + &spdif { pinctrl-names = "default"; pinctrl-0 = <&spdif_tx_pin>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts index ab87c3447cd7..f005072c68a1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts @@ -176,16 +176,12 @@ vcc-pg-supply = <®_vcc_wifi_io>; }; -&r_ir { - status = "okay"; -}; - -&r_rsb { +&r_i2c { status = "okay"; - axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; @@ -296,6 +292,10 @@ }; }; +&r_ir { + status = "okay"; +}; + &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi index d05dc5d6e6b9..e34dbb992021 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi @@ -113,20 +113,12 @@ vcc-pg-supply = <®_aldo1>; }; -&r_ir { - status = "okay"; -}; - -&r_pio { - vcc-pm-supply = <®_bldo3>; -}; - -&r_rsb { +&r_i2c { status = "okay"; - axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; @@ -241,6 +233,14 @@ }; }; +&r_ir { + status = "okay"; +}; + +&r_pio { + vcc-pm-supply = <®_bldo3>; +}; + &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi index 3a9b6907185d..242820845707 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi @@ -26,6 +26,8 @@ leds { compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&spi_quad_pins>; led-power1 { label = "udpu:green:power"; @@ -82,8 +84,6 @@ &spi0 { status = "okay"; - pinctrl-names = "default"; - pinctrl-0 = <&spi_quad_pins>; flash@0 { compatible = "jedec,spi-nor"; @@ -108,6 +108,10 @@ }; }; +&spi_quad_pins { + function = "gpio"; +}; + &pinctrl_nb { i2c2_recovery_pins: i2c2-recovery-pins { groups = "i2c2"; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 5bb8f09422a2..370ad70b4be8 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1729,15 +1729,14 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_SECURITY=y CONFIG_CRYPTO_USER=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_SHA1_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y 
CONFIG_CRYPTO_SHA512_ARM64_CE=m CONFIG_CRYPTO_SHA3_ARM64=m CONFIG_CRYPTO_SM3_ARM64_CE=m diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 3418c8d3c78d..c44b0f202a1f 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -25,18 +25,6 @@ config CRYPTO_NHPOLY1305_NEON Architecture: arm64 using: - NEON (Advanced SIMD) extensions -config CRYPTO_POLY1305_NEON - tristate - depends on KERNEL_MODE_NEON - select CRYPTO_HASH - select CRYPTO_ARCH_HAVE_LIB_POLY1305 - default CRYPTO_LIB_POLY1305_INTERNAL - help - Poly1305 authenticator algorithm (RFC7539) - - Architecture: arm64 using: - - NEON (Advanced SIMD) extensions - config CRYPTO_SHA1_ARM64_CE tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)" depends on KERNEL_MODE_NEON @@ -48,25 +36,6 @@ config CRYPTO_SHA1_ARM64_CE Architecture: arm64 using: - ARMv8 Crypto Extensions -config CRYPTO_SHA256_ARM64 - tristate "Hash functions: SHA-224 and SHA-256" - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: arm64 - -config CRYPTO_SHA2_ARM64_CE - tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)" - depends on KERNEL_MODE_NEON - select CRYPTO_HASH - select CRYPTO_SHA256_ARM64 - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: arm64 using: - - ARMv8 Crypto Extensions - config CRYPTO_SHA512_ARM64 tristate "Hash functions: SHA-384 and SHA-512" select CRYPTO_HASH @@ -101,7 +70,7 @@ config CRYPTO_SM3_NEON tristate "Hash functions: SM3 (NEON)" depends on KERNEL_MODE_NEON select CRYPTO_HASH - select CRYPTO_SM3 + select CRYPTO_LIB_SM3 help SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012) @@ -112,7 +81,7 @@ config CRYPTO_SM3_ARM64_CE tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)" depends on KERNEL_MODE_NEON select CRYPTO_HASH - select CRYPTO_SM3 + select CRYPTO_LIB_SM3 help SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012) @@ -143,7 +112,7 @@ config CRYPTO_AES_ARM64 config CRYPTO_AES_ARM64_CE tristate "Ciphers: AES (ARMv8 Crypto Extensions)" - depends on ARM64 && KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON select CRYPTO_ALGAPI select CRYPTO_LIB_AES help @@ -186,20 +155,6 @@ config CRYPTO_AES_ARM64_NEON_BLK Architecture: arm64 using: - NEON (Advanced SIMD) extensions -config CRYPTO_CHACHA20_NEON - tristate - depends on KERNEL_MODE_NEON - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - default CRYPTO_LIB_CHACHA_INTERNAL - help - Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 - stream cipher algorithms - - Architecture: arm64 using: - - NEON (Advanced SIMD) extensions - config CRYPTO_AES_ARM64_BS tristate "Ciphers: AES, modes: ECB/CBC/CTR/XCTR/XTS modes (bit-sliced NEON)" depends on KERNEL_MODE_NEON @@ -267,7 +222,7 @@ config CRYPTO_SM4_ARM64_NEON_BLK config CRYPTO_AES_ARM64_CE_CCM tristate "AEAD cipher: AES in CCM mode (ARMv8 Crypto Extensions)" - depends on ARM64 && KERNEL_MODE_NEON + depends on KERNEL_MODE_NEON select CRYPTO_ALGAPI select CRYPTO_AES_ARM64_CE select CRYPTO_AES_ARM64_CE_BLK diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index e7139c4768ce..c231c980c514 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -8,9 +8,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o -obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o -sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o - obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o sha512-ce-y 
:= sha512-ce-glue.o sha512-ce-core.o @@ -56,19 +53,9 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o aes-neon-blk-y := aes-glue-neon.o aes-neon.o -obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o -sha256-arm64-y := sha256-glue.o sha256-core.o - obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o sha512-arm64-y := sha512-glue.o sha512-core.o -obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o -chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o - -obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o -poly1305-neon-y := poly1305-core.o poly1305-glue.o -AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_init_arm64 - obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o @@ -81,10 +68,7 @@ aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $(<) void $(@) -$(obj)/%-core.S: $(src)/%-armv8.pl - $(call cmd,perlasm) - -$(obj)/sha256-core.S: $(src)/sha512-armv8.pl +$(obj)/sha512-core.S: $(src)/../lib/crypto/sha2-armv8.pl $(call cmd,perlasm) -clean-files += poly1305-core.S sha256-core.S sha512-core.S +clean-files += sha512-core.S diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index b0150999743f..81560f722b9d 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -5,19 +5,20 @@ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> */ -#include <asm/neon.h> #include <asm/hwcap.h> -#include <asm/simd.h> +#include <asm/neon.h> #include <crypto/aes.h> #include <crypto/ctr.h> -#include <crypto/sha2.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> -#include <linux/module.h> -#include <linux/cpufeature.h> +#include <crypto/sha2.h> +#include <crypto/utils.h> #include <crypto/xts.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> #include "aes-ce-setkey.h" @@ -130,7 +131,6 @@ struct mac_tfm_ctx { }; struct mac_desc_ctx { - unsigned int len; u8 dg[AES_BLOCK_SIZE]; }; @@ -869,109 +869,64 @@ static int mac_init(struct shash_desc *desc) struct mac_desc_ctx *ctx = shash_desc_ctx(desc); memset(ctx->dg, 0, AES_BLOCK_SIZE); - ctx->len = 0; - return 0; } static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks, - u8 dg[], int enc_before, int enc_after) + u8 dg[], int enc_before) { int rounds = 6 + ctx->key_length / 4; + int rem; - if (crypto_simd_usable()) { - int rem; - - do { - kernel_neon_begin(); - rem = aes_mac_update(in, ctx->key_enc, rounds, blocks, - dg, enc_before, enc_after); - kernel_neon_end(); - in += (blocks - rem) * AES_BLOCK_SIZE; - blocks = rem; - enc_before = 0; - } while (blocks); - } else { - if (enc_before) - aes_encrypt(ctx, dg, dg); - - while (blocks--) { - crypto_xor(dg, in, AES_BLOCK_SIZE); - in += AES_BLOCK_SIZE; - - if (blocks || enc_after) - aes_encrypt(ctx, dg, dg); - } - } + do { + kernel_neon_begin(); + rem = aes_mac_update(in, ctx->key_enc, rounds, blocks, + dg, enc_before, !enc_before); + kernel_neon_end(); + in += (blocks - rem) * AES_BLOCK_SIZE; + blocks = rem; + } while (blocks); } static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len) { struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct mac_desc_ctx *ctx = shash_desc_ctx(desc); + int blocks = len / AES_BLOCK_SIZE; - while (len > 0) { - unsigned int l; - - if ((ctx->len % 
AES_BLOCK_SIZE) == 0 && - (ctx->len + len) > AES_BLOCK_SIZE) { - - int blocks = len / AES_BLOCK_SIZE; - - len %= AES_BLOCK_SIZE; - - mac_do_update(&tctx->key, p, blocks, ctx->dg, - (ctx->len != 0), (len != 0)); - - p += blocks * AES_BLOCK_SIZE; - - if (!len) { - ctx->len = AES_BLOCK_SIZE; - break; - } - ctx->len = 0; - } - - l = min(len, AES_BLOCK_SIZE - ctx->len); - - if (l <= AES_BLOCK_SIZE) { - crypto_xor(ctx->dg + ctx->len, p, l); - ctx->len += l; - len -= l; - p += l; - } - } - - return 0; + len %= AES_BLOCK_SIZE; + mac_do_update(&tctx->key, p, blocks, ctx->dg, 0); + return len; } -static int cbcmac_final(struct shash_desc *desc, u8 *out) +static int cbcmac_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct mac_desc_ctx *ctx = shash_desc_ctx(desc); - mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0); - + if (len) { + crypto_xor(ctx->dg, src, len); + mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1); + } memcpy(out, ctx->dg, AES_BLOCK_SIZE); - return 0; } -static int cmac_final(struct shash_desc *desc, u8 *out) +static int cmac_finup(struct shash_desc *desc, const u8 *src, unsigned int len, + u8 *out) { struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct mac_desc_ctx *ctx = shash_desc_ctx(desc); u8 *consts = tctx->consts; - if (ctx->len != AES_BLOCK_SIZE) { - ctx->dg[ctx->len] ^= 0x80; + crypto_xor(ctx->dg, src, len); + if (len != AES_BLOCK_SIZE) { + ctx->dg[len] ^= 0x80; consts += AES_BLOCK_SIZE; } - - mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1); - + mac_do_update(&tctx->key, consts, 1, ctx->dg, 0); memcpy(out, ctx->dg, AES_BLOCK_SIZE); - return 0; } @@ -979,6 +934,8 @@ static struct shash_alg mac_algs[] = { { .base.cra_name = "cmac(aes)", .base.cra_driver_name = "cmac-aes-" MODE, .base.cra_priority = PRIO, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct mac_tfm_ctx) + 2 * AES_BLOCK_SIZE, @@ -987,13 +944,15 @@ static struct shash_alg mac_algs[] = { { .digestsize = AES_BLOCK_SIZE, .init = mac_init, .update = mac_update, - .final = cmac_final, + .finup = cmac_finup, .setkey = cmac_setkey, .descsize = sizeof(struct mac_desc_ctx), }, { .base.cra_name = "xcbc(aes)", .base.cra_driver_name = "xcbc-aes-" MODE, .base.cra_priority = PRIO, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct mac_tfm_ctx) + 2 * AES_BLOCK_SIZE, @@ -1002,21 +961,22 @@ static struct shash_alg mac_algs[] = { { .digestsize = AES_BLOCK_SIZE, .init = mac_init, .update = mac_update, - .final = cmac_final, + .finup = cmac_finup, .setkey = xcbc_setkey, .descsize = sizeof(struct mac_desc_ctx), }, { .base.cra_name = "cbcmac(aes)", .base.cra_driver_name = "cbcmac-aes-" MODE, .base.cra_priority = PRIO, - .base.cra_blocksize = 1, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, + .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct mac_tfm_ctx), .base.cra_module = THIS_MODULE, .digestsize = AES_BLOCK_SIZE, .init = mac_init, .update = mac_update, - .final = cbcmac_final, + .finup = cbcmac_finup, .setkey = cbcmac_setkey, .descsize = sizeof(struct mac_desc_ctx), } }; diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c deleted file mode 100644 index 229876acfc58..000000000000 --- a/arch/arm64/crypto/chacha-neon-glue.c +++ /dev/null @@ -1,237 +0,0 @@ -/* 
- * ARM NEON and scalar accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Based on: - * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code - * - * Copyright (C) 2015 Martin Willi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/simd.h> -#include <crypto/internal/skcipher.h> -#include <linux/jump_label.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> - -asmlinkage void chacha_block_xor_neon(u32 *state, u8 *dst, const u8 *src, - int nrounds); -asmlinkage void chacha_4block_xor_neon(u32 *state, u8 *dst, const u8 *src, - int nrounds, int bytes); -asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); - -static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, - int bytes, int nrounds) -{ - while (bytes > 0) { - int l = min(bytes, CHACHA_BLOCK_SIZE * 5); - - if (l <= CHACHA_BLOCK_SIZE) { - u8 buf[CHACHA_BLOCK_SIZE]; - - memcpy(buf, src, l); - chacha_block_xor_neon(state, buf, buf, nrounds); - memcpy(dst, buf, l); - state[12] += 1; - break; - } - chacha_4block_xor_neon(state, dst, src, nrounds, l); - bytes -= l; - src += l; - dst += l; - state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); - } -} - -void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -{ - if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) { - hchacha_block_generic(state, stream, nrounds); - } else { - kernel_neon_begin(); - hchacha_block_neon(state, stream, nrounds); - kernel_neon_end(); - } -} -EXPORT_SYMBOL(hchacha_block_arch); - -void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, - int nrounds) -{ - if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE || - !crypto_simd_usable()) - return chacha_crypt_generic(state, dst, src, bytes, nrounds); - - do { - unsigned int todo = min_t(unsigned int, bytes, SZ_4K); - - kernel_neon_begin(); - chacha_doneon(state, dst, src, todo, nrounds); - kernel_neon_end(); - - bytes -= todo; - src += todo; - dst += todo; - } while (bytes); -} -EXPORT_SYMBOL(chacha_crypt_arch); - -static int chacha_neon_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - chacha_init(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = rounddown(nbytes, walk.stride); - - if (!static_branch_likely(&have_neon) || - !crypto_simd_usable()) { - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); - } else { - kernel_neon_begin(); - chacha_doneon(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, ctx->nrounds); - kernel_neon_end(); - } - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; 
-} - -static int chacha_neon(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_neon_stream_xor(req, ctx, req->iv); -} - -static int xchacha_neon(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - u32 state[16]; - u8 real_iv[16]; - - chacha_init(state, ctx->key, req->iv); - hchacha_block_arch(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); - return chacha_neon_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 5 * CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_neon, - .decrypt = chacha_neon, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 5 * CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = xchacha_neon, - .decrypt = xchacha_neon, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-neon", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 5 * CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_neon, - .decrypt = xchacha_neon, - } -}; - -static int __init chacha_simd_mod_init(void) -{ - if (!cpu_have_named_feature(ASIMD)) - return 0; - - static_branch_enable(&have_neon); - - return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ? 
- crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; -} - -static void __exit chacha_simd_mod_fini(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && cpu_have_named_feature(ASIMD)) - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -module_init(chacha_simd_mod_init); -module_exit(chacha_simd_mod_fini); - -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-neon"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-neon"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-neon"); diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 071e122f9c37..4995b6e22335 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -6,30 +6,27 @@ */ #include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> #include <crypto/aes.h> -#include <crypto/gcm.h> -#include <crypto/algapi.h> #include <crypto/b128ops.h> +#include <crypto/gcm.h> +#include <crypto/ghash.h> #include <crypto/gf128mul.h> #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("ghash"); -#define GHASH_BLOCK_SIZE 16 -#define GHASH_DIGEST_SIZE 16 - #define RFC4106_NONCE_SIZE 4 struct ghash_key { @@ -37,10 +34,8 @@ struct ghash_key { u64 h[][2]; }; -struct ghash_desc_ctx { +struct arm_ghash_desc_ctx { u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)]; - u8 buf[GHASH_BLOCK_SIZE]; - u32 count; }; struct gcm_aes_ctx { @@ -65,36 +60,12 @@ asmlinkage int pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[], static int ghash_init(struct shash_desc *desc) { - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); - *ctx = (struct ghash_desc_ctx){}; + *ctx = (struct arm_ghash_desc_ctx){}; return 0; } -static void ghash_do_update(int blocks, u64 dg[], const char *src, - struct ghash_key *key, const char *head) -{ - be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; - - do { - const u8 *in = src; - - if (head) { - in = head; - blocks++; - head = NULL; - } else { - src += GHASH_BLOCK_SIZE; - } - - crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); - gf128mul_lle(&dst, &key->k); - } while (--blocks); - - dg[0] = be64_to_cpu(dst.b); - dg[1] = be64_to_cpu(dst.a); -} - static __always_inline void ghash_do_simd_update(int blocks, u64 dg[], const char *src, struct ghash_key *key, const char *head, @@ -103,13 +74,9 @@ void ghash_do_simd_update(int blocks, u64 dg[], const char *src, u64 const h[][2], const char *head)) { - if (likely(crypto_simd_usable())) { - kernel_neon_begin(); - simd_update(blocks, dg, src, key->h, head); - kernel_neon_end(); - } else { - ghash_do_update(blocks, dg, src, key, head); - } + kernel_neon_begin(); + simd_update(blocks, dg, src, key->h, head); + kernel_neon_end(); } /* avoid hogging the CPU for too long */ @@ -118,61 +85,59 @@ void ghash_do_simd_update(int blocks, u64 dg[], const char *src, static 
int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int len) { - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); - unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); + struct ghash_key *key = crypto_shash_ctx(desc->tfm); + int blocks; - ctx->count += len; + blocks = len / GHASH_BLOCK_SIZE; + len -= blocks * GHASH_BLOCK_SIZE; - if ((partial + len) >= GHASH_BLOCK_SIZE) { - struct ghash_key *key = crypto_shash_ctx(desc->tfm); - int blocks; - - if (partial) { - int p = GHASH_BLOCK_SIZE - partial; + do { + int chunk = min(blocks, MAX_BLOCKS); - memcpy(ctx->buf + partial, src, p); - src += p; - len -= p; - } + ghash_do_simd_update(chunk, ctx->digest, src, key, NULL, + pmull_ghash_update_p8); + blocks -= chunk; + src += chunk * GHASH_BLOCK_SIZE; + } while (unlikely(blocks > 0)); + return len; +} - blocks = len / GHASH_BLOCK_SIZE; - len %= GHASH_BLOCK_SIZE; +static int ghash_export(struct shash_desc *desc, void *out) +{ + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); + u8 *dst = out; - do { - int chunk = min(blocks, MAX_BLOCKS); + put_unaligned_be64(ctx->digest[1], dst); + put_unaligned_be64(ctx->digest[0], dst + 8); + return 0; +} - ghash_do_simd_update(chunk, ctx->digest, src, key, - partial ? ctx->buf : NULL, - pmull_ghash_update_p8); +static int ghash_import(struct shash_desc *desc, const void *in) +{ + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); + const u8 *src = in; - blocks -= chunk; - src += chunk * GHASH_BLOCK_SIZE; - partial = 0; - } while (unlikely(blocks > 0)); - } - if (len) - memcpy(ctx->buf + partial, src, len); + ctx->digest[1] = get_unaligned_be64(src); + ctx->digest[0] = get_unaligned_be64(src + 8); return 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { - struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); - unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; - - if (partial) { - struct ghash_key *key = crypto_shash_ctx(desc->tfm); + struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc); + struct ghash_key *key = crypto_shash_ctx(desc->tfm); - memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); + if (len) { + u8 buf[GHASH_BLOCK_SIZE] = {}; - ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL, + memcpy(buf, src, len); + ghash_do_simd_update(1, ctx->digest, src, key, NULL, pmull_ghash_update_p8); + memzero_explicit(buf, sizeof(buf)); } - put_unaligned_be64(ctx->digest[1], dst); - put_unaligned_be64(ctx->digest[0], dst + 8); - - memzero_explicit(ctx, sizeof(*ctx)); - return 0; + return ghash_export(desc, dst); } static void ghash_reflect(u64 h[], const be128 *k) @@ -205,6 +170,7 @@ static struct shash_alg ghash_alg = { .base.cra_name = "ghash", .base.cra_driver_name = "ghash-neon", .base.cra_priority = 150, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = GHASH_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, @@ -212,9 +178,12 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, - .final = ghash_final, + .finup = ghash_finup, .setkey = ghash_setkey, - .descsize = sizeof(struct ghash_desc_ctx), + .export = ghash_export, + .import = ghash_import, + .descsize = sizeof(struct arm_ghash_desc_ctx), + .statesize = sizeof(struct ghash_desc_ctx), }; static int num_rounds(struct crypto_aes_ctx *ctx) diff --git 
a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c deleted file mode 100644 index 18883ea438f3..000000000000 --- a/arch/arm64/crypto/poly1305-glue.c +++ /dev/null @@ -1,232 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * OpenSSL/Cryptogams accelerated Poly1305 transform for arm64 - * - * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org> - */ - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <crypto/internal/simd.h> -#include <linux/cpufeature.h> -#include <linux/crypto.h> -#include <linux/jump_label.h> -#include <linux/module.h> - -asmlinkage void poly1305_init_arm64(void *state, const u8 *key); -asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, u32 hibit); -asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit); -asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce); - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); - -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) -{ - poly1305_init_arm64(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(key + 16); - dctx->s[1] = get_unaligned_le32(key + 20); - dctx->s[2] = get_unaligned_le32(key + 24); - dctx->s[3] = get_unaligned_le32(key + 28); - dctx->buflen = 0; -} -EXPORT_SYMBOL(poly1305_init_arch); - -static int neon_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - u32 len, u32 hibit, bool do_neon) -{ - if (unlikely(!dctx->sset)) { - if (!dctx->rset) { - poly1305_init_arm64(&dctx->h, src); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (len >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - if (len < POLY1305_BLOCK_SIZE) - return; - } - - len &= ~(POLY1305_BLOCK_SIZE - 1); - - if (static_branch_likely(&have_neon) && likely(do_neon)) - poly1305_blocks_neon(&dctx->h, src, len, hibit); - else - poly1305_blocks(&dctx->h, src, len, hibit); -} - -static void neon_poly1305_do_update(struct poly1305_desc_ctx *dctx, - const u8 *src, u32 len, bool do_neon) -{ - if (unlikely(dctx->buflen)) { - u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - len -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - neon_poly1305_blocks(dctx, dctx->buf, - POLY1305_BLOCK_SIZE, 1, false); - dctx->buflen = 0; - } - } - - if (likely(len >= POLY1305_BLOCK_SIZE)) { - neon_poly1305_blocks(dctx, src, len, 1, do_neon); - src += round_down(len, POLY1305_BLOCK_SIZE); - len %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(len)) { - dctx->buflen = len; - memcpy(dctx->buf, src, len); - } -} - -static int neon_poly1305_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - bool do_neon = crypto_simd_usable() && srclen > 128; - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (static_branch_likely(&have_neon) && do_neon) - kernel_neon_begin(); - 
neon_poly1305_do_update(dctx, src, srclen, do_neon); - if (static_branch_likely(&have_neon) && do_neon) - kernel_neon_end(); - return 0; -} - -void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int nbytes) -{ - if (unlikely(dctx->buflen)) { - u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - nbytes -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { - unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); - - if (static_branch_likely(&have_neon) && crypto_simd_usable()) { - do { - unsigned int todo = min_t(unsigned int, len, SZ_4K); - - kernel_neon_begin(); - poly1305_blocks_neon(&dctx->h, src, todo, 1); - kernel_neon_end(); - - len -= todo; - src += todo; - } while (len); - } else { - poly1305_blocks(&dctx->h, src, len, 1); - src += len; - } - nbytes %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(nbytes)) { - dctx->buflen = nbytes; - memcpy(dctx->buf, src, nbytes); - } -} -EXPORT_SYMBOL(poly1305_update_arch); - -void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -{ - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); - poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - - poly1305_emit(&dctx->h, dst, dctx->s); - memzero_explicit(dctx, sizeof(*dctx)); -} -EXPORT_SYMBOL(poly1305_final_arch); - -static int neon_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_arch(dctx, dst); - return 0; -} - -static struct shash_alg neon_poly1305_alg = { - .init = neon_poly1305_init, - .update = neon_poly1305_update, - .final = neon_poly1305_final, - .digestsize = POLY1305_DIGEST_SIZE, - .descsize = sizeof(struct poly1305_desc_ctx), - - .base.cra_name = "poly1305", - .base.cra_driver_name = "poly1305-neon", - .base.cra_priority = 200, - .base.cra_blocksize = POLY1305_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}; - -static int __init neon_poly1305_mod_init(void) -{ - if (!cpu_have_named_feature(ASIMD)) - return 0; - - static_branch_enable(&have_neon); - - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? - crypto_register_shash(&neon_poly1305_alg) : 0; -} - -static void __exit neon_poly1305_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && cpu_have_named_feature(ASIMD)) - crypto_unregister_shash(&neon_poly1305_alg); -} - -module_init(neon_poly1305_mod_init); -module_exit(neon_poly1305_mod_exit); - -MODULE_DESCRIPTION("Poly1305 transform using NEON instructions"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-neon"); diff --git a/arch/arm64/crypto/polyval-ce-glue.c b/arch/arm64/crypto/polyval-ce-glue.c index 0a3b5718df85..c4e653688ea0 100644 --- a/arch/arm64/crypto/polyval-ce-glue.c +++ b/arch/arm64/crypto/polyval-ce-glue.c @@ -15,17 +15,15 @@ * ARMv8 Crypto Extensions instructions to implement the finite field operations. 
*/ -#include <crypto/algapi.h> +#include <asm/neon.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/polyval.h> -#include <linux/crypto.h> -#include <linux/init.h> +#include <crypto/utils.h> +#include <linux/cpufeature.h> +#include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/cpufeature.h> -#include <asm/neon.h> -#include <asm/simd.h> +#include <linux/string.h> #define NUM_KEY_POWERS 8 @@ -38,7 +36,6 @@ struct polyval_tfm_ctx { struct polyval_desc_ctx { u8 buffer[POLYVAL_BLOCK_SIZE]; - u32 bytes; }; asmlinkage void pmull_polyval_update(const struct polyval_tfm_ctx *keys, @@ -48,25 +45,16 @@ asmlinkage void pmull_polyval_mul(u8 *op1, const u8 *op2); static void internal_polyval_update(const struct polyval_tfm_ctx *keys, const u8 *in, size_t nblocks, u8 *accumulator) { - if (likely(crypto_simd_usable())) { - kernel_neon_begin(); - pmull_polyval_update(keys, in, nblocks, accumulator); - kernel_neon_end(); - } else { - polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in, - nblocks, accumulator); - } + kernel_neon_begin(); + pmull_polyval_update(keys, in, nblocks, accumulator); + kernel_neon_end(); } static void internal_polyval_mul(u8 *op1, const u8 *op2) { - if (likely(crypto_simd_usable())) { - kernel_neon_begin(); - pmull_polyval_mul(op1, op2); - kernel_neon_end(); - } else { - polyval_mul_non4k(op1, op2); - } + kernel_neon_begin(); + pmull_polyval_mul(op1, op2); + kernel_neon_end(); } static int polyval_arm64_setkey(struct crypto_shash *tfm, @@ -103,49 +91,27 @@ static int polyval_arm64_update(struct shash_desc *desc, { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - u8 *pos; unsigned int nblocks; - unsigned int n; - - if (dctx->bytes) { - n = min(srclen, dctx->bytes); - pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes; - - dctx->bytes -= n; - srclen -= n; - while (n--) - *pos++ ^= *src++; - - if (!dctx->bytes) - internal_polyval_mul(dctx->buffer, - tctx->key_powers[NUM_KEY_POWERS-1]); - } - - while (srclen >= POLYVAL_BLOCK_SIZE) { + do { /* allow rescheduling every 4K bytes */ nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; internal_polyval_update(tctx, src, nblocks, dctx->buffer); srclen -= nblocks * POLYVAL_BLOCK_SIZE; src += nblocks * POLYVAL_BLOCK_SIZE; - } + } while (srclen >= POLYVAL_BLOCK_SIZE); - if (srclen) { - dctx->bytes = POLYVAL_BLOCK_SIZE - srclen; - pos = dctx->buffer; - while (srclen--) - *pos++ ^= *src++; - } - - return 0; + return srclen; } -static int polyval_arm64_final(struct shash_desc *desc, u8 *dst) +static int polyval_arm64_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - if (dctx->bytes) { + if (len) { + crypto_xor(dctx->buffer, src, len); internal_polyval_mul(dctx->buffer, tctx->key_powers[NUM_KEY_POWERS-1]); } @@ -159,13 +125,14 @@ static struct shash_alg polyval_alg = { .digestsize = POLYVAL_DIGEST_SIZE, .init = polyval_arm64_init, .update = polyval_arm64_update, - .final = polyval_arm64_final, + .finup = polyval_arm64_finup, .setkey = polyval_arm64_setkey, .descsize = sizeof(struct polyval_desc_ctx), .base = { .cra_name = "polyval", .cra_driver_name = "polyval-ce", .cra_priority = 200, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = POLYVAL_BLOCK_SIZE, .cra_ctxsize = sizeof(struct polyval_tfm_ctx), .cra_module = THIS_MODULE, diff 
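The polyval-ce conversion above moves the driver to the block-only update shape: polyval_arm64_update() now consumes only whole 16-byte blocks and returns the leftover byte count (the `return srclen` above), and polyval_arm64_finup() absorbs the final partial block with crypto_xor(). The following standalone C sketch uses invented toy_* names, not kernel APIs, and only illustrates that contract:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define TOY_BLOCK_SIZE 16

struct toy_state { unsigned char acc[TOY_BLOCK_SIZE]; };

/* Stand-in for the real per-block transform (e.g. pmull_polyval_update). */
static void toy_process_block(struct toy_state *st, const unsigned char *blk)
{
	for (int i = 0; i < TOY_BLOCK_SIZE; i++)
		st->acc[i] ^= blk[i];
}

/* Mirrors the new update() shape: process whole blocks, never buffer,
 * and report the unconsumed tail back to the caller. */
static size_t toy_update_blocks(struct toy_state *st, const unsigned char *src,
				size_t len)
{
	while (len >= TOY_BLOCK_SIZE) {
		toy_process_block(st, src);
		src += TOY_BLOCK_SIZE;
		len -= TOY_BLOCK_SIZE;
	}
	return len;	/* leftover bytes are handed to finup() later */
}

int main(void)
{
	struct toy_state st = { { 0 } };
	unsigned char msg[37];
	size_t rem;

	memset(msg, 0xab, sizeof(msg));
	rem = toy_update_blocks(&st, msg, sizeof(msg));
	printf("processed %zu blocks, %zu bytes left for finup\n",
	       (sizeof(msg) - rem) / TOY_BLOCK_SIZE, rem);
	return 0;
}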
--git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index cbd14f208f83..65b6980817e5 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -7,14 +7,14 @@ #include <asm/neon.h> #include <asm/simd.h> -#include <linux/unaligned.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); @@ -56,79 +56,49 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data, { struct sha1_ce_state *sctx = shash_desc_ctx(desc); - if (!crypto_simd_usable()) - return crypto_sha1_update(desc, data, len); - sctx->finalize = 0; - sha1_base_do_update(desc, data, len, sha1_ce_transform); - - return 0; + return sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform); } static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct sha1_ce_state *sctx = shash_desc_ctx(desc); - bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len; - - if (!crypto_simd_usable()) - return crypto_sha1_finup(desc, data, len, out); + bool finalized = false; /* * Allow the asm code to perform the finalization if there is no * partial data and the input is a round multiple of the block size. */ - sctx->finalize = finalize; - - sha1_base_do_update(desc, data, len, sha1_ce_transform); - if (!finalize) - sha1_base_do_finalize(desc, sha1_ce_transform); - return sha1_base_finish(desc, out); -} - -static int sha1_ce_final(struct shash_desc *desc, u8 *out) -{ - struct sha1_ce_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable()) - return crypto_sha1_finup(desc, NULL, 0, out); - - sctx->finalize = 0; - sha1_base_do_finalize(desc, sha1_ce_transform); + if (len >= SHA1_BLOCK_SIZE) { + unsigned int remain = len - round_down(len, SHA1_BLOCK_SIZE); + + finalized = !remain; + sctx->finalize = finalized; + sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform); + data += len - remain; + len = remain; + } + if (!finalized) { + sctx->finalize = 0; + sha1_base_do_finup(desc, data, len, sha1_ce_transform); + } return sha1_base_finish(desc, out); } -static int sha1_ce_export(struct shash_desc *desc, void *out) -{ - struct sha1_ce_state *sctx = shash_desc_ctx(desc); - - memcpy(out, &sctx->sst, sizeof(struct sha1_state)); - return 0; -} - -static int sha1_ce_import(struct shash_desc *desc, const void *in) -{ - struct sha1_ce_state *sctx = shash_desc_ctx(desc); - - memcpy(&sctx->sst, in, sizeof(struct sha1_state)); - sctx->finalize = 0; - return 0; -} - static struct shash_alg alg = { .init = sha1_base_init, .update = sha1_ce_update, - .final = sha1_ce_final, .finup = sha1_ce_finup, - .import = sha1_ce_import, - .export = sha1_ce_export, .descsize = sizeof(struct sha1_ce_state), - .statesize = sizeof(struct sha1_state), + .statesize = SHA1_STATE_SIZE, .digestsize = SHA1_DIGEST_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-ce", .cra_priority = 200, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c deleted file mode 100644 index 6b4866a88ded..000000000000 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ /dev/null 
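The sha1-ce finup() rewrite above hinges on one computation: the tail left after rounding the input down to whole 64-byte blocks decides whether the assembly may append the SHA-1 padding itself. A small self-contained sketch of that decision, with illustrative lengths only (not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define TOY_SHA1_BLOCK_SIZE 64

int main(void)
{
	/* Illustrative lengths: partial block, one exact block, block + tail. */
	const unsigned int lens[] = { 20, 64, 100, 128 };

	for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int len = lens[i];
		/* same arithmetic as round_down(len, SHA1_BLOCK_SIZE) above */
		unsigned int remain = len - (len / TOY_SHA1_BLOCK_SIZE) * TOY_SHA1_BLOCK_SIZE;
		bool asm_finalize = len >= TOY_SHA1_BLOCK_SIZE && remain == 0;

		printf("len=%3u remain=%2u finalize-in-asm=%d\n",
		       len, remain, asm_finalize);
	}
	return 0;
}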
@@ -1,192 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions - * - * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> - */ - -#include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <linux/cpufeature.h> -#include <linux/crypto.h> -#include <linux/module.h> - -MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha256"); - -struct sha256_ce_state { - struct sha256_state sst; - u32 finalize; -}; - -extern const u32 sha256_ce_offsetof_count; -extern const u32 sha256_ce_offsetof_finalize; - -asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src, - int blocks); - -static void sha256_ce_transform(struct sha256_state *sst, u8 const *src, - int blocks) -{ - while (blocks) { - int rem; - - kernel_neon_begin(); - rem = __sha256_ce_transform(container_of(sst, - struct sha256_ce_state, - sst), src, blocks); - kernel_neon_end(); - src += (blocks - rem) * SHA256_BLOCK_SIZE; - blocks = rem; - } -} - -const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state, - sst.count); -const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state, - finalize); - -asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks); - -static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src, - int blocks) -{ - sha256_block_data_order(sst->state, src, blocks); -} - -static int sha256_ce_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_ce_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable()) - return sha256_base_do_update(desc, data, len, - sha256_arm64_transform); - - sctx->finalize = 0; - sha256_base_do_update(desc, data, len, sha256_ce_transform); - - return 0; -} - -static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct sha256_ce_state *sctx = shash_desc_ctx(desc); - bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len; - - if (!crypto_simd_usable()) { - if (len) - sha256_base_do_update(desc, data, len, - sha256_arm64_transform); - sha256_base_do_finalize(desc, sha256_arm64_transform); - return sha256_base_finish(desc, out); - } - - /* - * Allow the asm code to perform the finalization if there is no - * partial data and the input is a round multiple of the block size. 
- */ - sctx->finalize = finalize; - - sha256_base_do_update(desc, data, len, sha256_ce_transform); - if (!finalize) - sha256_base_do_finalize(desc, sha256_ce_transform); - return sha256_base_finish(desc, out); -} - -static int sha256_ce_final(struct shash_desc *desc, u8 *out) -{ - struct sha256_ce_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable()) { - sha256_base_do_finalize(desc, sha256_arm64_transform); - return sha256_base_finish(desc, out); - } - - sctx->finalize = 0; - sha256_base_do_finalize(desc, sha256_ce_transform); - return sha256_base_finish(desc, out); -} - -static int sha256_ce_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha256_base_init(desc); - return sha256_ce_finup(desc, data, len, out); -} - -static int sha256_ce_export(struct shash_desc *desc, void *out) -{ - struct sha256_ce_state *sctx = shash_desc_ctx(desc); - - memcpy(out, &sctx->sst, sizeof(struct sha256_state)); - return 0; -} - -static int sha256_ce_import(struct shash_desc *desc, const void *in) -{ - struct sha256_ce_state *sctx = shash_desc_ctx(desc); - - memcpy(&sctx->sst, in, sizeof(struct sha256_state)); - sctx->finalize = 0; - return 0; -} - -static struct shash_alg algs[] = { { - .init = sha224_base_init, - .update = sha256_ce_update, - .final = sha256_ce_final, - .finup = sha256_ce_finup, - .export = sha256_ce_export, - .import = sha256_ce_import, - .descsize = sizeof(struct sha256_ce_state), - .statesize = sizeof(struct sha256_state), - .digestsize = SHA224_DIGEST_SIZE, - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-ce", - .cra_priority = 200, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .init = sha256_base_init, - .update = sha256_ce_update, - .final = sha256_ce_final, - .finup = sha256_ce_finup, - .digest = sha256_ce_digest, - .export = sha256_ce_export, - .import = sha256_ce_import, - .descsize = sizeof(struct sha256_ce_state), - .statesize = sizeof(struct sha256_state), - .digestsize = SHA256_DIGEST_SIZE, - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-ce", - .cra_priority = 200, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha2_ce_mod_init(void) -{ - return crypto_register_shashes(algs, ARRAY_SIZE(algs)); -} - -static void __exit sha2_ce_mod_fini(void) -{ - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); -} - -module_cpu_feature_match(SHA2, sha2_ce_mod_init); -module_exit(sha2_ce_mod_fini); diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c deleted file mode 100644 index 35356987cc1e..000000000000 --- a/arch/arm64/crypto/sha256-glue.c +++ /dev/null @@ -1,194 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64 - * - * Copyright (c) 2016 Linaro Ltd. 
<ard.biesheuvel@linaro.org> - */ - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/types.h> - -MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64"); -MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha256"); - -asmlinkage void sha256_block_data_order(u32 *digest, const void *data, - unsigned int num_blks); -EXPORT_SYMBOL(sha256_block_data_order); - -static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src, - int blocks) -{ - sha256_block_data_order(sst->state, src, blocks); -} - -asmlinkage void sha256_block_neon(u32 *digest, const void *data, - unsigned int num_blks); - -static void sha256_neon_transform(struct sha256_state *sst, u8 const *src, - int blocks) -{ - sha256_block_neon(sst->state, src, blocks); -} - -static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return sha256_base_do_update(desc, data, len, sha256_arm64_transform); -} - -static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - if (len) - sha256_base_do_update(desc, data, len, sha256_arm64_transform); - sha256_base_do_finalize(desc, sha256_arm64_transform); - - return sha256_base_finish(desc, out); -} - -static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out) -{ - return crypto_sha256_arm64_finup(desc, NULL, 0, out); -} - -static struct shash_alg algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = crypto_sha256_arm64_update, - .final = crypto_sha256_arm64_final, - .finup = crypto_sha256_arm64_finup, - .descsize = sizeof(struct sha256_state), - .base.cra_name = "sha256", - .base.cra_driver_name = "sha256-arm64", - .base.cra_priority = 125, - .base.cra_blocksize = SHA256_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = crypto_sha256_arm64_update, - .final = crypto_sha256_arm64_final, - .finup = crypto_sha256_arm64_finup, - .descsize = sizeof(struct sha256_state), - .base.cra_name = "sha224", - .base.cra_driver_name = "sha224-arm64", - .base.cra_priority = 125, - .base.cra_blocksize = SHA224_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -} }; - -static int sha256_update_neon(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable()) - return sha256_base_do_update(desc, data, len, - sha256_arm64_transform); - - while (len > 0) { - unsigned int chunk = len; - - /* - * Don't hog the CPU for the entire time it takes to process all - * input when running on a preemptible kernel, but process the - * data block by block instead. 
- */ - if (IS_ENABLED(CONFIG_PREEMPTION) && - chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE) - chunk = SHA256_BLOCK_SIZE - - sctx->count % SHA256_BLOCK_SIZE; - - kernel_neon_begin(); - sha256_base_do_update(desc, data, chunk, sha256_neon_transform); - kernel_neon_end(); - data += chunk; - len -= chunk; - } - return 0; -} - -static int sha256_finup_neon(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - if (!crypto_simd_usable()) { - if (len) - sha256_base_do_update(desc, data, len, - sha256_arm64_transform); - sha256_base_do_finalize(desc, sha256_arm64_transform); - } else { - if (len) - sha256_update_neon(desc, data, len); - kernel_neon_begin(); - sha256_base_do_finalize(desc, sha256_neon_transform); - kernel_neon_end(); - } - return sha256_base_finish(desc, out); -} - -static int sha256_final_neon(struct shash_desc *desc, u8 *out) -{ - return sha256_finup_neon(desc, NULL, 0, out); -} - -static struct shash_alg neon_algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = sha256_update_neon, - .final = sha256_final_neon, - .finup = sha256_finup_neon, - .descsize = sizeof(struct sha256_state), - .base.cra_name = "sha256", - .base.cra_driver_name = "sha256-arm64-neon", - .base.cra_priority = 150, - .base.cra_blocksize = SHA256_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = sha256_update_neon, - .final = sha256_final_neon, - .finup = sha256_finup_neon, - .descsize = sizeof(struct sha256_state), - .base.cra_name = "sha224", - .base.cra_driver_name = "sha224-arm64-neon", - .base.cra_priority = 150, - .base.cra_blocksize = SHA224_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -} }; - -static int __init sha256_mod_init(void) -{ - int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); - if (ret) - return ret; - - if (cpu_have_named_feature(ASIMD)) { - ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs)); - if (ret) - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); - } - return ret; -} - -static void __exit sha256_mod_fini(void) -{ - if (cpu_have_named_feature(ASIMD)) - crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs)); - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); -} - -module_init(sha256_mod_init); -module_exit(sha256_mod_fini); diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c index 5662c3ac49e9..b4f1001046c9 100644 --- a/arch/arm64/crypto/sha3-ce-glue.c +++ b/arch/arm64/crypto/sha3-ce-glue.c @@ -12,13 +12,13 @@ #include <asm/hwcap.h> #include <asm/neon.h> #include <asm/simd.h> -#include <linux/unaligned.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/sha3.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); @@ -35,74 +35,55 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int digest_size = crypto_shash_digestsize(desc->tfm); - - if (!crypto_simd_usable()) - return crypto_sha3_update(desc, data, len); - - if ((sctx->partial + len) >= sctx->rsiz) { - int blocks; - - if (sctx->partial) { - int p = sctx->rsiz - sctx->partial; - - memcpy(sctx->buf + sctx->partial, data, p); - kernel_neon_begin(); - 
sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size); - kernel_neon_end(); - - data += p; - len -= p; - sctx->partial = 0; - } - - blocks = len / sctx->rsiz; - len %= sctx->rsiz; - - while (blocks) { - int rem; - - kernel_neon_begin(); - rem = sha3_ce_transform(sctx->st, data, blocks, - digest_size); - kernel_neon_end(); - data += (blocks - rem) * sctx->rsiz; - blocks = rem; - } - } - - if (len) { - memcpy(sctx->buf + sctx->partial, data, len); - sctx->partial += len; - } - return 0; + struct crypto_shash *tfm = desc->tfm; + unsigned int bs, ds; + int blocks; + + ds = crypto_shash_digestsize(tfm); + bs = crypto_shash_blocksize(tfm); + blocks = len / bs; + len -= blocks * bs; + do { + int rem; + + kernel_neon_begin(); + rem = sha3_ce_transform(sctx->st, data, blocks, ds); + kernel_neon_end(); + data += (blocks - rem) * bs; + blocks = rem; + } while (blocks); + return len; } -static int sha3_final(struct shash_desc *desc, u8 *out) +static int sha3_finup(struct shash_desc *desc, const u8 *src, unsigned int len, + u8 *out) { struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + struct crypto_shash *tfm = desc->tfm; __le64 *digest = (__le64 *)out; + u8 block[SHA3_224_BLOCK_SIZE]; + unsigned int bs, ds; int i; - if (!crypto_simd_usable()) - return crypto_sha3_final(desc, out); + ds = crypto_shash_digestsize(tfm); + bs = crypto_shash_blocksize(tfm); + memcpy(block, src, len); - sctx->buf[sctx->partial++] = 0x06; - memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial); - sctx->buf[sctx->rsiz - 1] |= 0x80; + block[len++] = 0x06; + memset(block + len, 0, bs - len); + block[bs - 1] |= 0x80; kernel_neon_begin(); - sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size); + sha3_ce_transform(sctx->st, block, 1, ds); kernel_neon_end(); + memzero_explicit(block , sizeof(block)); - for (i = 0; i < digest_size / 8; i++) + for (i = 0; i < ds / 8; i++) put_unaligned_le64(sctx->st[i], digest++); - if (digest_size & 4) + if (ds & 4) put_unaligned_le32(sctx->st[i], (__le32 *)digest); - memzero_explicit(sctx, sizeof(*sctx)); return 0; } @@ -110,10 +91,11 @@ static struct shash_alg algs[] = { { .digestsize = SHA3_224_DIGEST_SIZE, .init = crypto_sha3_init, .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-224", .base.cra_driver_name = "sha3-224-ce", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_224_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, @@ -121,10 +103,11 @@ static struct shash_alg algs[] = { { .digestsize = SHA3_256_DIGEST_SIZE, .init = crypto_sha3_init, .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-256", .base.cra_driver_name = "sha3-256-ce", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_256_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, @@ -132,10 +115,11 @@ static struct shash_alg algs[] = { { .digestsize = SHA3_384_DIGEST_SIZE, .init = crypto_sha3_init, .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-384", .base.cra_driver_name = "sha3-384-ce", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_384_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority 
= 200, @@ -143,10 +127,11 @@ static struct shash_alg algs[] = { { .digestsize = SHA3_512_DIGEST_SIZE, .init = crypto_sha3_init, .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-512", .base.cra_driver_name = "sha3-512-ce", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c index 071f64293227..6fb3001fa2c9 100644 --- a/arch/arm64/crypto/sha512-ce-glue.c +++ b/arch/arm64/crypto/sha512-ce-glue.c @@ -10,14 +10,11 @@ */ #include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions"); @@ -29,12 +26,10 @@ MODULE_ALIAS_CRYPTO("sha512"); asmlinkage int __sha512_ce_transform(struct sha512_state *sst, u8 const *src, int blocks); -asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks); - static void sha512_ce_transform(struct sha512_state *sst, u8 const *src, int blocks) { - while (blocks) { + do { int rem; kernel_neon_begin(); @@ -42,67 +37,47 @@ static void sha512_ce_transform(struct sha512_state *sst, u8 const *src, kernel_neon_end(); src += (blocks - rem) * SHA512_BLOCK_SIZE; blocks = rem; - } -} - -static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src, - int blocks) -{ - sha512_block_data_order(sst->state, src, blocks); + } while (blocks); } static int sha512_ce_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform - : sha512_arm64_transform; - - sha512_base_do_update(desc, data, len, fn); - return 0; + return sha512_base_do_update_blocks(desc, data, len, + sha512_ce_transform); } static int sha512_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform - : sha512_arm64_transform; - - sha512_base_do_update(desc, data, len, fn); - sha512_base_do_finalize(desc, fn); - return sha512_base_finish(desc, out); -} - -static int sha512_ce_final(struct shash_desc *desc, u8 *out) -{ - sha512_block_fn *fn = crypto_simd_usable() ? 
sha512_ce_transform - : sha512_arm64_transform; - - sha512_base_do_finalize(desc, fn); + sha512_base_do_finup(desc, data, len, sha512_ce_transform); return sha512_base_finish(desc, out); } static struct shash_alg algs[] = { { .init = sha384_base_init, .update = sha512_ce_update, - .final = sha512_ce_final, .finup = sha512_ce_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA384_DIGEST_SIZE, .base.cra_name = "sha384", .base.cra_driver_name = "sha384-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .base.cra_blocksize = SHA512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .init = sha512_base_init, .update = sha512_ce_update, - .final = sha512_ce_final, .finup = sha512_ce_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA512_DIGEST_SIZE, .base.cra_name = "sha512", .base.cra_driver_name = "sha512-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .base.cra_blocksize = SHA512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c index 62f129dea83d..15aa9d8b7b2c 100644 --- a/arch/arm64/crypto/sha512-glue.c +++ b/arch/arm64/crypto/sha512-glue.c @@ -6,11 +6,10 @@ */ #include <crypto/internal/hash.h> -#include <linux/types.h> -#include <linux/string.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <asm/neon.h> +#include <linux/kernel.h> +#include <linux/module.h> MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash for arm64"); MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>"); @@ -19,59 +18,53 @@ MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("sha384"); MODULE_ALIAS_CRYPTO("sha512"); -asmlinkage void sha512_block_data_order(u64 *digest, const void *data, - unsigned int num_blks); -EXPORT_SYMBOL(sha512_block_data_order); +asmlinkage void sha512_blocks_arch(u64 *digest, const void *data, + unsigned int num_blks); static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src, int blocks) { - sha512_block_data_order(sst->state, src, blocks); + sha512_blocks_arch(sst->state, src, blocks); } static int sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha512_base_do_update(desc, data, len, sha512_arm64_transform); + return sha512_base_do_update_blocks(desc, data, len, + sha512_arm64_transform); } static int sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (len) - sha512_base_do_update(desc, data, len, sha512_arm64_transform); - sha512_base_do_finalize(desc, sha512_arm64_transform); - + sha512_base_do_finup(desc, data, len, sha512_arm64_transform); return sha512_base_finish(desc, out); } -static int sha512_final(struct shash_desc *desc, u8 *out) -{ - return sha512_finup(desc, NULL, 0, out); -} - static struct shash_alg algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_update, - .final = sha512_final, .finup = sha512_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base.cra_name = "sha512", .base.cra_driver_name = "sha512-arm64", .base.cra_priority = 150, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .base.cra_blocksize = SHA512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_update, - .final = sha512_final, .finup = 
sha512_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base.cra_name = "sha384", .base.cra_driver_name = "sha384-arm64", .base.cra_priority = 150, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .base.cra_blocksize = SHA384_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c index 1a71788c4cda..eac6f5fa0abe 100644 --- a/arch/arm64/crypto/sm3-ce-glue.c +++ b/arch/arm64/crypto/sm3-ce-glue.c @@ -6,14 +6,11 @@ */ #include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/sm3.h> #include <crypto/sm3_base.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> MODULE_DESCRIPTION("SM3 secure hash using ARMv8 Crypto Extensions"); @@ -26,50 +23,20 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src, static int sm3_ce_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - if (!crypto_simd_usable()) { - sm3_update(shash_desc_ctx(desc), data, len); - return 0; - } + int remain; kernel_neon_begin(); - sm3_base_do_update(desc, data, len, sm3_ce_transform); + remain = sm3_base_do_update_blocks(desc, data, len, sm3_ce_transform); kernel_neon_end(); - - return 0; -} - -static int sm3_ce_final(struct shash_desc *desc, u8 *out) -{ - if (!crypto_simd_usable()) { - sm3_final(shash_desc_ctx(desc), out); - return 0; - } - - kernel_neon_begin(); - sm3_base_do_finalize(desc, sm3_ce_transform); - kernel_neon_end(); - - return sm3_base_finish(desc, out); + return remain; } static int sm3_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) { - struct sm3_state *sctx = shash_desc_ctx(desc); - - if (len) - sm3_update(sctx, data, len); - sm3_final(sctx, out); - return 0; - } - kernel_neon_begin(); - if (len) - sm3_base_do_update(desc, data, len, sm3_ce_transform); - sm3_base_do_finalize(desc, sm3_ce_transform); + sm3_base_do_finup(desc, data, len, sm3_ce_transform); kernel_neon_end(); - return sm3_base_finish(desc, out); } @@ -77,11 +44,12 @@ static struct shash_alg sm3_alg = { .digestsize = SM3_DIGEST_SIZE, .init = sm3_base_init, .update = sm3_ce_update, - .final = sm3_ce_final, .finup = sm3_ce_finup, - .descsize = sizeof(struct sm3_state), + .descsize = SM3_STATE_SIZE, .base.cra_name = "sm3", .base.cra_driver_name = "sm3-ce", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .base.cra_blocksize = SM3_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 400, diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c index 8dd71ce79b69..6c4611a503a3 100644 --- a/arch/arm64/crypto/sm3-neon-glue.c +++ b/arch/arm64/crypto/sm3-neon-glue.c @@ -6,14 +6,11 @@ */ #include <asm/neon.h> -#include <asm/simd.h> -#include <linux/unaligned.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/sm3.h> #include <crypto/sm3_base.h> #include <linux/cpufeature.h> -#include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/module.h> @@ -23,50 +20,20 @@ asmlinkage void sm3_neon_transform(struct sm3_state *sst, u8 const *src, static int sm3_neon_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - if (!crypto_simd_usable()) { - sm3_update(shash_desc_ctx(desc), data, len); - return 0; - } + int remain; kernel_neon_begin(); - 
sm3_base_do_update(desc, data, len, sm3_neon_transform); + remain = sm3_base_do_update_blocks(desc, data, len, sm3_neon_transform); kernel_neon_end(); - - return 0; -} - -static int sm3_neon_final(struct shash_desc *desc, u8 *out) -{ - if (!crypto_simd_usable()) { - sm3_final(shash_desc_ctx(desc), out); - return 0; - } - - kernel_neon_begin(); - sm3_base_do_finalize(desc, sm3_neon_transform); - kernel_neon_end(); - - return sm3_base_finish(desc, out); + return remain; } static int sm3_neon_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) { - struct sm3_state *sctx = shash_desc_ctx(desc); - - if (len) - sm3_update(sctx, data, len); - sm3_final(sctx, out); - return 0; - } - kernel_neon_begin(); - if (len) - sm3_base_do_update(desc, data, len, sm3_neon_transform); - sm3_base_do_finalize(desc, sm3_neon_transform); + sm3_base_do_finup(desc, data, len, sm3_neon_transform); kernel_neon_end(); - return sm3_base_finish(desc, out); } @@ -74,11 +41,12 @@ static struct shash_alg sm3_alg = { .digestsize = SM3_DIGEST_SIZE, .init = sm3_base_init, .update = sm3_neon_update, - .final = sm3_neon_final, .finup = sm3_neon_finup, - .descsize = sizeof(struct sm3_state), + .descsize = SM3_STATE_SIZE, .base.cra_name = "sm3", .base.cra_driver_name = "sm3-neon", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .base.cra_blocksize = SM3_BLOCK_SIZE, .base.cra_module = THIS_MODULE, .base.cra_priority = 200, diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index 43741bed874e..7a60e7b559dc 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -8,19 +8,18 @@ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/cpufeature.h> #include <asm/neon.h> -#include <asm/simd.h> #include <crypto/b128ops.h> -#include <crypto/internal/simd.h> -#include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> -#include <crypto/xts.h> #include <crypto/sm4.h> +#include <crypto/utils.h> +#include <crypto/xts.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> #define BYTES2BLKS(nbytes) ((nbytes) >> 4) @@ -64,7 +63,6 @@ struct sm4_mac_tfm_ctx { }; struct sm4_mac_desc_ctx { - unsigned int len; u8 digest[SM4_BLOCK_SIZE]; }; @@ -591,8 +589,6 @@ static int sm4_mac_init(struct shash_desc *desc) struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); memset(ctx->digest, 0, SM4_BLOCK_SIZE); - ctx->len = 0; - return 0; } @@ -601,87 +597,50 @@ static int sm4_mac_update(struct shash_desc *desc, const u8 *p, { struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); - unsigned int l, nblocks; - - if (len == 0) - return 0; - - if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) { - l = min(len, SM4_BLOCK_SIZE - ctx->len); - - crypto_xor(ctx->digest + ctx->len, p, l); - ctx->len += l; - len -= l; - p += l; - } - - if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) { - kernel_neon_begin(); - - if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) { - sm4_ce_crypt_block(tctx->key.rkey_enc, - ctx->digest, ctx->digest); - ctx->len = 0; - } else { - nblocks = len / SM4_BLOCK_SIZE; - len %= SM4_BLOCK_SIZE; + unsigned int nblocks = len / SM4_BLOCK_SIZE; - sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p, - 
nblocks, (ctx->len == SM4_BLOCK_SIZE), - (len != 0)); - - p += nblocks * SM4_BLOCK_SIZE; - - if (len == 0) - ctx->len = SM4_BLOCK_SIZE; - } - - kernel_neon_end(); - - if (len) { - crypto_xor(ctx->digest, p, len); - ctx->len = len; - } - } - - return 0; + len %= SM4_BLOCK_SIZE; + kernel_neon_begin(); + sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p, + nblocks, false, true); + kernel_neon_end(); + return len; } -static int sm4_cmac_final(struct shash_desc *desc, u8 *out) +static int sm4_cmac_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); const u8 *consts = tctx->consts; - if (ctx->len != SM4_BLOCK_SIZE) { - ctx->digest[ctx->len] ^= 0x80; + crypto_xor(ctx->digest, src, len); + if (len != SM4_BLOCK_SIZE) { + ctx->digest[len] ^= 0x80; consts += SM4_BLOCK_SIZE; } - kernel_neon_begin(); sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1, false, true); kernel_neon_end(); - memcpy(out, ctx->digest, SM4_BLOCK_SIZE); - return 0; } -static int sm4_cbcmac_final(struct shash_desc *desc, u8 *out) +static int sm4_cbcmac_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); - if (ctx->len) { + if (len) { + crypto_xor(ctx->digest, src, len); kernel_neon_begin(); sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest, ctx->digest); kernel_neon_end(); } - memcpy(out, ctx->digest, SM4_BLOCK_SIZE); - return 0; } @@ -691,6 +650,8 @@ static struct shash_alg sm4_mac_algs[] = { .cra_name = "cmac(sm4)", .cra_driver_name = "cmac-sm4-ce", .cra_priority = 400, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx) + SM4_BLOCK_SIZE * 2, @@ -699,7 +660,7 @@ static struct shash_alg sm4_mac_algs[] = { .digestsize = SM4_BLOCK_SIZE, .init = sm4_mac_init, .update = sm4_mac_update, - .final = sm4_cmac_final, + .finup = sm4_cmac_finup, .setkey = sm4_cmac_setkey, .descsize = sizeof(struct sm4_mac_desc_ctx), }, { @@ -707,6 +668,8 @@ static struct shash_alg sm4_mac_algs[] = { .cra_name = "xcbc(sm4)", .cra_driver_name = "xcbc-sm4-ce", .cra_priority = 400, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx) + SM4_BLOCK_SIZE * 2, @@ -715,7 +678,7 @@ static struct shash_alg sm4_mac_algs[] = { .digestsize = SM4_BLOCK_SIZE, .init = sm4_mac_init, .update = sm4_mac_update, - .final = sm4_cmac_final, + .finup = sm4_cmac_finup, .setkey = sm4_xcbc_setkey, .descsize = sizeof(struct sm4_mac_desc_ctx), }, { @@ -723,14 +686,15 @@ static struct shash_alg sm4_mac_algs[] = { .cra_name = "cbcmac(sm4)", .cra_driver_name = "cbcmac-sm4-ce", .cra_priority = 400, - .cra_blocksize = 1, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, + .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx), .cra_module = THIS_MODULE, }, .digestsize = SM4_BLOCK_SIZE, .init = sm4_mac_init, .update = sm4_mac_update, - .final = sm4_cbcmac_final, + .finup = sm4_cbcmac_finup, .setkey = sm4_cbcmac_setkey, .descsize = sizeof(struct sm4_mac_desc_ctx), } diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 4d49dff721a8..027bfa9689c6 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,4 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 + +obj-y 
+= crypto/ + lib-y := clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_page.o \ clear_page.o csum.o insn.o memchr.o memcpy.o \ @@ -14,10 +17,10 @@ endif lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o obj-$(CONFIG_CRC32_ARCH) += crc32-arm64.o -crc32-arm64-y := crc32.o crc32-glue.o +crc32-arm64-y := crc32.o crc32-core.o obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm64.o -crc-t10dif-arm64-y := crc-t10dif-glue.o crc-t10dif-core.o +crc-t10dif-arm64-y := crc-t10dif.o crc-t10dif-core.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/crc-t10dif-glue.c b/arch/arm64/lib/crc-t10dif.c index bacd18f23168..c2ffe4fdb59d 100644 --- a/arch/arm64/lib/crc-t10dif-glue.c +++ b/arch/arm64/lib/crc-t10dif.c @@ -17,8 +17,8 @@ #include <asm/neon.h> #include <asm/simd.h> -static DEFINE_STATIC_KEY_FALSE(have_asimd); -static DEFINE_STATIC_KEY_FALSE(have_pmull); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_asimd); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull); #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U @@ -61,7 +61,7 @@ static int __init crc_t10dif_arm64_init(void) } return 0; } -arch_initcall(crc_t10dif_arm64_init); +subsys_initcall(crc_t10dif_arm64_init); static void __exit crc_t10dif_arm64_exit(void) { diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32-core.S index 68825317460f..68825317460f 100644 --- a/arch/arm64/lib/crc32.S +++ b/arch/arm64/lib/crc32-core.S diff --git a/arch/arm64/lib/crc32-glue.c b/arch/arm64/lib/crc32.c index ed3acd71178f..ed3acd71178f 100644 --- a/arch/arm64/lib/crc32-glue.c +++ b/arch/arm64/lib/crc32.c diff --git a/arch/arm64/lib/crypto/.gitignore b/arch/arm64/lib/crypto/.gitignore new file mode 100644 index 000000000000..12d74d8b03d0 --- /dev/null +++ b/arch/arm64/lib/crypto/.gitignore @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +poly1305-core.S +sha256-core.S diff --git a/arch/arm64/lib/crypto/Kconfig b/arch/arm64/lib/crypto/Kconfig new file mode 100644 index 000000000000..129a7685cb4c --- /dev/null +++ b/arch/arm64/lib/crypto/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_CHACHA20_NEON + tristate + depends on KERNEL_MODE_NEON + default CRYPTO_LIB_CHACHA + select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_POLY1305_NEON + tristate + depends on KERNEL_MODE_NEON + default CRYPTO_LIB_POLY1305 + select CRYPTO_ARCH_HAVE_LIB_POLY1305 + +config CRYPTO_SHA256_ARM64 + tristate + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD diff --git a/arch/arm64/lib/crypto/Makefile b/arch/arm64/lib/crypto/Makefile new file mode 100644 index 000000000000..946c09903711 --- /dev/null +++ b/arch/arm64/lib/crypto/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o +chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o + +obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o +poly1305-neon-y := poly1305-core.o poly1305-glue.o +AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_block_init_arch +AFLAGS_poly1305-core.o += -Dpoly1305_emit=poly1305_emit_arch + +obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o +sha256-arm64-y := sha256.o sha256-core.o +sha256-arm64-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o + +quiet_cmd_perlasm = PERLASM $@ + cmd_perlasm = $(PERL) $(<) void $(@) + +$(obj)/%-core.S: $(src)/%-armv8.pl + $(call cmd,perlasm) + +$(obj)/sha256-core.S: $(src)/sha2-armv8.pl + $(call cmd,perlasm) + +clean-files 
+= poly1305-core.S sha256-core.S diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/lib/crypto/chacha-neon-core.S index b70ac76f2610..80079586ecc7 100644 --- a/arch/arm64/crypto/chacha-neon-core.S +++ b/arch/arm64/lib/crypto/chacha-neon-core.S @@ -1,5 +1,5 @@ /* - * ChaCha/XChaCha NEON helper functions + * ChaCha/HChaCha NEON helper functions * * Copyright (C) 2016-2018 Linaro, Ltd. <ard.biesheuvel@linaro.org> * diff --git a/arch/arm64/lib/crypto/chacha-neon-glue.c b/arch/arm64/lib/crypto/chacha-neon-glue.c new file mode 100644 index 000000000000..d0188f974ca5 --- /dev/null +++ b/arch/arm64/lib/crypto/chacha-neon-glue.c @@ -0,0 +1,119 @@ +/* + * ChaCha and HChaCha functions (ARM64 optimized) + * + * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on: + * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <crypto/chacha.h> +#include <crypto/internal/simd.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <asm/simd.h> + +asmlinkage void chacha_block_xor_neon(const struct chacha_state *state, + u8 *dst, const u8 *src, int nrounds); +asmlinkage void chacha_4block_xor_neon(const struct chacha_state *state, + u8 *dst, const u8 *src, + int nrounds, int bytes); +asmlinkage void hchacha_block_neon(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); + +static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src, + int bytes, int nrounds) +{ + while (bytes > 0) { + int l = min(bytes, CHACHA_BLOCK_SIZE * 5); + + if (l <= CHACHA_BLOCK_SIZE) { + u8 buf[CHACHA_BLOCK_SIZE]; + + memcpy(buf, src, l); + chacha_block_xor_neon(state, buf, buf, nrounds); + memcpy(dst, buf, l); + state->x[12] += 1; + break; + } + chacha_4block_xor_neon(state, dst, src, nrounds, l); + bytes -= l; + src += l; + dst += l; + state->x[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); + } +} + +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) +{ + if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) { + hchacha_block_generic(state, out, nrounds); + } else { + kernel_neon_begin(); + hchacha_block_neon(state, out, nrounds); + kernel_neon_end(); + } +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE || + !crypto_simd_usable()) + return chacha_crypt_generic(state, dst, src, bytes, nrounds); + + do { + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); + + kernel_neon_begin(); + chacha_doneon(state, dst, src, todo, nrounds); + kernel_neon_end(); + + bytes -= todo; + src += todo; + dst += todo; + } while (bytes); +} +EXPORT_SYMBOL(chacha_crypt_arch); + +bool chacha_is_arch_optimized(void) +{ + return static_key_enabled(&have_neon); +} 
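The chacha_doneon() loop in the new chacha-neon-glue.c above keeps the block counter in state->x[12] consistent: one increment per 64-byte keystream block emitted, rounded up when the last NEON call ends on a partial block, with a short final tail bounced through a stack buffer. A hedged standalone sketch of just that counter bookkeeping (illustrative values, not kernel code):

#include <stdio.h>

#define TOY_CHACHA_BLOCK_SIZE 64

int main(void)
{
	unsigned int counter = 0;	/* stands in for state->x[12] */
	int bytes = 300;		/* illustrative request size */

	while (bytes > 0) {
		/* at most 5 blocks per NEON call, as in chacha_doneon() */
		int l = bytes < TOY_CHACHA_BLOCK_SIZE * 5 ?
			bytes : TOY_CHACHA_BLOCK_SIZE * 5;

		if (l <= TOY_CHACHA_BLOCK_SIZE) {
			/* short tail: one block via a bounce buffer */
			counter += 1;
			break;
		}
		bytes -= l;
		counter += (l + TOY_CHACHA_BLOCK_SIZE - 1) / TOY_CHACHA_BLOCK_SIZE;
	}
	printf("counter advanced by %u blocks for a 300-byte request\n", counter);
	return 0;
}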
+EXPORT_SYMBOL(chacha_is_arch_optimized); + +static int __init chacha_simd_mod_init(void) +{ + if (cpu_have_named_feature(ASIMD)) + static_branch_enable(&have_neon); + return 0; +} +subsys_initcall(chacha_simd_mod_init); + +static void __exit chacha_simd_mod_exit(void) +{ +} +module_exit(chacha_simd_mod_exit); + +MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM64 optimized)"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/lib/crypto/poly1305-armv8.pl index 22c9069c0650..22c9069c0650 100644 --- a/arch/arm64/crypto/poly1305-armv8.pl +++ b/arch/arm64/lib/crypto/poly1305-armv8.pl diff --git a/arch/arm64/lib/crypto/poly1305-glue.c b/arch/arm64/lib/crypto/poly1305-glue.c new file mode 100644 index 000000000000..6a661cf04821 --- /dev/null +++ b/arch/arm64/lib/crypto/poly1305-glue.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OpenSSL/Cryptogams accelerated Poly1305 transform for arm64 + * + * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org> + */ + +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <crypto/internal/poly1305.h> +#include <linux/cpufeature.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/unaligned.h> + +asmlinkage void poly1305_block_init_arch( + struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +EXPORT_SYMBOL_GPL(poly1305_block_init_arch); +asmlinkage void poly1305_blocks(struct poly1305_block_state *state, + const u8 *src, u32 len, u32 hibit); +asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state, + const u8 *src, u32 len, u32 hibit); +asmlinkage void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]); +EXPORT_SYMBOL_GPL(poly1305_emit_arch); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); + +void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src, + unsigned int len, u32 padbit) +{ + len = round_down(len, POLY1305_BLOCK_SIZE); + if (static_branch_likely(&have_neon)) { + do { + unsigned int todo = min_t(unsigned int, len, SZ_4K); + + kernel_neon_begin(); + poly1305_blocks_neon(state, src, todo, 1); + kernel_neon_end(); + + len -= todo; + src += todo; + } while (len); + } else + poly1305_blocks(state, src, len, 1); +} +EXPORT_SYMBOL_GPL(poly1305_blocks_arch); + +bool poly1305_is_arch_optimized(void) +{ + /* We always can use at least the ARM64 scalar implementation. 
*/ + return true; +} +EXPORT_SYMBOL(poly1305_is_arch_optimized); + +static int __init neon_poly1305_mod_init(void) +{ + if (cpu_have_named_feature(ASIMD)) + static_branch_enable(&have_neon); + return 0; +} +subsys_initcall(neon_poly1305_mod_init); + +static void __exit neon_poly1305_mod_exit(void) +{ +} +module_exit(neon_poly1305_mod_exit); + +MODULE_DESCRIPTION("Poly1305 authenticator (ARM64 optimized)"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/lib/crypto/sha2-armv8.pl index 35ec9ae99fe1..4aebd20c498b 100644 --- a/arch/arm64/crypto/sha512-armv8.pl +++ b/arch/arm64/lib/crypto/sha2-armv8.pl @@ -95,7 +95,7 @@ if ($output =~ /512/) { $reg_t="w"; } -$func="sha${BITS}_block_data_order"; +$func="sha${BITS}_blocks_arch"; ($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30)); diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/lib/crypto/sha256-ce.S index fce84d88ddb2..f3e21c6d87d2 100644 --- a/arch/arm64/crypto/sha2-ce-core.S +++ b/arch/arm64/lib/crypto/sha256-ce.S @@ -71,8 +71,8 @@ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 /* - * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src, - * int blocks) + * size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS], + * const u8 *data, size_t nblocks); */ .text SYM_FUNC_START(__sha256_ce_transform) @@ -86,20 +86,16 @@ SYM_FUNC_START(__sha256_ce_transform) /* load state */ ld1 {dgav.4s, dgbv.4s}, [x0] - /* load sha256_ce_state::finalize */ - ldr_l w4, sha256_ce_offsetof_finalize, x4 - ldr w4, [x0, x4] - /* load input */ 0: ld1 {v16.4s-v19.4s}, [x1], #64 - sub w2, w2, #1 + sub x2, x2, #1 CPU_LE( rev32 v16.16b, v16.16b ) CPU_LE( rev32 v17.16b, v17.16b ) CPU_LE( rev32 v18.16b, v18.16b ) CPU_LE( rev32 v19.16b, v19.16b ) -1: add t0.4s, v16.4s, v0.4s + add t0.4s, v16.4s, v0.4s mov dg0v.16b, dgav.16b mov dg1v.16b, dgbv.16b @@ -127,31 +123,14 @@ CPU_LE( rev32 v19.16b, v19.16b ) add dgav.4s, dgav.4s, dg0v.4s add dgbv.4s, dgbv.4s, dg1v.4s - /* handled all input blocks? */ - cbz w2, 2f - cond_yield 3f, x5, x6 - b 0b + /* return early if voluntary preemption is needed */ + cond_yield 1f, x5, x6 - /* - * Final block: add padding and total bit count. - * Skip if the input size was not a round multiple of the block size, - * the padding is handled by the C code in that case. - */ -2: cbz x4, 3f - ldr_l w4, sha256_ce_offsetof_count, x4 - ldr x4, [x0, x4] - movi v17.2d, #0 - mov x8, #0x80000000 - movi v18.2d, #0 - ror x7, x4, #29 // ror(lsl(x4, 3), 32) - fmov d16, x8 - mov x4, #0 - mov v19.d[0], xzr - mov v19.d[1], x7 - b 1b + /* handled all input blocks? 
*/ + cbnz x2, 0b /* store new state */ -3: st1 {dgav.4s, dgbv.4s}, [x0] - mov w0, w2 +1: st1 {dgav.4s, dgbv.4s}, [x0] + mov x0, x2 ret SYM_FUNC_END(__sha256_ce_transform) diff --git a/arch/arm64/lib/crypto/sha256.c b/arch/arm64/lib/crypto/sha256.c new file mode 100644 index 000000000000..bcf7a3adc0c4 --- /dev/null +++ b/arch/arm64/lib/crypto/sha256.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256 optimized for ARM64 + * + * Copyright 2025 Google LLC + */ +#include <asm/neon.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +EXPORT_SYMBOL_GPL(sha256_blocks_arch); +asmlinkage void sha256_block_neon(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce); + +void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + static_branch_likely(&have_neon)) { + if (static_branch_likely(&have_ce)) { + do { + size_t rem; + + kernel_neon_begin(); + rem = __sha256_ce_transform(state, + data, nblocks); + kernel_neon_end(); + data += (nblocks - rem) * SHA256_BLOCK_SIZE; + nblocks = rem; + } while (nblocks); + } else { + kernel_neon_begin(); + sha256_block_neon(state, data, nblocks); + kernel_neon_end(); + } + } else { + sha256_blocks_arch(state, data, nblocks); + } +} +EXPORT_SYMBOL_GPL(sha256_blocks_simd); + +bool sha256_is_arch_optimized(void) +{ + /* We always can use at least the ARM64 scalar implementation. 
*/ + return true; +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +static int __init sha256_arm64_mod_init(void) +{ + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && + cpu_have_named_feature(ASIMD)) { + static_branch_enable(&have_neon); + if (cpu_have_named_feature(SHA2)) + static_branch_enable(&have_ce); + } + return 0; +} +subsys_initcall(sha256_arm64_mod_init); + +static void __exit sha256_arm64_mod_exit(void) +{ +} +module_exit(sha256_arm64_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 optimized for ARM64"); diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c index e5f18420ce64..e0a36acd265b 100644 --- a/arch/csky/kernel/perf_event.c +++ b/arch/csky/kernel/perf_event.c @@ -1139,8 +1139,7 @@ static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev) perf_sample_data_init(&data, 0, hwc->last_period); csky_pmu_event_set_period(event); - if (perf_event_overflow(event, &data, regs)) - csky_pmu_stop_event(event); + perf_event_overflow(event, &data, regs); } csky_pmu_enable(&csky_pmu.pmu); diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 90f21dfe22b1..0d59af6007b7 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -1026,7 +1026,7 @@ CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_YAMA=y CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SELFTESTS=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_ANUBIS=m diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h index 51f224bcfc65..704066b4f736 100644 --- a/arch/loongarch/include/asm/asm-prototypes.h +++ b/arch/loongarch/include/asm/asm-prototypes.h @@ -12,3 +12,11 @@ __int128_t __ashlti3(__int128_t a, int b); __int128_t __ashrti3(__int128_t a, int b); __int128_t __lshrti3(__int128_t a, int b); #endif + +asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev, + struct pt_regs *regs); + +asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev, + struct pt_regs *regs, + int (*fn)(void *), + void *fn_arg); diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 48e7e34e355e..2abc29e57381 100644 --- a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ -77,24 +77,22 @@ SYM_CODE_START(handle_syscall) SYM_CODE_END(handle_syscall) _ASM_NOKPROBE(handle_syscall) -SYM_CODE_START(ret_from_fork) +SYM_CODE_START(ret_from_fork_asm) UNWIND_HINT_REGS - bl schedule_tail # a0 = struct task_struct *prev - move a0, sp - bl syscall_exit_to_user_mode + move a1, sp + bl ret_from_fork RESTORE_STATIC RESTORE_SOME RESTORE_SP_AND_RET -SYM_CODE_END(ret_from_fork) +SYM_CODE_END(ret_from_fork_asm) -SYM_CODE_START(ret_from_kernel_thread) +SYM_CODE_START(ret_from_kernel_thread_asm) UNWIND_HINT_REGS - bl schedule_tail # a0 = struct task_struct *prev - move a0, s1 - jirl ra, s0, 0 - move a0, sp - bl syscall_exit_to_user_mode + move a1, sp + move a2, s0 + move a3, s1 + bl ret_from_kernel_thread RESTORE_STATIC RESTORE_SOME RESTORE_SP_AND_RET -SYM_CODE_END(ret_from_kernel_thread) +SYM_CODE_END(ret_from_kernel_thread_asm) diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index f86a4b838dd7..8ad098703488 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -479,8 +479,7 @@ static void handle_associated_event(struct cpu_hw_events 
*cpuc, int idx, if (!loongarch_pmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - loongarch_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } static irqreturn_t pmu_handle_irq(int irq, void *dev) diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 6e58f65455c7..3582f591bab2 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -13,6 +13,7 @@ #include <linux/cpu.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/entry-common.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/debug.h> @@ -34,6 +35,7 @@ #include <linux/nmi.h> #include <asm/asm.h> +#include <asm/asm-prototypes.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/elf.h> @@ -47,6 +49,7 @@ #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/reg.h> +#include <asm/switch_to.h> #include <asm/unwind.h> #include <asm/vdso.h> @@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard); unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); -asmlinkage void ret_from_fork(void); -asmlinkage void ret_from_kernel_thread(void); +asmlinkage void restore_and_ret(void); +asmlinkage void ret_from_fork_asm(void); +asmlinkage void ret_from_kernel_thread_asm(void); void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { @@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } +asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev, + struct pt_regs *regs) +{ + schedule_tail(prev); + syscall_exit_to_user_mode(regs); +} + +asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev, + struct pt_regs *regs, + int (*fn)(void *), + void *fn_arg) +{ + schedule_tail(prev); + fn(fn_arg); + syscall_exit_to_user_mode(regs); +} + /* * Copy architecture-specific thread state */ @@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) p->thread.reg03 = childksp; p->thread.reg23 = (unsigned long)args->fn; p->thread.reg24 = (unsigned long)args->fn_arg; - p->thread.reg01 = (unsigned long)ret_from_kernel_thread; - p->thread.sched_ra = (unsigned long)ret_from_kernel_thread; + p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm; + p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm; memset(childregs, 0, sizeof(struct pt_regs)); childregs->csr_euen = p->thread.csr_euen; childregs->csr_crmd = p->thread.csr_crmd; @@ -182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) childregs->regs[3] = usp; p->thread.reg03 = (unsigned long) childregs; - p->thread.reg01 = (unsigned long) ret_from_fork; - p->thread.sched_ra = (unsigned long) ret_from_fork; + p->thread.reg01 = (unsigned long) ret_from_fork_asm; + p->thread.sched_ra = (unsigned long) ret_from_fork_asm; /* * New tasks lose permission to use the fpu. 
This accelerates context diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c index c44ee4f32557..b37cd8537b45 100644 --- a/arch/loongarch/lib/crc32-loongarch.c +++ b/arch/loongarch/lib/crc32-loongarch.c @@ -26,7 +26,7 @@ do { \ #define CRC32(crc, value, size) _CRC32(crc, value, size, crc) #define CRC32C(crc, value, size) _CRC32(crc, value, size, crcc) -static DEFINE_STATIC_KEY_FALSE(have_crc32); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32); u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) { @@ -114,7 +114,7 @@ static int __init crc32_loongarch_init(void) static_branch_enable(&have_crc32); return 0; } -arch_initcall(crc32_loongarch_init); +subsys_initcall(crc32_loongarch_init); static void __exit crc32_loongarch_exit(void) { diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index 110279a64aa4..60767811e34a 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -2,7 +2,6 @@ CONFIG_LOCALVERSION="amcore-002" CONFIG_DEFAULT_HOSTNAME="amcore" CONFIG_SYSVIPC=y # CONFIG_FHANDLE is not set -# CONFIG_USELIB is not set CONFIG_LOG_BUF_SHIFT=14 CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_AIO is not set diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 31ecb8b7b9f1..77f78d326a32 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -551,7 +551,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 1f57514624d5..f4031aa5d37f 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -508,7 +508,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 02db7a48e57e..fa92131cf4b3 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -528,7 +528,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index f0e673cb17eb..9c2afc477061 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -500,7 +500,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index e8ca5a50b86d..e7cdab059d96 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -510,7 +510,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index b3a270441bb1..0a79751c20a5 100644 --- a/arch/m68k/configs/mac_defconfig +++ 
b/arch/m68k/configs/mac_defconfig @@ -527,7 +527,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index d215dba006ce..f8ca490ee65a 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -614,7 +614,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index a888ed93ff82..88fdcea906f3 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -500,7 +500,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index b481782375f6..8acbe83dac72 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -501,7 +501,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 6eba743d8eb5..e3095301f3c5 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -517,7 +517,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 9bdbb418ffa8..948e48ddd128 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -498,7 +498,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index e1cf20fa5343..5bcf9181c37c 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -498,7 +498,7 @@ CONFIG_ENCRYPTED_KEYS=m CONFIG_HARDENED_USERCOPY=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c index 8ccf167c167e..e8c38aaf46a2 100644 --- a/arch/mips/ath25/ar2315.c +++ b/arch/mips/ath25/ar2315.c @@ -149,8 +149,8 @@ void __init ar2315_arch_init_irq(void) ath25_irq_dispatch = ar2315_irq_dispatch; - domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT, - &ar2315_misc_irq_domain_ops, NULL); + domain = irq_domain_create_linear(NULL, AR2315_MISC_IRQ_COUNT, + &ar2315_misc_irq_domain_ops, NULL); if (!domain) panic("Failed to add IRQ domain"); diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c index cfa103518113..4a1d874be766 100644 --- a/arch/mips/ath25/ar5312.c +++ b/arch/mips/ath25/ar5312.c @@ -143,8 +143,8 @@ void __init 
ar5312_arch_init_irq(void) ath25_irq_dispatch = ar5312_irq_dispatch; - domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT, - &ar5312_misc_irq_domain_ops, NULL); + domain = irq_domain_create_linear(NULL, AR5312_MISC_IRQ_COUNT, + &ar5312_misc_irq_domain_ops, NULL); if (!domain) panic("Failed to add IRQ domain"); diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 450e979ef5d9..11f4aa6e80e9 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -23,6 +23,12 @@ config CAVIUM_OCTEON_CVMSEG_SIZE legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is between zero and 6192 bytes). +config CRYPTO_SHA256_OCTEON + tristate + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_LIB_SHA256_GENERIC + endif # CPU_CAVIUM_OCTEON if CAVIUM_OCTEON_SOC diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c index 5ee4ade99b99..fbc84eb7fedf 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-md5.c +++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c @@ -19,22 +19,26 @@ * any later version. */ +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> #include <crypto/md5.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> -#include <asm/byteorder.h> -#include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> +#include <linux/unaligned.h> #include "octeon-crypto.h" +struct octeon_md5_state { + __le32 hash[MD5_HASH_WORDS]; + u64 byte_count; +}; + /* * We pass everything as 64-bit. OCTEON can handle misaligned data. */ -static void octeon_md5_store_hash(struct md5_state *ctx) +static void octeon_md5_store_hash(struct octeon_md5_state *ctx) { u64 *hash = (u64 *)ctx->hash; @@ -42,7 +46,7 @@ static void octeon_md5_store_hash(struct md5_state *ctx) write_octeon_64bit_hash_dword(hash[1], 1); } -static void octeon_md5_read_hash(struct md5_state *ctx) +static void octeon_md5_read_hash(struct octeon_md5_state *ctx) { u64 *hash = (u64 *)ctx->hash; @@ -66,13 +70,12 @@ static void octeon_md5_transform(const void *_block) static int octeon_md5_init(struct shash_desc *desc) { - struct md5_state *mctx = shash_desc_ctx(desc); + struct octeon_md5_state *mctx = shash_desc_ctx(desc); - mctx->hash[0] = MD5_H0; - mctx->hash[1] = MD5_H1; - mctx->hash[2] = MD5_H2; - mctx->hash[3] = MD5_H3; - cpu_to_le32_array(mctx->hash, 4); + mctx->hash[0] = cpu_to_le32(MD5_H0); + mctx->hash[1] = cpu_to_le32(MD5_H1); + mctx->hash[2] = cpu_to_le32(MD5_H2); + mctx->hash[3] = cpu_to_le32(MD5_H3); mctx->byte_count = 0; return 0; @@ -81,52 +84,38 @@ static int octeon_md5_init(struct shash_desc *desc) static int octeon_md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct md5_state *mctx = shash_desc_ctx(desc); - const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + struct octeon_md5_state *mctx = shash_desc_ctx(desc); struct octeon_cop2_state state; unsigned long flags; mctx->byte_count += len; - - if (avail > len) { - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, len); - return 0; - } - - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, - avail); - flags = octeon_crypto_enable(&state); octeon_md5_store_hash(mctx); - octeon_md5_transform(mctx->block); - data += avail; - len -= avail; - - while (len >= sizeof(mctx->block)) { + do { octeon_md5_transform(data); - data += sizeof(mctx->block); - len -= sizeof(mctx->block); - } + data += 
MD5_HMAC_BLOCK_SIZE; + len -= MD5_HMAC_BLOCK_SIZE; + } while (len >= MD5_HMAC_BLOCK_SIZE); octeon_md5_read_hash(mctx); octeon_crypto_disable(&state, flags); - - memcpy(mctx->block, data, len); - - return 0; + mctx->byte_count -= len; + return len; } -static int octeon_md5_final(struct shash_desc *desc, u8 *out) +static int octeon_md5_finup(struct shash_desc *desc, const u8 *src, + unsigned int offset, u8 *out) { - struct md5_state *mctx = shash_desc_ctx(desc); - const unsigned int offset = mctx->byte_count & 0x3f; - char *p = (char *)mctx->block + offset; + struct octeon_md5_state *mctx = shash_desc_ctx(desc); int padding = 56 - (offset + 1); struct octeon_cop2_state state; + u32 block[MD5_BLOCK_WORDS]; unsigned long flags; + char *p; + p = memcpy(block, src, offset); + p += offset; *p++ = 0x80; flags = octeon_crypto_enable(&state); @@ -134,39 +123,56 @@ static int octeon_md5_final(struct shash_desc *desc, u8 *out) if (padding < 0) { memset(p, 0x00, padding + sizeof(u64)); - octeon_md5_transform(mctx->block); - p = (char *)mctx->block; + octeon_md5_transform(block); + p = (char *)block; padding = 56; } memset(p, 0, padding); - mctx->block[14] = mctx->byte_count << 3; - mctx->block[15] = mctx->byte_count >> 29; - cpu_to_le32_array(mctx->block + 14, 2); - octeon_md5_transform(mctx->block); + mctx->byte_count += offset; + block[14] = mctx->byte_count << 3; + block[15] = mctx->byte_count >> 29; + cpu_to_le32_array(block + 14, 2); + octeon_md5_transform(block); octeon_md5_read_hash(mctx); octeon_crypto_disable(&state, flags); + memzero_explicit(block, sizeof(block)); memcpy(out, mctx->hash, sizeof(mctx->hash)); - memset(mctx, 0, sizeof(*mctx)); return 0; } static int octeon_md5_export(struct shash_desc *desc, void *out) { - struct md5_state *ctx = shash_desc_ctx(desc); - - memcpy(out, ctx, sizeof(*ctx)); + struct octeon_md5_state *ctx = shash_desc_ctx(desc); + union { + u8 *u8; + u32 *u32; + u64 *u64; + } p = { .u8 = out }; + int i; + + for (i = 0; i < MD5_HASH_WORDS; i++) + put_unaligned(le32_to_cpu(ctx->hash[i]), p.u32++); + put_unaligned(ctx->byte_count, p.u64); return 0; } static int octeon_md5_import(struct shash_desc *desc, const void *in) { - struct md5_state *ctx = shash_desc_ctx(desc); - - memcpy(ctx, in, sizeof(*ctx)); + struct octeon_md5_state *ctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u32 *u32; + const u64 *u64; + } p = { .u8 = in }; + int i; + + for (i = 0; i < MD5_HASH_WORDS; i++) + ctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++)); + ctx->byte_count = get_unaligned(p.u64); return 0; } @@ -174,15 +180,16 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = octeon_md5_init, .update = octeon_md5_update, - .final = octeon_md5_final, + .finup = octeon_md5_finup, .export = octeon_md5_export, .import = octeon_md5_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .statesize = MD5_STATE_SIZE, + .descsize = sizeof(struct octeon_md5_state), .base = { .cra_name = "md5", .cra_driver_name= "octeon-md5", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c index 37a07b3c4568..e70f21a473da 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c +++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c @@ -13,15 +13,13 @@ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> */ -#include <linux/mm.h> +#include 
<asm/octeon/octeon.h> +#include <crypto/internal/hash.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/byteorder.h> -#include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> #include "octeon-crypto.h" @@ -58,49 +56,23 @@ static void octeon_sha1_read_hash(struct sha1_state *sctx) memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword)); } -static void octeon_sha1_transform(const void *_block) +static void octeon_sha1_transform(struct sha1_state *sctx, const u8 *src, + int blocks) { - const u64 *block = _block; - - write_octeon_64bit_block_dword(block[0], 0); - write_octeon_64bit_block_dword(block[1], 1); - write_octeon_64bit_block_dword(block[2], 2); - write_octeon_64bit_block_dword(block[3], 3); - write_octeon_64bit_block_dword(block[4], 4); - write_octeon_64bit_block_dword(block[5], 5); - write_octeon_64bit_block_dword(block[6], 6); - octeon_sha1_start(block[7]); -} - -static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data, - unsigned int len) -{ - unsigned int partial; - unsigned int done; - const u8 *src; - - partial = sctx->count % SHA1_BLOCK_SIZE; - sctx->count += len; - done = 0; - src = data; - - if ((partial + len) >= SHA1_BLOCK_SIZE) { - if (partial) { - done = -partial; - memcpy(sctx->buffer + partial, data, - done + SHA1_BLOCK_SIZE); - src = sctx->buffer; - } - - do { - octeon_sha1_transform(src); - done += SHA1_BLOCK_SIZE; - src = data + done; - } while (done + SHA1_BLOCK_SIZE <= len); - - partial = 0; - } - memcpy(sctx->buffer + partial, src, len - done); + do { + const u64 *block = (const u64 *)src; + + write_octeon_64bit_block_dword(block[0], 0); + write_octeon_64bit_block_dword(block[1], 1); + write_octeon_64bit_block_dword(block[2], 2); + write_octeon_64bit_block_dword(block[3], 3); + write_octeon_64bit_block_dword(block[4], 4); + write_octeon_64bit_block_dword(block[5], 5); + write_octeon_64bit_block_dword(block[6], 6); + octeon_sha1_start(block[7]); + + src += SHA1_BLOCK_SIZE; + } while (--blocks); } static int octeon_sha1_update(struct shash_desc *desc, const u8 *data, @@ -109,95 +81,47 @@ static int octeon_sha1_update(struct shash_desc *desc, const u8 *data, struct sha1_state *sctx = shash_desc_ctx(desc); struct octeon_cop2_state state; unsigned long flags; - - /* - * Small updates never reach the crypto engine, so the generic sha1 is - * faster because of the heavyweight octeon_crypto_enable() / - * octeon_crypto_disable(). - */ - if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) - return crypto_sha1_update(desc, data, len); + int remain; flags = octeon_crypto_enable(&state); octeon_sha1_store_hash(sctx); - __octeon_sha1_update(sctx, data, len); + remain = sha1_base_do_update_blocks(desc, data, len, + octeon_sha1_transform); octeon_sha1_read_hash(sctx); octeon_crypto_disable(&state, flags); - - return 0; + return remain; } -static int octeon_sha1_final(struct shash_desc *desc, u8 *out) +static int octeon_sha1_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); - static const u8 padding[64] = { 0x80, }; struct octeon_cop2_state state; - __be32 *dst = (__be32 *)out; - unsigned int pad_len; unsigned long flags; - unsigned int index; - __be64 bits; - int i; - - /* Save number of bits. */ - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64. 
*/ - index = sctx->count & 0x3f; - pad_len = (index < 56) ? (56 - index) : ((64+56) - index); flags = octeon_crypto_enable(&state); octeon_sha1_store_hash(sctx); - __octeon_sha1_update(sctx, padding, pad_len); - - /* Append length (before padding). */ - __octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); + sha1_base_do_finup(desc, src, len, octeon_sha1_transform); octeon_sha1_read_hash(sctx); octeon_crypto_disable(&state, flags); - - /* Store state in digest */ - for (i = 0; i < 5; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Zeroize sensitive information. */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int octeon_sha1_export(struct shash_desc *desc, void *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int octeon_sha1_import(struct shash_desc *desc, const void *in) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; + return sha1_base_finish(desc, out); } static struct shash_alg octeon_sha1_alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = octeon_sha1_update, - .final = octeon_sha1_final, - .export = octeon_sha1_export, - .import = octeon_sha1_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = octeon_sha1_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "octeon-sha1", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c index 435e4a6e7f13..f93faaf1f4af 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c +++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Cryptographic API. - * - * SHA-224 and SHA-256 Secure Hash Algorithm. + * SHA-256 Secure Hash Algorithm. * * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>. * @@ -14,15 +12,10 @@ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> */ -#include <linux/mm.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/module.h> -#include <asm/byteorder.h> #include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> #include "octeon-crypto.h" @@ -30,212 +23,51 @@ * We pass everything as 64-bit. OCTEON can handle misaligned data. 
*/ -static void octeon_sha256_store_hash(struct sha256_state *sctx) -{ - u64 *hash = (u64 *)sctx->state; - - write_octeon_64bit_hash_dword(hash[0], 0); - write_octeon_64bit_hash_dword(hash[1], 1); - write_octeon_64bit_hash_dword(hash[2], 2); - write_octeon_64bit_hash_dword(hash[3], 3); -} - -static void octeon_sha256_read_hash(struct sha256_state *sctx) -{ - u64 *hash = (u64 *)sctx->state; - - hash[0] = read_octeon_64bit_hash_dword(0); - hash[1] = read_octeon_64bit_hash_dword(1); - hash[2] = read_octeon_64bit_hash_dword(2); - hash[3] = read_octeon_64bit_hash_dword(3); -} - -static void octeon_sha256_transform(const void *_block) -{ - const u64 *block = _block; - - write_octeon_64bit_block_dword(block[0], 0); - write_octeon_64bit_block_dword(block[1], 1); - write_octeon_64bit_block_dword(block[2], 2); - write_octeon_64bit_block_dword(block[3], 3); - write_octeon_64bit_block_dword(block[4], 4); - write_octeon_64bit_block_dword(block[5], 5); - write_octeon_64bit_block_dword(block[6], 6); - octeon_sha256_start(block[7]); -} - -static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data, - unsigned int len) -{ - unsigned int partial; - unsigned int done; - const u8 *src; - - partial = sctx->count % SHA256_BLOCK_SIZE; - sctx->count += len; - done = 0; - src = data; - - if ((partial + len) >= SHA256_BLOCK_SIZE) { - if (partial) { - done = -partial; - memcpy(sctx->buf + partial, data, - done + SHA256_BLOCK_SIZE); - src = sctx->buf; - } - - do { - octeon_sha256_transform(src); - done += SHA256_BLOCK_SIZE; - src = data + done; - } while (done + SHA256_BLOCK_SIZE <= len); - - partial = 0; - } - memcpy(sctx->buf + partial, src, len - done); -} - -static int octeon_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - struct octeon_cop2_state state; - unsigned long flags; - - /* - * Small updates never reach the crypto engine, so the generic sha256 is - * faster because of the heavyweight octeon_crypto_enable() / - * octeon_crypto_disable(). - */ - if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) - return crypto_sha256_update(desc, data, len); - - flags = octeon_crypto_enable(&state); - octeon_sha256_store_hash(sctx); - - __octeon_sha256_update(sctx, data, len); - - octeon_sha256_read_hash(sctx); - octeon_crypto_disable(&state, flags); - - return 0; -} - -static int octeon_sha256_final(struct shash_desc *desc, u8 *out) +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) { - struct sha256_state *sctx = shash_desc_ctx(desc); - static const u8 padding[64] = { 0x80, }; - struct octeon_cop2_state state; - __be32 *dst = (__be32 *)out; - unsigned int pad_len; + struct octeon_cop2_state cop2_state; + u64 *state64 = (u64 *)state; unsigned long flags; - unsigned int index; - __be64 bits; - int i; - - /* Save number of bits. */ - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64. */ - index = sctx->count & 0x3f; - pad_len = (index < 56) ? (56 - index) : ((64+56) - index); - - flags = octeon_crypto_enable(&state); - octeon_sha256_store_hash(sctx); - - __octeon_sha256_update(sctx, padding, pad_len); - - /* Append length (before padding). */ - __octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); - - octeon_sha256_read_hash(sctx); - octeon_crypto_disable(&state, flags); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Zeroize sensitive information. 
*/ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int octeon_sha224_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[SHA256_DIGEST_SIZE]; - - octeon_sha256_final(desc, D); - memcpy(hash, D, SHA224_DIGEST_SIZE); - memzero_explicit(D, SHA256_DIGEST_SIZE); - - return 0; -} - -static int octeon_sha256_export(struct shash_desc *desc, void *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int octeon_sha256_import(struct shash_desc *desc, const void *in) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; -} - -static struct shash_alg octeon_sha256_algs[2] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = octeon_sha256_update, - .final = octeon_sha256_final, - .export = octeon_sha256_export, - .import = octeon_sha256_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "octeon-sha256", - .cra_priority = OCTEON_CR_OPCODE_PRIORITY, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = octeon_sha256_update, - .final = octeon_sha224_final, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "octeon-sha224", - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init octeon_sha256_mod_init(void) -{ if (!octeon_has_crypto()) - return -ENOTSUPP; - return crypto_register_shashes(octeon_sha256_algs, - ARRAY_SIZE(octeon_sha256_algs)); + return sha256_blocks_generic(state, data, nblocks); + + flags = octeon_crypto_enable(&cop2_state); + write_octeon_64bit_hash_dword(state64[0], 0); + write_octeon_64bit_hash_dword(state64[1], 1); + write_octeon_64bit_hash_dword(state64[2], 2); + write_octeon_64bit_hash_dword(state64[3], 3); + + do { + const u64 *block = (const u64 *)data; + + write_octeon_64bit_block_dword(block[0], 0); + write_octeon_64bit_block_dword(block[1], 1); + write_octeon_64bit_block_dword(block[2], 2); + write_octeon_64bit_block_dword(block[3], 3); + write_octeon_64bit_block_dword(block[4], 4); + write_octeon_64bit_block_dword(block[5], 5); + write_octeon_64bit_block_dword(block[6], 6); + octeon_sha256_start(block[7]); + + data += SHA256_BLOCK_SIZE; + } while (--nblocks); + + state64[0] = read_octeon_64bit_hash_dword(0); + state64[1] = read_octeon_64bit_hash_dword(1); + state64[2] = read_octeon_64bit_hash_dword(2); + state64[3] = read_octeon_64bit_hash_dword(3); + octeon_crypto_disable(&cop2_state, flags); } +EXPORT_SYMBOL_GPL(sha256_blocks_arch); -static void __exit octeon_sha256_mod_fini(void) +bool sha256_is_arch_optimized(void) { - crypto_unregister_shashes(octeon_sha256_algs, - ARRAY_SIZE(octeon_sha256_algs)); + return octeon_has_crypto(); } - -module_init(octeon_sha256_mod_init); -module_exit(octeon_sha256_mod_fini); +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)"); +MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm (OCTEON)"); MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>"); diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c index 2dee9354e33f..215311053db3 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c +++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c @@ -13,15 +13,12 @@ * 
Copyright (c) 2003 Kyle McMartin <kyle@debian.org> */ -#include <linux/mm.h> +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/byteorder.h> -#include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> #include "octeon-crypto.h" @@ -53,60 +50,31 @@ static void octeon_sha512_read_hash(struct sha512_state *sctx) sctx->state[7] = read_octeon_64bit_hash_sha512(7); } -static void octeon_sha512_transform(const void *_block) +static void octeon_sha512_transform(struct sha512_state *sctx, + const u8 *src, int blocks) { - const u64 *block = _block; - - write_octeon_64bit_block_sha512(block[0], 0); - write_octeon_64bit_block_sha512(block[1], 1); - write_octeon_64bit_block_sha512(block[2], 2); - write_octeon_64bit_block_sha512(block[3], 3); - write_octeon_64bit_block_sha512(block[4], 4); - write_octeon_64bit_block_sha512(block[5], 5); - write_octeon_64bit_block_sha512(block[6], 6); - write_octeon_64bit_block_sha512(block[7], 7); - write_octeon_64bit_block_sha512(block[8], 8); - write_octeon_64bit_block_sha512(block[9], 9); - write_octeon_64bit_block_sha512(block[10], 10); - write_octeon_64bit_block_sha512(block[11], 11); - write_octeon_64bit_block_sha512(block[12], 12); - write_octeon_64bit_block_sha512(block[13], 13); - write_octeon_64bit_block_sha512(block[14], 14); - octeon_sha512_start(block[15]); -} - -static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data, - unsigned int len) -{ - unsigned int part_len; - unsigned int index; - unsigned int i; - - /* Compute number of bytes mod 128. */ - index = sctx->count[0] % SHA512_BLOCK_SIZE; - - /* Update number of bytes. */ - if ((sctx->count[0] += len) < len) - sctx->count[1]++; - - part_len = SHA512_BLOCK_SIZE - index; - - /* Transform as many times as possible. */ - if (len >= part_len) { - memcpy(&sctx->buf[index], data, part_len); - octeon_sha512_transform(sctx->buf); - - for (i = part_len; i + SHA512_BLOCK_SIZE <= len; - i += SHA512_BLOCK_SIZE) - octeon_sha512_transform(&data[i]); - - index = 0; - } else { - i = 0; - } - - /* Buffer remaining input. 
*/ - memcpy(&sctx->buf[index], &data[i], len - i); + do { + const u64 *block = (const u64 *)src; + + write_octeon_64bit_block_sha512(block[0], 0); + write_octeon_64bit_block_sha512(block[1], 1); + write_octeon_64bit_block_sha512(block[2], 2); + write_octeon_64bit_block_sha512(block[3], 3); + write_octeon_64bit_block_sha512(block[4], 4); + write_octeon_64bit_block_sha512(block[5], 5); + write_octeon_64bit_block_sha512(block[6], 6); + write_octeon_64bit_block_sha512(block[7], 7); + write_octeon_64bit_block_sha512(block[8], 8); + write_octeon_64bit_block_sha512(block[9], 9); + write_octeon_64bit_block_sha512(block[10], 10); + write_octeon_64bit_block_sha512(block[11], 11); + write_octeon_64bit_block_sha512(block[12], 12); + write_octeon_64bit_block_sha512(block[13], 13); + write_octeon_64bit_block_sha512(block[14], 14); + octeon_sha512_start(block[15]); + + src += SHA512_BLOCK_SIZE; + } while (--blocks); } static int octeon_sha512_update(struct shash_desc *desc, const u8 *data, @@ -115,89 +83,48 @@ static int octeon_sha512_update(struct shash_desc *desc, const u8 *data, struct sha512_state *sctx = shash_desc_ctx(desc); struct octeon_cop2_state state; unsigned long flags; - - /* - * Small updates never reach the crypto engine, so the generic sha512 is - * faster because of the heavyweight octeon_crypto_enable() / - * octeon_crypto_disable(). - */ - if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) - return crypto_sha512_update(desc, data, len); + int remain; flags = octeon_crypto_enable(&state); octeon_sha512_store_hash(sctx); - __octeon_sha512_update(sctx, data, len); + remain = sha512_base_do_update_blocks(desc, data, len, + octeon_sha512_transform); octeon_sha512_read_hash(sctx); octeon_crypto_disable(&state, flags); - - return 0; + return remain; } -static int octeon_sha512_final(struct shash_desc *desc, u8 *hash) +static int octeon_sha512_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *hash) { struct sha512_state *sctx = shash_desc_ctx(desc); - static u8 padding[128] = { 0x80, }; struct octeon_cop2_state state; - __be64 *dst = (__be64 *)hash; - unsigned int pad_len; unsigned long flags; - unsigned int index; - __be64 bits[2]; - int i; - - /* Save number of bits. */ - bits[1] = cpu_to_be64(sctx->count[0] << 3); - bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); - - /* Pad out to 112 mod 128. */ - index = sctx->count[0] & 0x7f; - pad_len = (index < 112) ? (112 - index) : ((128+112) - index); flags = octeon_crypto_enable(&state); octeon_sha512_store_hash(sctx); - __octeon_sha512_update(sctx, padding, pad_len); - - /* Append length (before padding). */ - __octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits)); + sha512_base_do_finup(desc, src, len, octeon_sha512_transform); octeon_sha512_read_hash(sctx); octeon_crypto_disable(&state, flags); - - /* Store state in digest. */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be64(sctx->state[i]); - - /* Zeroize sensitive information. 
*/ - memset(sctx, 0, sizeof(struct sha512_state)); - - return 0; -} - -static int octeon_sha384_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[64]; - - octeon_sha512_final(desc, D); - - memcpy(hash, D, 48); - memzero_explicit(D, 64); - - return 0; + return sha512_base_finish(desc, hash); } static struct shash_alg octeon_sha512_algs[2] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = octeon_sha512_update, - .final = octeon_sha512_final, - .descsize = sizeof(struct sha512_state), + .finup = octeon_sha512_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name= "octeon-sha512", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -205,12 +132,14 @@ static struct shash_alg octeon_sha512_algs[2] = { { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = octeon_sha512_update, - .final = octeon_sha384_final, - .descsize = sizeof(struct sha512_state), + .finup = octeon_sha512_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name= "octeon-sha384", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index e6b4d9c0c169..5c3de175ef5b 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -1503,8 +1503,8 @@ static int __init octeon_irq_init_ciu( /* Mips internal */ octeon_irq_init_core(); - ciu_domain = irq_domain_add_tree( - ciu_node, &octeon_irq_domain_ciu_ops, dd); + ciu_domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_irq_domain_ciu_ops, + dd); irq_set_default_domain(ciu_domain); /* CIU_0 */ @@ -1637,8 +1637,8 @@ static int __init octeon_irq_init_gpio( if (gpiod) { /* gpio domain host_data is the base hwirq number. */ gpiod->base_hwirq = base_hwirq; - irq_domain_add_linear( - gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); + irq_domain_create_linear(of_fwnode_handle(gpio_node), 16, + &octeon_irq_domain_gpio_ops, gpiod); } else { pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); return -ENOMEM; @@ -2074,8 +2074,8 @@ static int __init octeon_irq_init_ciu2( /* Mips internal */ octeon_irq_init_core(); - ciu_domain = irq_domain_add_tree( - ciu_node, &octeon_irq_domain_ciu2_ops, NULL); + ciu_domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_irq_domain_ciu2_ops, + NULL); irq_set_default_domain(ciu_domain); /* CUI2 */ @@ -2331,11 +2331,12 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, } host_data->max_bits = val; - cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, - &octeon_irq_domain_cib_ops, - host_data); + cib_domain = irq_domain_create_linear(of_fwnode_handle(ciu_node), + host_data->max_bits, + &octeon_irq_domain_cib_ops, + host_data); if (!cib_domain) { - pr_err("ERROR: Couldn't irq_domain_add_linear()\n"); + pr_err("ERROR: Couldn't irq_domain_create_linear()\n"); return -ENOMEM; } @@ -2918,8 +2919,8 @@ static int __init octeon_irq_init_ciu3(struct device_node *ciu_node, * Initialize all domains to use the default domain. Specific major * blocks will overwrite the default domain as needed. 
*/ - domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops, - ciu3_info); + domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_dflt_domain_ciu3_ops, + ciu3_info); for (i = 0; i < MAX_CIU3_DOMAINS; i++) ciu3_info->domain[i] = domain; diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index f523ee6f25bf..88ae0aa85364 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -157,7 +157,6 @@ CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5_OCTEON=y CONFIG_CRYPTO_SHA1_OCTEON=m -CONFIG_CRYPTO_SHA256_OCTEON=m CONFIG_CRYPTO_SHA512_OCTEON=m CONFIG_CRYPTO_DES=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig index 9655567614aa..85a4472cb058 100644 --- a/arch/mips/configs/decstation_64_defconfig +++ b/arch/mips/configs/decstation_64_defconfig @@ -168,7 +168,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_CRYPTO_RSA=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig index 1539fe8eb34d..a3b2c8da2dde 100644 --- a/arch/mips/configs/decstation_defconfig +++ b/arch/mips/configs/decstation_defconfig @@ -163,7 +163,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_CRYPTO_RSA=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig index 58c36720c94a..a476717b8a6a 100644 --- a/arch/mips/configs/decstation_r4k_defconfig +++ b/arch/mips/configs/decstation_r4k_defconfig @@ -163,7 +163,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_CRYPTO_RSA=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig index bc1ef66e3999..8b7ad877e07a 100644 --- a/arch/mips/configs/gcw0_defconfig +++ b/arch/mips/configs/gcw0_defconfig @@ -13,7 +13,6 @@ CONFIG_MIPS_CMDLINE_DTB_EXTEND=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set -# CONFIG_BOUNCE is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 12f3eed8a946..48c8feec958f 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -273,7 +273,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_850=y CONFIG_NLS_ISO8859_1=y CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig index e0040110a3ee..6db21e498faa 100644 --- a/arch/mips/configs/ip28_defconfig +++ b/arch/mips/configs/ip28_defconfig @@ -60,6 +60,5 @@ CONFIG_TMPFS_POSIX_ACL=y CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y -CONFIG_CRYPTO_MANAGER=y # CONFIG_CRYPTO_HW is not set CONFIG_MAGIC_SYSRQ=y diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 71d6340497c9..5038a27d035f 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig @@ -297,7 +297,7 @@ CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=y 
CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 06b7a0b97eca..cbf9c35a6177 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -662,7 +662,7 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5=y diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig index 42b161d587c7..9fb114ef5e2d 100644 --- a/arch/mips/configs/rb532_defconfig +++ b/arch/mips/configs/rb532_defconfig @@ -153,6 +153,6 @@ CONFIG_JFFS2_FS=y CONFIG_JFFS2_SUMMARY=y CONFIG_JFFS2_COMPRESSION_OPTIONS=y CONFIG_SQUASHFS=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m # CONFIG_CRYPTO_HW is not set CONFIG_STRIP_ASM_SYMS=y diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig index 545fc0e12422..6bf073ae7613 100644 --- a/arch/mips/crypto/Kconfig +++ b/arch/mips/crypto/Kconfig @@ -2,17 +2,6 @@ menu "Accelerated Cryptographic Algorithms for CPU (mips)" -config CRYPTO_POLY1305_MIPS - tristate - depends on MIPS - select CRYPTO_HASH - select CRYPTO_ARCH_HAVE_LIB_POLY1305 - default CRYPTO_LIB_POLY1305_INTERNAL - help - Poly1305 authenticator algorithm (RFC7539) - - Architecture: mips - config CRYPTO_MD5_OCTEON tristate "Digests: MD5 (OCTEON)" depends on CPU_CAVIUM_OCTEON @@ -33,16 +22,6 @@ config CRYPTO_SHA1_OCTEON Architecture: mips OCTEON -config CRYPTO_SHA256_OCTEON - tristate "Hash functions: SHA-224 and SHA-256 (OCTEON)" - depends on CPU_CAVIUM_OCTEON - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: mips OCTEON using crypto instructions, when available - config CRYPTO_SHA512_OCTEON tristate "Hash functions: SHA-384 and SHA-512 (OCTEON)" depends on CPU_CAVIUM_OCTEON @@ -53,16 +32,4 @@ config CRYPTO_SHA512_OCTEON Architecture: mips OCTEON using crypto instructions, when available -config CRYPTO_CHACHA_MIPS - tristate - depends on CPU_MIPS32_R2 - select CRYPTO_SKCIPHER - select CRYPTO_ARCH_HAVE_LIB_CHACHA - default CRYPTO_LIB_CHACHA_INTERNAL - help - Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 - stream cipher algorithms - - Architecture: MIPS32r2 - endmenu diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile index fddc88281412..5adb631a69c1 100644 --- a/arch/mips/crypto/Makefile +++ b/arch/mips/crypto/Makefile @@ -3,20 +3,3 @@ # Makefile for MIPS crypto files.. 
# -obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o -chacha-mips-y := chacha-core.o chacha-glue.o -AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots - -obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o -poly1305-mips-y := poly1305-core.o poly1305-glue.o - -perlasm-flavour-$(CONFIG_32BIT) := o32 -perlasm-flavour-$(CONFIG_64BIT) := 64 - -quiet_cmd_perlasm = PERLASM $@ - cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@) - -$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE - $(call if_changed,perlasm) - -targets += poly1305-core.S diff --git a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c deleted file mode 100644 index f6fc2e1079a1..000000000000 --- a/arch/mips/crypto/chacha-glue.c +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * MIPS accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> - */ - -#include <asm/byteorder.h> -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> - -asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds); -EXPORT_SYMBOL(chacha_crypt_arch); - -asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds); -EXPORT_SYMBOL(hchacha_block_arch); - -static int chacha_mips_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - chacha_init(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes, ctx->nrounds); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static int chacha_mips(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_mips_stream_xor(req, ctx, req->iv); -} - -static int xchacha_mips(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - u32 state[16]; - u8 real_iv[16]; - - chacha_init(state, ctx->key, req->iv); - - hchacha_block(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); - return chacha_mips_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-mips", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_mips, - .decrypt = chacha_mips, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-mips", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - 
.setkey = chacha20_setkey, - .encrypt = xchacha_mips, - .decrypt = xchacha_mips, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-mips", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_mips, - .decrypt = xchacha_mips, - } -}; - -static int __init chacha_simd_mod_init(void) -{ - return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ? - crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; -} - -static void __exit chacha_simd_mod_fini(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -module_init(chacha_simd_mod_init); -module_exit(chacha_simd_mod_fini); - -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-mips"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-mips"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-mips"); diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c deleted file mode 100644 index c03ad0bbe69c..000000000000 --- a/arch/mips/crypto/poly1305-glue.c +++ /dev/null @@ -1,192 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS - * - * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org> - */ - -#include <linux/unaligned.h> -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <linux/cpufeature.h> -#include <linux/crypto.h> -#include <linux/module.h> - -asmlinkage void poly1305_init_mips(void *state, const u8 *key); -asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); -asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce); - -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) -{ - poly1305_init_mips(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(key + 16); - dctx->s[1] = get_unaligned_le32(key + 20); - dctx->s[2] = get_unaligned_le32(key + 24); - dctx->s[3] = get_unaligned_le32(key + 28); - dctx->buflen = 0; -} -EXPORT_SYMBOL(poly1305_init_arch); - -static int mips_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - u32 len, u32 hibit) -{ - if (unlikely(!dctx->sset)) { - if (!dctx->rset) { - poly1305_init_mips(&dctx->h, src); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (len >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - if (len < POLY1305_BLOCK_SIZE) - return; - } - - len &= ~(POLY1305_BLOCK_SIZE - 1); - - poly1305_blocks_mips(&dctx->h, src, len, hibit); -} - -static int mips_poly1305_update(struct shash_desc *desc, const u8 *src, - unsigned int len) -{ - struct poly1305_desc_ctx 
*dctx = shash_desc_ctx(desc); - - if (unlikely(dctx->buflen)) { - u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - len -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(len >= POLY1305_BLOCK_SIZE)) { - mips_poly1305_blocks(dctx, src, len, 1); - src += round_down(len, POLY1305_BLOCK_SIZE); - len %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(len)) { - dctx->buflen = len; - memcpy(dctx->buf, src, len); - } - return 0; -} - -void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int nbytes) -{ - if (unlikely(dctx->buflen)) { - u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - nbytes -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks_mips(&dctx->h, dctx->buf, - POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { - unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); - - poly1305_blocks_mips(&dctx->h, src, len, 1); - src += len; - nbytes %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(nbytes)) { - dctx->buflen = nbytes; - memcpy(dctx->buf, src, nbytes); - } -} -EXPORT_SYMBOL(poly1305_update_arch); - -void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -{ - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); - poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - - poly1305_emit_mips(&dctx->h, dst, dctx->s); - *dctx = (struct poly1305_desc_ctx){}; -} -EXPORT_SYMBOL(poly1305_final_arch); - -static int mips_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_arch(dctx, dst); - return 0; -} - -static struct shash_alg mips_poly1305_alg = { - .init = mips_poly1305_init, - .update = mips_poly1305_update, - .final = mips_poly1305_final, - .digestsize = POLY1305_DIGEST_SIZE, - .descsize = sizeof(struct poly1305_desc_ctx), - - .base.cra_name = "poly1305", - .base.cra_driver_name = "poly1305-mips", - .base.cra_priority = 200, - .base.cra_blocksize = POLY1305_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}; - -static int __init mips_poly1305_mod_init(void) -{ - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? - crypto_register_shash(&mips_poly1305_alg) : 0; -} - -static void __exit mips_poly1305_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) - crypto_unregister_shash(&mips_poly1305_alg); -} - -module_init(mips_poly1305_mod_init); -module_exit(mips_poly1305_mod_exit); - -MODULE_DESCRIPTION("Poly1305 transform (MIPS accelerated"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-mips"); diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h index 4724a563c5bf..43a09f0dd3ff 100644 --- a/arch/mips/include/asm/socket.h +++ b/arch/mips/include/asm/socket.h @@ -36,15 +36,6 @@ enum sock_type { SOCK_PACKET = 10, }; -#define SOCK_MAX (SOCK_PACKET + 1) -/* Mask which covers at least up to SOCK_MASK-1. The - * * remaining bits are used as flags. 
*/ -#define SOCK_TYPE_MASK 0xf - -/* Flags for socket, socketpair, paccept */ -#define SOCK_CLOEXEC O_CLOEXEC -#define SOCK_NONBLOCK O_NONBLOCK - #define ARCH_HAS_SOCKET_TYPES 1 #endif /* _ASM_SOCKET_H */ diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index c4d6b09136b1..196a070349b0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -791,8 +791,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, if (!mipspmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - mipsxx_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 8f208007b8e8..a112573b6e37 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -377,7 +377,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) for (i = 0; i < MAX_IM; i++) irq_set_chained_handler(i + 2, ltq_hw_irq_handler); - ltq_domain = irq_domain_add_linear(node, + ltq_domain = irq_domain_create_linear(of_fwnode_handle(node), (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, &irq_domain_ops, 0); diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 9c024e6d5e54..9d75845ef78e 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -3,6 +3,8 @@ # Makefile for MIPS-specific library files.. # +obj-y += crypto/ + lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ mips-atomic.o strncpy_user.o \ strnlen_user.o uncached.o diff --git a/arch/mips/lib/crc32-mips.c b/arch/mips/lib/crc32-mips.c index 676a4b3e290b..45e4d2c9fbf5 100644 --- a/arch/mips/lib/crc32-mips.c +++ b/arch/mips/lib/crc32-mips.c @@ -62,7 +62,7 @@ do { \ #define CRC32C(crc, value, size) \ _CRC32(crc, value, size, crc32c) -static DEFINE_STATIC_KEY_FALSE(have_crc32); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32); u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) { @@ -163,7 +163,7 @@ static int __init crc32_mips_init(void) static_branch_enable(&have_crc32); return 0; } -arch_initcall(crc32_mips_init); +subsys_initcall(crc32_mips_init); static void __exit crc32_mips_exit(void) { diff --git a/arch/mips/lib/crypto/.gitignore b/arch/mips/lib/crypto/.gitignore new file mode 100644 index 000000000000..0d47d4f21c6d --- /dev/null +++ b/arch/mips/lib/crypto/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +poly1305-core.S diff --git a/arch/mips/lib/crypto/Kconfig b/arch/mips/lib/crypto/Kconfig new file mode 100644 index 000000000000..0670a170c1be --- /dev/null +++ b/arch/mips/lib/crypto/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_CHACHA_MIPS + tristate + depends on CPU_MIPS32_R2 + default CRYPTO_LIB_CHACHA + select CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_POLY1305_MIPS + tristate + default CRYPTO_LIB_POLY1305 + select CRYPTO_ARCH_HAVE_LIB_POLY1305 diff --git a/arch/mips/lib/crypto/Makefile b/arch/mips/lib/crypto/Makefile new file mode 100644 index 000000000000..804488c7aded --- /dev/null +++ b/arch/mips/lib/crypto/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o +chacha-mips-y := chacha-core.o chacha-glue.o +AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots + +obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o +poly1305-mips-y := poly1305-core.o poly1305-glue.o + +perlasm-flavour-$(CONFIG_32BIT) := o32 +perlasm-flavour-$(CONFIG_64BIT) := 64 + +quiet_cmd_perlasm 
= PERLASM $@ + cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@) + +$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE + $(call if_changed,perlasm) + +targets += poly1305-core.S diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/lib/crypto/chacha-core.S index 5755f69cfe00..5755f69cfe00 100644 --- a/arch/mips/crypto/chacha-core.S +++ b/arch/mips/lib/crypto/chacha-core.S diff --git a/arch/mips/lib/crypto/chacha-glue.c b/arch/mips/lib/crypto/chacha-glue.c new file mode 100644 index 000000000000..88c097594eb0 --- /dev/null +++ b/arch/mips/lib/crypto/chacha-glue.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ChaCha and HChaCha functions (MIPS optimized) + * + * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> + */ + +#include <crypto/chacha.h> +#include <linux/kernel.h> +#include <linux/module.h> + +asmlinkage void chacha_crypt_arch(struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int bytes, int nrounds); +EXPORT_SYMBOL(chacha_crypt_arch); + +asmlinkage void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); +EXPORT_SYMBOL(hchacha_block_arch); + +bool chacha_is_arch_optimized(void) +{ + return true; +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +MODULE_DESCRIPTION("ChaCha and HChaCha functions (MIPS optimized)"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/mips/lib/crypto/poly1305-glue.c b/arch/mips/lib/crypto/poly1305-glue.c new file mode 100644 index 000000000000..764a38a65200 --- /dev/null +++ b/arch/mips/lib/crypto/poly1305-glue.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS + * + * Copyright (C) 2019 Linaro Ltd. 
<ard.biesheuvel@linaro.org> + */ + +#include <crypto/internal/poly1305.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/unaligned.h> + +asmlinkage void poly1305_block_init_arch( + struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +EXPORT_SYMBOL_GPL(poly1305_block_init_arch); +asmlinkage void poly1305_blocks_arch(struct poly1305_block_state *state, + const u8 *src, u32 len, u32 hibit); +EXPORT_SYMBOL_GPL(poly1305_blocks_arch); +asmlinkage void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]); +EXPORT_SYMBOL_GPL(poly1305_emit_arch); + +bool poly1305_is_arch_optimized(void) +{ + return true; +} +EXPORT_SYMBOL(poly1305_is_arch_optimized); + +MODULE_DESCRIPTION("Poly1305 transform (MIPS accelerated"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/mips/crypto/poly1305-mips.pl b/arch/mips/lib/crypto/poly1305-mips.pl index b05bab884ed2..399f10c3e385 100644 --- a/arch/mips/crypto/poly1305-mips.pl +++ b/arch/mips/lib/crypto/poly1305-mips.pl @@ -93,9 +93,9 @@ $code.=<<___; #endif #ifdef __KERNEL__ -# define poly1305_init poly1305_init_mips -# define poly1305_blocks poly1305_blocks_mips -# define poly1305_emit poly1305_emit_mips +# define poly1305_init poly1305_block_init_arch +# define poly1305_blocks poly1305_blocks_arch +# define poly1305_emit poly1305_emit_arch #endif #if defined(__MIPSEB__) && !defined(MIPSEB) @@ -565,9 +565,9 @@ $code.=<<___; #endif #ifdef __KERNEL__ -# define poly1305_init poly1305_init_mips -# define poly1305_blocks poly1305_blocks_mips -# define poly1305_emit poly1305_emit_mips +# define poly1305_init poly1305_block_init_arch +# define poly1305_blocks poly1305_blocks_arch +# define poly1305_emit poly1305_emit_arch #endif #if defined(__MIPSEB__) && !defined(MIPSEB) diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index a925842ee125..17fa97ec6ffb 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -469,8 +469,8 @@ static int ar2315_pci_probe(struct platform_device *pdev) if (err) return err; - apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT, - &ar2315_pci_irq_domain_ops, apc); + apc->domain = irq_domain_create_linear(NULL, AR2315_PCI_IRQ_COUNT, + &ar2315_pci_irq_domain_ops, apc); if (!apc->domain) { dev_err(dev, "failed to add IRQ domain\n"); return -ENOMEM; diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 4ac68a534e4f..14454ece485d 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -208,9 +208,10 @@ static int rt3883_pci_irq_init(struct device *dev, rt3883_pci_w32(rpc, 0, RT3883_PCI_REG_PCIENA); rpc->irq_domain = - irq_domain_add_linear(rpc->intc_of_node, RT3883_PCI_IRQ_COUNT, - &rt3883_pci_irq_domain_ops, - rpc); + irq_domain_create_linear(of_fwnode_handle(rpc->intc_of_node), + RT3883_PCI_IRQ_COUNT, + &rt3883_pci_irq_domain_ops, + rpc); if (!rpc->irq_domain) { dev_err(dev, "unable to add IRQ domain\n"); return -ENODEV; diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 46aef0a1b22a..af5bbbea949b 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -176,7 +176,7 @@ static int __init intc_of_init(struct device_node *node, /* route all INTC interrupts to MIPS HW0 interrupt */ rt_intc_w32(0, INTC_REG_TYPE); - domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT, + domain = irq_domain_create_legacy(of_fwnode_handle(node), RALINK_INTC_IRQ_COUNT, RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL); 
if (!domain) panic("Failed to add irqdomain"); diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c index 8fa280660051..73568d8e21e0 100644 --- a/arch/nios2/kernel/irq.c +++ b/arch/nios2/kernel/irq.c @@ -69,7 +69,8 @@ void __init init_IRQ(void) BUG_ON(!node); - domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), + NIOS2_CPU_NR_IRQS, &irq_ops, NULL); BUG_ON(!domain); irq_set_default_domain(domain); diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 5b65c9859613..94928d114d4c 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -251,7 +251,7 @@ CONFIG_CIFS=m CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index ecc9ffcc11cd..d8cd7f858b2a 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -283,7 +283,6 @@ CONFIG_NLS_ASCII=m CONFIG_NLS_ISO8859_1=m CONFIG_NLS_ISO8859_2=m CONFIG_NLS_UTF8=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 6722625a406a..c3e0cc83f120 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -277,6 +277,7 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_DYNAMIC_KEY select HAVE_RETHOOK if KPROBES select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE @@ -894,7 +895,7 @@ config DATA_SHIFT int "Data shift" if DATA_SHIFT_BOOL default 24 if STRICT_KERNEL_RWX && PPC64 range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 - range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx + range 14 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 @@ -907,10 +908,10 @@ config DATA_SHIFT On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. Smaller is the alignment, greater is the number of necessary DBATs. - On 8xx, large pages (512kb or 8M) are used to map kernel linear - memory. Aligning to 8M reduces TLB misses as only 8M pages are used - in that case. If PIN_TLB is selected, it must be aligned to 8M as - 8M pages will be pinned. + On 8xx, large pages (16kb or 512kb or 8M) are used to map kernel + linear memory. Aligning to 8M reduces TLB misses as only 8M pages + are used in that case. If PIN_TLB is selected, it must be aligned + to 8M as 8M pages will be pinned. 
config ARCH_FORCE_MAX_ORDER int "Order of maximal physically contiguous allocations" diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 184d0680e661..a7ab087d412c 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -70,6 +70,7 @@ BOOTCPPFLAGS := -nostdinc $(LINUXINCLUDE) BOOTCPPFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include) BOOTCFLAGS := $(BOOTTARGETFLAGS) \ + -std=gnu11 \ -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 \ -msoft-float -mno-altivec -mno-vsx \ diff --git a/arch/powerpc/boot/rs6000.h b/arch/powerpc/boot/rs6000.h index a9d879155ef9..16df8f3c43f1 100644 --- a/arch/powerpc/boot/rs6000.h +++ b/arch/powerpc/boot/rs6000.h @@ -1,11 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* IBM RS/6000 "XCOFF" file definitions for BFD. Copyright (C) 1990, 1991 Free Software Foundation, Inc. - FIXME: Can someone provide a transliteration of this name into ASCII? - Using the following chars caused a compiler warning on HIUX (so I replaced - them with octal escapes), and isn't useful without an understanding of what - character set it is. - Written by Mimi Ph\373\364ng-Th\345o V\365 of IBM + Written by Mimi Phuong-Thao Vo of IBM and John Gilmore of Cygnus Support. */ /********************** FILE HEADER **********************/ diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 7e58f3e6c987..428f17b45513 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -235,7 +235,7 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_BOOTX_TEXT=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MICHAEL_MIC=m diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 6b6d7467fecf..379229c982a4 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -317,7 +317,7 @@ CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5_PPC=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 5fa154185efa..3423c405cad4 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -377,7 +377,7 @@ CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y CONFIG_IMA_ARCH_POLICY=y CONFIG_IMA_APPRAISE_MODSIG=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_SERPENT=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index d2e659a2d8cb..90247b2a0ab0 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -220,7 +220,7 @@ CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index a91a766b71a4..242c1fab9d46 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -1073,7 +1073,7 @@ CONFIG_SECURITY_NETWORK_XFRM=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m 
CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 370db8192ce6..caaa359f4742 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -17,7 +17,6 @@ config CRYPTO_CURVE25519_PPC64 config CRYPTO_MD5_PPC tristate "Digests: MD5" - depends on PPC select CRYPTO_HASH help MD5 message digest algorithm (RFC1321) @@ -26,7 +25,6 @@ config CRYPTO_MD5_PPC config CRYPTO_SHA1_PPC tristate "Hash functions: SHA-1" - depends on PPC help SHA-1 secure hash algorithm (FIPS 180) @@ -34,27 +32,16 @@ config CRYPTO_SHA1_PPC config CRYPTO_SHA1_PPC_SPE tristate "Hash functions: SHA-1 (SPE)" - depends on PPC && SPE + depends on SPE help SHA-1 secure hash algorithm (FIPS 180) Architecture: powerpc using - SPE (Signal Processing Engine) extensions -config CRYPTO_SHA256_PPC_SPE - tristate "Hash functions: SHA-224 and SHA-256 (SPE)" - depends on PPC && SPE - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: powerpc using - - SPE (Signal Processing Engine) extensions - config CRYPTO_AES_PPC_SPE tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)" - depends on PPC && SPE + depends on SPE select CRYPTO_SKCIPHER help Block ciphers: AES cipher algorithms (FIPS-197) @@ -92,33 +79,6 @@ config CRYPTO_AES_GCM_P10 Support for cryptographic acceleration instructions on Power10 or later CPU. This module supports stitched acceleration for AES/GCM. -config CRYPTO_CHACHA20_P10 - tristate - depends on PPC64 && CPU_LITTLE_ENDIAN && VSX - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - default CRYPTO_LIB_CHACHA_INTERNAL - help - Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 - stream cipher algorithms - - Architecture: PowerPC64 - - Power10 or later - - Little-endian - -config CRYPTO_POLY1305_P10 - tristate "Hash functions: Poly1305 (P10 or later)" - depends on PPC64 && CPU_LITTLE_ENDIAN && VSX - select CRYPTO_HASH - select CRYPTO_LIB_POLY1305_GENERIC - help - Poly1305 authenticator algorithm (RFC7539) - - Architecture: PowerPC64 - - Power10 or later - - Little-endian - config CRYPTO_DEV_VMX bool "Support for VMX cryptographic acceleration instructions" depends on PPC64 && VSX diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 2f00b22b0823..8c2936ae466f 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -9,10 +9,7 @@ obj-$(CONFIG_CRYPTO_AES_PPC_SPE) += aes-ppc-spe.o obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o -obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o -obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o -obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o obj-$(CONFIG_CRYPTO_CURVE25519_PPC64) += curve25519-ppc64le.o @@ -20,10 +17,7 @@ aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes- md5-ppc-y := md5-asm.o md5-glue.o sha1-powerpc-y := sha1-powerpc-asm.o sha1.o sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o -sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o -chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o -poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o 
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o curve25519-ppc64le-y := curve25519-ppc64le-core.o curve25519-ppc64le_asm.o diff --git a/arch/powerpc/crypto/aes.c b/arch/powerpc/crypto/aes.c index ec06189fbf99..3f1e5e894902 100644 --- a/arch/powerpc/crypto/aes.c +++ b/arch/powerpc/crypto/aes.c @@ -7,15 +7,15 @@ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> */ -#include <linux/types.h> -#include <linux/err.h> -#include <linux/crypto.h> -#include <linux/delay.h> #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/internal/cipher.h> #include <crypto/internal/simd.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" diff --git a/arch/powerpc/crypto/aes_cbc.c b/arch/powerpc/crypto/aes_cbc.c index ed0debc7acb5..5f2a4f375eef 100644 --- a/arch/powerpc/crypto/aes_cbc.c +++ b/arch/powerpc/crypto/aes_cbc.c @@ -12,6 +12,10 @@ #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" diff --git a/arch/powerpc/crypto/aes_ctr.c b/arch/powerpc/crypto/aes_ctr.c index 3da75f42529a..e27c4036e711 100644 --- a/arch/powerpc/crypto/aes_ctr.c +++ b/arch/powerpc/crypto/aes_ctr.c @@ -12,6 +12,10 @@ #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" diff --git a/arch/powerpc/crypto/aes_xts.c b/arch/powerpc/crypto/aes_xts.c index dabbccb41550..9440e771cede 100644 --- a/arch/powerpc/crypto/aes_xts.c +++ b/arch/powerpc/crypto/aes_xts.c @@ -13,6 +13,10 @@ #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> #include "aesp8-ppc.h" diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c deleted file mode 100644 index d8796decc1fb..000000000000 --- a/arch/powerpc/crypto/chacha-p10-glue.c +++ /dev/null @@ -1,221 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright 2023- IBM Corp. All rights reserved. 
- */ - -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/simd.h> -#include <crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/cpufeature.h> -#include <linux/sizes.h> -#include <asm/simd.h> -#include <asm/switch_to.h> - -asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); - -static void vsx_begin(void) -{ - preempt_disable(); - enable_kernel_vsx(); -} - -static void vsx_end(void) -{ - disable_kernel_vsx(); - preempt_enable(); -} - -static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds) -{ - unsigned int l = bytes & ~0x0FF; - - if (l > 0) { - chacha_p10le_8x(state, dst, src, l, nrounds); - bytes -= l; - src += l; - dst += l; - state[12] += l / CHACHA_BLOCK_SIZE; - } - - if (bytes > 0) - chacha_crypt_generic(state, dst, src, bytes, nrounds); -} - -void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -{ - hchacha_block_generic(state, stream, nrounds); -} -EXPORT_SYMBOL(hchacha_block_arch); - -void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, - int nrounds) -{ - if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE || - !crypto_simd_usable()) - return chacha_crypt_generic(state, dst, src, bytes, nrounds); - - do { - unsigned int todo = min_t(unsigned int, bytes, SZ_4K); - - vsx_begin(); - chacha_p10_do_8x(state, dst, src, todo, nrounds); - vsx_end(); - - bytes -= todo; - src += todo; - dst += todo; - } while (bytes); -} -EXPORT_SYMBOL(chacha_crypt_arch); - -static int chacha_p10_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - if (err) - return err; - - chacha_init(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = rounddown(nbytes, walk.stride); - - if (!crypto_simd_usable()) { - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); - } else { - vsx_begin(); - chacha_p10_do_8x(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, ctx->nrounds); - vsx_end(); - } - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - if (err) - break; - } - - return err; -} - -static int chacha_p10(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_p10_stream_xor(req, ctx, req->iv); -} - -static int xchacha_p10(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - u32 state[16]; - u8 real_iv[16]; - - chacha_init(state, ctx->key, req->iv); - hchacha_block_arch(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); - return chacha_p10_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-p10", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - 
.chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_p10, - .decrypt = chacha_p10, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-p10", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = xchacha_p10, - .decrypt = xchacha_p10, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-p10", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_p10, - .decrypt = xchacha_p10, - } -}; - -static int __init chacha_p10_init(void) -{ - if (!cpu_has_feature(CPU_FTR_ARCH_31)) - return 0; - - static_branch_enable(&have_p10); - - return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -} - -static void __exit chacha_p10_exit(void) -{ - if (!static_branch_likely(&have_p10)) - return; - - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -module_init(chacha_p10_init); -module_exit(chacha_p10_exit); - -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)"); -MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-p10"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-p10"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-p10"); diff --git a/arch/powerpc/crypto/ghash.c b/arch/powerpc/crypto/ghash.c index 77eca20bc7ac..7308735bdb33 100644 --- a/arch/powerpc/crypto/ghash.c +++ b/arch/powerpc/crypto/ghash.c @@ -11,19 +11,18 @@ * Copyright (C) 2014 - 2018 Linaro Ltd. 
<ard.biesheuvel@linaro.org> */ -#include <linux/types.h> -#include <linux/err.h> -#include <linux/crypto.h> -#include <linux/delay.h> -#include <asm/simd.h> +#include "aesp8-ppc.h" #include <asm/switch_to.h> #include <crypto/aes.h> +#include <crypto/gf128mul.h> #include <crypto/ghash.h> -#include <crypto/scatterwalk.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> -#include <crypto/b128ops.h> -#include "aesp8-ppc.h" +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/uaccess.h> void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); @@ -39,15 +38,12 @@ struct p8_ghash_ctx { struct p8_ghash_desc_ctx { u64 shash[2]; - u8 buffer[GHASH_DIGEST_SIZE]; - int bytes; }; static int p8_ghash_init(struct shash_desc *desc) { struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - dctx->bytes = 0; memset(dctx->shash, 0, GHASH_DIGEST_SIZE); return 0; } @@ -74,27 +70,30 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, } static inline void __ghash_block(struct p8_ghash_ctx *ctx, - struct p8_ghash_desc_ctx *dctx) + struct p8_ghash_desc_ctx *dctx, + const u8 *src) { if (crypto_simd_usable()) { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); - gcm_ghash_p8(dctx->shash, ctx->htable, - dctx->buffer, GHASH_DIGEST_SIZE); + gcm_ghash_p8(dctx->shash, ctx->htable, src, GHASH_BLOCK_SIZE); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } else { - crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE); + crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); gf128mul_lle((be128 *)dctx->shash, &ctx->key); } } -static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, - struct p8_ghash_desc_ctx *dctx, - const u8 *src, unsigned int srclen) +static inline int __ghash_blocks(struct p8_ghash_ctx *ctx, + struct p8_ghash_desc_ctx *dctx, + const u8 *src, unsigned int srclen) { + int remain = srclen - round_down(srclen, GHASH_BLOCK_SIZE); + + srclen -= remain; if (crypto_simd_usable()) { preempt_disable(); pagefault_disable(); @@ -105,62 +104,38 @@ static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, pagefault_enable(); preempt_enable(); } else { - while (srclen >= GHASH_BLOCK_SIZE) { + do { crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); gf128mul_lle((be128 *)dctx->shash, &ctx->key); srclen -= GHASH_BLOCK_SIZE; src += GHASH_BLOCK_SIZE; - } + } while (srclen); } + + return remain; } static int p8_ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { - unsigned int len; struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (dctx->bytes) { - if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { - memcpy(dctx->buffer + dctx->bytes, src, - srclen); - dctx->bytes += srclen; - return 0; - } - memcpy(dctx->buffer + dctx->bytes, src, - GHASH_DIGEST_SIZE - dctx->bytes); - - __ghash_block(ctx, dctx); - - src += GHASH_DIGEST_SIZE - dctx->bytes; - srclen -= GHASH_DIGEST_SIZE - dctx->bytes; - dctx->bytes = 0; - } - len = srclen & ~(GHASH_DIGEST_SIZE - 1); - if (len) { - __ghash_blocks(ctx, dctx, src, len); - src += len; - srclen -= len; - } - if (srclen) { - memcpy(dctx->buffer, src, srclen); - dctx->bytes = srclen; - } - return 0; + return __ghash_blocks(ctx, dctx, src, srclen); } -static int p8_ghash_final(struct shash_desc *desc, u8 *out) +static int p8_ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - 
int i; struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - if (dctx->bytes) { - for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) - dctx->buffer[i] = 0; - __ghash_block(ctx, dctx); - dctx->bytes = 0; + if (len) { + u8 buf[GHASH_BLOCK_SIZE] = {}; + + memcpy(buf, src, len); + __ghash_block(ctx, dctx, buf); + memzero_explicit(buf, sizeof(buf)); } memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); return 0; @@ -170,14 +145,14 @@ struct shash_alg p8_ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = p8_ghash_init, .update = p8_ghash_update, - .final = p8_ghash_final, + .finup = p8_ghash_finup, .setkey = p8_ghash_setkey, - .descsize = sizeof(struct p8_ghash_desc_ctx) - + sizeof(struct ghash_desc_ctx), + .descsize = sizeof(struct p8_ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "p8_ghash", .cra_priority = 1000, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct p8_ghash_ctx), .cra_module = THIS_MODULE, diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c index c24f605033bd..204440a90cd8 100644 --- a/arch/powerpc/crypto/md5-glue.c +++ b/arch/powerpc/crypto/md5-glue.c @@ -8,25 +8,13 @@ */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/md5.h> -#include <asm/byteorder.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> extern void ppc_md5_transform(u32 *state, const u8 *src, u32 blocks); -static inline void ppc_md5_clear_context(struct md5_state *sctx) -{ - int count = sizeof(struct md5_state) >> 2; - u32 *ptr = (u32 *)sctx; - - /* make sure we can clear the fast way */ - BUILD_BUG_ON(sizeof(struct md5_state) % 4); - do { *ptr++ = 0; } while (--count); -} - static int ppc_md5_init(struct shash_desc *desc) { struct md5_state *sctx = shash_desc_ctx(desc); @@ -44,79 +32,34 @@ static int ppc_md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->byte_count & 0x3f; - unsigned int avail = 64 - offset; - const u8 *src = data; - sctx->byte_count += len; - - if (avail > len) { - memcpy((char *)sctx->block + offset, src, len); - return 0; - } - - if (offset) { - memcpy((char *)sctx->block + offset, src, avail); - ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1); - len -= avail; - src += avail; - } - - if (len > 63) { - ppc_md5_transform(sctx->hash, src, len >> 6); - src += len & ~0x3f; - len &= 0x3f; - } - - memcpy((char *)sctx->block, src, len); - return 0; + sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE); + ppc_md5_transform(sctx->hash, data, len >> 6); + return len - round_down(len, MD5_HMAC_BLOCK_SIZE); } -static int ppc_md5_final(struct shash_desc *desc, u8 *out) +static int ppc_md5_finup(struct shash_desc *desc, const u8 *src, + unsigned int offset, u8 *out) { struct md5_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->byte_count & 0x3f; - const u8 *src = (const u8 *)sctx->block; - u8 *p = (u8 *)src + offset; - int padlen = 55 - offset; - __le64 *pbits = (__le64 *)((char *)sctx->block + 56); + __le64 block[MD5_BLOCK_WORDS] = {}; + u8 *p = memcpy(block, src, offset); __le32 *dst = (__le32 *)out; + __le64 *pbits; + src = p; + p += offset; *p++ = 0x80; - - if (padlen < 0) { - memset(p, 0x00, padlen + sizeof (u64)); - ppc_md5_transform(sctx->hash, src, 1); - p 
= (char *)sctx->block; - padlen = 56; - } - - memset(p, 0, padlen); + sctx->byte_count += offset; + pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1]; *pbits = cpu_to_le64(sctx->byte_count << 3); - ppc_md5_transform(sctx->hash, src, 1); + ppc_md5_transform(sctx->hash, src, (pbits - block + 1) / 8); + memzero_explicit(block, sizeof(block)); dst[0] = cpu_to_le32(sctx->hash[0]); dst[1] = cpu_to_le32(sctx->hash[1]); dst[2] = cpu_to_le32(sctx->hash[2]); dst[3] = cpu_to_le32(sctx->hash[3]); - - ppc_md5_clear_context(sctx); - return 0; -} - -static int ppc_md5_export(struct shash_desc *desc, void *out) -{ - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int ppc_md5_import(struct shash_desc *desc, const void *in) -{ - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); return 0; } @@ -124,15 +67,13 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = ppc_md5_init, .update = ppc_md5_update, - .final = ppc_md5_final, - .export = ppc_md5_export, - .import = ppc_md5_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .finup = ppc_md5_finup, + .descsize = MD5_STATE_SIZE, .base = { .cra_name = "md5", .cra_driver_name= "md5-ppc", .cra_priority = 200, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c deleted file mode 100644 index 369686e9370b..000000000000 --- a/arch/powerpc/crypto/poly1305-p10-glue.c +++ /dev/null @@ -1,186 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Poly1305 authenticator algorithm, RFC7539. - * - * Copyright 2023- IBM Corp. All rights reserved. 
- */ - -#include <crypto/algapi.h> -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/jump_label.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <crypto/internal/simd.h> -#include <linux/cpufeature.h> -#include <linux/unaligned.h> -#include <asm/simd.h> -#include <asm/switch_to.h> - -asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen); -asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit); -asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst); - -static void vsx_begin(void) -{ - preempt_disable(); - enable_kernel_vsx(); -} - -static void vsx_end(void) -{ - disable_kernel_vsx(); - preempt_enable(); -} - -static int crypto_poly1305_p10_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - poly1305_core_init(&dctx->h); - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx, - const u8 *inp, unsigned int len) -{ - unsigned int acc = 0; - - if (unlikely(!dctx->sset)) { - if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) { - struct poly1305_core_key *key = &dctx->core_r; - - key->key.r64[0] = get_unaligned_le64(&inp[0]); - key->key.r64[1] = get_unaligned_le64(&inp[8]); - inp += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - acc += POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (len >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(&inp[0]); - dctx->s[1] = get_unaligned_le32(&inp[4]); - dctx->s[2] = get_unaligned_le32(&inp[8]); - dctx->s[3] = get_unaligned_le32(&inp[12]); - acc += POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } - return acc; -} - -static int crypto_poly1305_p10_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes, used; - - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - srclen -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, - POLY1305_BLOCK_SIZE))) { - vsx_begin(); - poly1305_64s(&dctx->h, dctx->buf, - POLY1305_BLOCK_SIZE, 1); - vsx_end(); - } - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { - bytes = round_down(srclen, POLY1305_BLOCK_SIZE); - used = crypto_poly1305_setdctxkey(dctx, src, bytes); - if (likely(used)) { - srclen -= used; - src += used; - } - if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) { - vsx_begin(); - poly1305_p10le_4blocks(&dctx->h, src, srclen); - vsx_end(); - src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4)); - srclen %= POLY1305_BLOCK_SIZE * 4; - } - while (srclen >= POLY1305_BLOCK_SIZE) { - vsx_begin(); - poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1); - vsx_end(); - srclen -= POLY1305_BLOCK_SIZE; - src += POLY1305_BLOCK_SIZE; - } - } - - if (unlikely(srclen)) { - dctx->buflen = srclen; - memcpy(dctx->buf, src, srclen); - } - - return 0; -} - -static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - if ((dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); - vsx_begin(); - poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 
0); - vsx_end(); - dctx->buflen = 0; - } - - poly1305_emit_64(&dctx->h, &dctx->s, dst); - return 0; -} - -static struct shash_alg poly1305_alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_p10_init, - .update = crypto_poly1305_p10_update, - .final = crypto_poly1305_p10_final, - .descsize = sizeof(struct poly1305_desc_ctx), - .base = { - .cra_name = "poly1305", - .cra_driver_name = "poly1305-p10", - .cra_priority = 300, - .cra_blocksize = POLY1305_BLOCK_SIZE, - .cra_module = THIS_MODULE, - }, -}; - -static int __init poly1305_p10_init(void) -{ - return crypto_register_shash(&poly1305_alg); -} - -static void __exit poly1305_p10_exit(void) -{ - crypto_unregister_shash(&poly1305_alg); -} - -module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init); -module_exit(poly1305_p10_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); -MODULE_DESCRIPTION("Optimized Poly1305 for P10"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-p10"); diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c index 9170892a8557..04c88e173ce1 100644 --- a/arch/powerpc/crypto/sha1-spe-glue.c +++ b/arch/powerpc/crypto/sha1-spe-glue.c @@ -7,16 +7,13 @@ * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> */ +#include <asm/switch_to.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/byteorder.h> -#include <asm/switch_to.h> -#include <linux/hardirq.h> +#include <linux/kernel.h> +#include <linux/preempt.h> +#include <linux/module.h> /* * MAX_BYTES defines the number of bytes that are allowed to be processed @@ -30,7 +27,7 @@ */ #define MAX_BYTES 2048 -extern void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks); +asmlinkage void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks); static void spe_begin(void) { @@ -46,126 +43,45 @@ static void spe_end(void) preempt_enable(); } -static inline void ppc_sha1_clear_context(struct sha1_state *sctx) +static void ppc_spe_sha1_block(struct sha1_state *sctx, const u8 *src, + int blocks) { - int count = sizeof(struct sha1_state) >> 2; - u32 *ptr = (u32 *)sctx; - - /* make sure we can clear the fast way */ - BUILD_BUG_ON(sizeof(struct sha1_state) % 4); - do { *ptr++ = 0; } while (--count); -} - -static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - const unsigned int avail = 64 - offset; - unsigned int bytes; - const u8 *src = data; - - if (avail > len) { - sctx->count += len; - memcpy((char *)sctx->buffer + offset, src, len); - return 0; - } - - sctx->count += len; - - if (offset) { - memcpy((char *)sctx->buffer + offset, src, avail); + do { + int unit = min(blocks, MAX_BYTES / SHA1_BLOCK_SIZE); spe_begin(); - ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1); + ppc_spe_sha1_transform(sctx->state, src, unit); spe_end(); - len -= avail; - src += avail; - } - - while (len > 63) { - bytes = (len > MAX_BYTES) ? 
MAX_BYTES : len; - bytes = bytes & ~0x3f; - - spe_begin(); - ppc_spe_sha1_transform(sctx->state, src, bytes >> 6); - spe_end(); - - src += bytes; - len -= bytes; - } - - memcpy((char *)sctx->buffer, src, len); - return 0; -} - -static int ppc_spe_sha1_final(struct shash_desc *desc, u8 *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - char *p = (char *)sctx->buffer + offset; - int padlen; - __be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56); - __be32 *dst = (__be32 *)out; - - padlen = 55 - offset; - *p++ = 0x80; - - spe_begin(); - - if (padlen < 0) { - memset(p, 0x00, padlen + sizeof (u64)); - ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1); - p = (char *)sctx->buffer; - padlen = 56; - } - - memset(p, 0, padlen); - *pbits = cpu_to_be64(sctx->count << 3); - ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1); - - spe_end(); - - dst[0] = cpu_to_be32(sctx->state[0]); - dst[1] = cpu_to_be32(sctx->state[1]); - dst[2] = cpu_to_be32(sctx->state[2]); - dst[3] = cpu_to_be32(sctx->state[3]); - dst[4] = cpu_to_be32(sctx->state[4]); - - ppc_sha1_clear_context(sctx); - return 0; + src += unit * SHA1_BLOCK_SIZE; + blocks -= unit; + } while (blocks); } -static int ppc_spe_sha1_export(struct shash_desc *desc, void *out) +static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; + return sha1_base_do_update_blocks(desc, data, len, ppc_spe_sha1_block); } -static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in) +static int ppc_spe_sha1_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; + sha1_base_do_finup(desc, src, len, ppc_spe_sha1_block); + return sha1_base_finish(desc, out); } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = ppc_spe_sha1_update, - .final = ppc_spe_sha1_final, - .export = ppc_spe_sha1_export, - .import = ppc_spe_sha1_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = ppc_spe_sha1_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-ppc-spe", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c index f283bbd3f121..4593946aa9b3 100644 --- a/arch/powerpc/crypto/sha1.c +++ b/arch/powerpc/crypto/sha1.c @@ -13,107 +13,46 @@ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/byteorder.h> - -void powerpc_sha_transform(u32 *state, const u8 *src); - -static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial, done; - const u8 *src; - - partial = sctx->count & 0x3f; - sctx->count += len; - done = 0; - src = data; - - if ((partial + len) > 63) { - - if (partial) { - done = -partial; - memcpy(sctx->buffer + partial, data, done + 64); - src = sctx->buffer; - } - - do { - powerpc_sha_transform(sctx->state, src); - done += 64; - src = data + done; - } while (done + 63 < 
len); - - partial = 0; - } - memcpy(sctx->buffer + partial, src, len - done); - - return 0; -} +#include <linux/kernel.h> +#include <linux/module.h> +asmlinkage void powerpc_sha_transform(u32 *state, const u8 *src); -/* Add padding and return the message digest. */ -static int powerpc_sha1_final(struct shash_desc *desc, u8 *out) +static void powerpc_sha_block(struct sha1_state *sctx, const u8 *data, + int blocks) { - struct sha1_state *sctx = shash_desc_ctx(desc); - __be32 *dst = (__be32 *)out; - u32 i, index, padlen; - __be64 bits; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64 */ - index = sctx->count & 0x3f; - padlen = (index < 56) ? (56 - index) : ((64+56) - index); - powerpc_sha1_update(desc, padding, padlen); - - /* Append length */ - powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); - - /* Store state in digest */ - for (i = 0; i < 5; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof *sctx); - - return 0; + do { + powerpc_sha_transform(sctx->state, data); + data += 64; + } while (--blocks); } -static int powerpc_sha1_export(struct shash_desc *desc, void *out) +static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; + return sha1_base_do_update_blocks(desc, data, len, powerpc_sha_block); } -static int powerpc_sha1_import(struct shash_desc *desc, const void *in) +/* Add padding and return the message digest. */ +static int powerpc_sha1_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; + sha1_base_do_finup(desc, src, len, powerpc_sha_block); + return sha1_base_finish(desc, out); } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = powerpc_sha1_update, - .final = powerpc_sha1_final, - .export = powerpc_sha1_export, - .import = powerpc_sha1_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = powerpc_sha1_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-powerpc", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c deleted file mode 100644 index 2997d13236e0..000000000000 --- a/arch/powerpc/crypto/sha256-spe-glue.c +++ /dev/null @@ -1,235 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Glue code for SHA-256 implementation for SPE instructions (PPC) - * - * Based on generic implementation. The assembler module takes care - * about the SPE registers so it can run from interrupt context. - * - * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> - */ - -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <asm/byteorder.h> -#include <asm/switch_to.h> -#include <linux/hardirq.h> - -/* - * MAX_BYTES defines the number of bytes that are allowed to be processed - * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000 - * operations per 64 bytes. 
e500 cores can issue two arithmetic instructions - * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2). - * Thus 1KB of input data will need an estimated maximum of 18,000 cycles. - * Headroom for cache misses included. Even with the low end model clocked - * at 667 MHz this equals to a critical time window of less than 27us. - * - */ -#define MAX_BYTES 1024 - -extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks); - -static void spe_begin(void) -{ - /* We just start SPE operations and will save SPE registers later. */ - preempt_disable(); - enable_kernel_spe(); -} - -static void spe_end(void) -{ - disable_kernel_spe(); - /* reenable preemption */ - preempt_enable(); -} - -static inline void ppc_sha256_clear_context(struct sha256_state *sctx) -{ - int count = sizeof(struct sha256_state) >> 2; - u32 *ptr = (u32 *)sctx; - - /* make sure we can clear the fast way */ - BUILD_BUG_ON(sizeof(struct sha256_state) % 4); - do { *ptr++ = 0; } while (--count); -} - -static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - const unsigned int avail = 64 - offset; - unsigned int bytes; - const u8 *src = data; - - if (avail > len) { - sctx->count += len; - memcpy((char *)sctx->buf + offset, src, len); - return 0; - } - - sctx->count += len; - - if (offset) { - memcpy((char *)sctx->buf + offset, src, avail); - - spe_begin(); - ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1); - spe_end(); - - len -= avail; - src += avail; - } - - while (len > 63) { - /* cut input data into smaller blocks */ - bytes = (len > MAX_BYTES) ? MAX_BYTES : len; - bytes = bytes & ~0x3f; - - spe_begin(); - ppc_spe_sha256_transform(sctx->state, src, bytes >> 6); - spe_end(); - - src += bytes; - len -= bytes; - } - - memcpy((char *)sctx->buf, src, len); - return 0; -} - -static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - const unsigned int offset = sctx->count & 0x3f; - char *p = (char *)sctx->buf + offset; - int padlen; - __be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56); - __be32 *dst = (__be32 *)out; - - padlen = 55 - offset; - *p++ = 0x80; - - spe_begin(); - - if (padlen < 0) { - memset(p, 0x00, padlen + sizeof (u64)); - ppc_spe_sha256_transform(sctx->state, sctx->buf, 1); - p = (char *)sctx->buf; - padlen = 56; - } - - memset(p, 0, padlen); - *pbits = cpu_to_be64(sctx->count << 3); - ppc_spe_sha256_transform(sctx->state, sctx->buf, 1); - - spe_end(); - - dst[0] = cpu_to_be32(sctx->state[0]); - dst[1] = cpu_to_be32(sctx->state[1]); - dst[2] = cpu_to_be32(sctx->state[2]); - dst[3] = cpu_to_be32(sctx->state[3]); - dst[4] = cpu_to_be32(sctx->state[4]); - dst[5] = cpu_to_be32(sctx->state[5]); - dst[6] = cpu_to_be32(sctx->state[6]); - dst[7] = cpu_to_be32(sctx->state[7]); - - ppc_sha256_clear_context(sctx); - return 0; -} - -static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out) -{ - __be32 D[SHA256_DIGEST_SIZE >> 2]; - __be32 *dst = (__be32 *)out; - - ppc_spe_sha256_final(desc, (u8 *)D); - - /* avoid bytewise memcpy */ - dst[0] = D[0]; - dst[1] = D[1]; - dst[2] = D[2]; - dst[3] = D[3]; - dst[4] = D[4]; - dst[5] = D[5]; - dst[6] = D[6]; - - /* clear sensitive data */ - memzero_explicit(D, SHA256_DIGEST_SIZE); - return 0; -} - -static int ppc_spe_sha256_export(struct shash_desc *desc, void *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - 
- memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; -} - -static struct shash_alg algs[2] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = ppc_spe_sha256_update, - .final = ppc_spe_sha256_final, - .export = ppc_spe_sha256_export, - .import = ppc_spe_sha256_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-ppc-spe", - .cra_priority = 300, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = ppc_spe_sha256_update, - .final = ppc_spe_sha224_final, - .export = ppc_spe_sha256_export, - .import = ppc_spe_sha256_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-ppc-spe", - .cra_priority = 300, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init ppc_spe_sha256_mod_init(void) -{ - return crypto_register_shashes(algs, ARRAY_SIZE(algs)); -} - -static void __exit ppc_spe_sha256_mod_fini(void) -{ - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); -} - -module_init(ppc_spe_sha256_mod_init); -module_exit(ppc_spe_sha256_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, SPE optimized"); - -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha224-ppc-spe"); -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha256-ppc-spe"); diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h index d107abe1468f..acd61eb36d59 100644 --- a/arch/powerpc/include/asm/guest-state-buffer.h +++ b/arch/powerpc/include/asm/guest-state-buffer.h @@ -28,6 +28,21 @@ /* Process Table Info */ #define KVMPPC_GSID_PROCESS_TABLE 0x0006 +/* Guest Management Heap Size */ +#define KVMPPC_GSID_L0_GUEST_HEAP 0x0800 + +/* Guest Management Heap Max Size */ +#define KVMPPC_GSID_L0_GUEST_HEAP_MAX 0x0801 + +/* Guest Pagetable Size */ +#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE 0x0802 + +/* Guest Pagetable Max Size */ +#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX 0x0803 + +/* Guest Pagetable Reclaim in bytes */ +#define KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM 0x0804 + /* H_GUEST_RUN_VCPU input buffer Info */ #define KVMPPC_GSID_RUN_INPUT 0x0C00 /* H_GUEST_RUN_VCPU output buffer Info */ @@ -106,6 +121,11 @@ #define KVMPPC_GSE_GUESTWIDE_COUNT \ (KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1) +#define KVMPPC_GSE_HOSTWIDE_START KVMPPC_GSID_L0_GUEST_HEAP +#define KVMPPC_GSE_HOSTWIDE_END KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM +#define KVMPPC_GSE_HOSTWIDE_COUNT \ + (KVMPPC_GSE_HOSTWIDE_END - KVMPPC_GSE_HOSTWIDE_START + 1) + #define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT #define KVMPPC_GSE_META_END KVMPPC_GSID_VPA #define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1) @@ -130,7 +150,8 @@ (KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1) #define KVMPPC_GSE_IDEN_COUNT \ - (KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \ + (KVMPPC_GSE_HOSTWIDE_COUNT + \ + KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \ KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \ KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT) @@ -139,10 +160,11 
@@ */ enum { KVMPPC_GS_CLASS_GUESTWIDE = 0x01, - KVMPPC_GS_CLASS_META = 0x02, - KVMPPC_GS_CLASS_DWORD_REG = 0x04, - KVMPPC_GS_CLASS_WORD_REG = 0x08, - KVMPPC_GS_CLASS_VECTOR = 0x10, + KVMPPC_GS_CLASS_HOSTWIDE = 0x02, + KVMPPC_GS_CLASS_META = 0x04, + KVMPPC_GS_CLASS_DWORD_REG = 0x08, + KVMPPC_GS_CLASS_WORD_REG = 0x10, + KVMPPC_GS_CLASS_VECTOR = 0x18, KVMPPC_GS_CLASS_INTR = 0x20, }; @@ -164,6 +186,7 @@ enum { */ enum { KVMPPC_GS_FLAGS_WIDE = 0x01, + KVMPPC_GS_FLAGS_HOST_WIDE = 0x02, }; /** @@ -287,7 +310,7 @@ struct kvmppc_gs_msg_ops { * struct kvmppc_gs_msg - a guest state message * @bitmap: the guest state ids that should be included * @ops: modify message behavior for reading and writing to buffers - * @flags: guest wide or thread wide + * @flags: host wide, guest wide or thread wide * @data: location where buffer data will be written to or from. * * A guest state message is allows flexibility in sending in receiving data diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index eeef13db2770..6df6dbbe1e7c 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -490,14 +490,15 @@ #define H_RPTI_PAGE_ALL (-1UL) /* Flags for H_GUEST_{S,G}_STATE */ -#define H_GUEST_FLAGS_WIDE (1UL<<(63-0)) +#define H_GUEST_FLAGS_WIDE (1UL << (63 - 0)) +#define H_GUEST_FLAGS_HOST_WIDE (1UL << (63 - 1)) /* Flag values used for H_{S,G}SET_GUEST_CAPABILITIES */ -#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0)) -#define H_GUEST_CAP_POWER9 (1UL<<(63-1)) -#define H_GUEST_CAP_POWER10 (1UL<<(63-2)) -#define H_GUEST_CAP_POWER11 (1UL<<(63-3)) -#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63)) +#define H_GUEST_CAP_COPY_MEM (1UL << (63 - 0)) +#define H_GUEST_CAP_POWER9 (1UL << (63 - 1)) +#define H_GUEST_CAP_POWER10 (1UL << (63 - 2)) +#define H_GUEST_CAP_POWER11 (1UL << (63 - 3)) +#define H_GUEST_CAP_BITMAP2 (1UL << (63 - 63)) /* * Defines for H_HTM - Macros for hardware trace macro (HTM) function. diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 91be7b885944..f2b6cc4341bb 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -65,6 +65,14 @@ static inline long register_dtl(unsigned long cpu, unsigned long vpa) return vpa_call(H_VPA_REG_DTL, cpu, vpa); } +/* + * Invokes H_HTM hcall with parameters passed from htm_hcall_wrapper. + * flags: Set to hardwareTarget. + * target: Specifies target using node index, nodal chip index and core index. + * operation : action to perform ie configure, start, stop, deconfigure, trace + * based on the HTM type. + * param1, param2, param3: parameters for each action. 
+ */ static inline long htm_call(unsigned long flags, unsigned long target, unsigned long operation, unsigned long param1, unsigned long param2, unsigned long param3) @@ -73,17 +81,17 @@ static inline long htm_call(unsigned long flags, unsigned long target, param1, param2, param3); } -static inline long htm_get_dump_hardware(unsigned long nodeindex, +static inline long htm_hcall_wrapper(unsigned long flags, unsigned long nodeindex, unsigned long nodalchipindex, unsigned long coreindexonchip, - unsigned long type, unsigned long addr, unsigned long size, - unsigned long offset) + unsigned long type, unsigned long htm_op, unsigned long param1, unsigned long param2, + unsigned long param3) { - return htm_call(H_HTM_FLAGS_HARDWARE_TARGET, + return htm_call(H_HTM_FLAGS_HARDWARE_TARGET | flags, H_HTM_TARGET_NODE_INDEX(nodeindex) | H_HTM_TARGET_NODAL_CHIP_INDEX(nodalchipindex) | H_HTM_TARGET_CORE_INDEX_ON_CHIP(coreindexonchip), - H_HTM_OP(H_HTM_OP_DUMP_DATA) | H_HTM_TYPE(type), - addr, size, offset); + H_HTM_OP(htm_op) | H_HTM_TYPE(type), + param1, param2, param3); } extern void vpa_init(int cpu); diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h new file mode 100644 index 000000000000..000e2b9681f3 --- /dev/null +++ b/arch/powerpc/include/asm/preempt.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_POWERPC_PREEMPT_H +#define __ASM_POWERPC_PREEMPT_H + +#include <asm-generic/preempt.h> + +#if defined(CONFIG_PREEMPT_DYNAMIC) +#include <linux/jump_label.h> +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +#define need_irq_preemption() \ + (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched)) +#else +#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION)) +#endif + +#endif /* __ASM_POWERPC_PREEMPT_H */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 04406162fc5a..75fa0293c508 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -515,6 +515,10 @@ extern char rtas_data_buf[RTAS_DATA_BUF_SIZE]; extern unsigned long rtas_rmo_buf; extern struct mutex rtas_ibm_get_vpd_lock; +extern struct mutex rtas_ibm_get_indices_lock; +extern struct mutex rtas_ibm_set_dynamic_indicator_lock; +extern struct mutex rtas_ibm_get_dynamic_sensor_state_lock; +extern struct mutex rtas_ibm_physical_attestation_lock; #define GLOBAL_INTERRUPT_QUEUE 9005 diff --git a/arch/powerpc/include/uapi/asm/papr-indices.h b/arch/powerpc/include/uapi/asm/papr-indices.h new file mode 100644 index 000000000000..c2999d89d52a --- /dev/null +++ b/arch/powerpc/include/uapi/asm/papr-indices.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PAPR_INDICES_H_ +#define _UAPI_PAPR_INDICES_H_ + +#include <linux/types.h> +#include <asm/ioctl.h> +#include <asm/papr-miscdev.h> + +#define LOC_CODE_SIZE 80 +#define RTAS_GET_INDICES_BUF_SIZE SZ_4K + +struct papr_indices_io_block { + union { + struct { + __u8 is_sensor; /* 0 for indicator and 1 for sensor */ + __u32 indice_type; + } indices; + struct { + __u32 token; /* Sensor or indicator token */ + __u32 state; /* get / set state */ + /* + * PAPR+ 12.3.2.4 Converged Location Code Rules - Length + * Restrictions. 79 characters plus null. + */ + char location_code_str[LOC_CODE_SIZE]; /* location code */ + } dynamic_param; + }; +}; + +/* + * ioctls for /dev/papr-indices. 
+ * PAPR_INDICES_IOC_GET: Returns a get-indices handle fd to read data + * PAPR_DYNAMIC_SENSOR_IOC_GET: Gets the state of the input sensor + * PAPR_DYNAMIC_INDICATOR_IOC_SET: Sets the new state for the input indicator + */ +#define PAPR_INDICES_IOC_GET _IOW(PAPR_MISCDEV_IOC_ID, 3, struct papr_indices_io_block) +#define PAPR_DYNAMIC_SENSOR_IOC_GET _IOWR(PAPR_MISCDEV_IOC_ID, 4, struct papr_indices_io_block) +#define PAPR_DYNAMIC_INDICATOR_IOC_SET _IOW(PAPR_MISCDEV_IOC_ID, 5, struct papr_indices_io_block) + + +#endif /* _UAPI_PAPR_INDICES_H_ */ diff --git a/arch/powerpc/include/uapi/asm/papr-physical-attestation.h b/arch/powerpc/include/uapi/asm/papr-physical-attestation.h new file mode 100644 index 000000000000..ea746837bb9a --- /dev/null +++ b/arch/powerpc/include/uapi/asm/papr-physical-attestation.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PAPR_PHYSICAL_ATTESTATION_H_ +#define _UAPI_PAPR_PHYSICAL_ATTESTATION_H_ + +#include <linux/types.h> +#include <asm/ioctl.h> +#include <asm/papr-miscdev.h> + +#define PAPR_PHYATTEST_MAX_INPUT 4084 /* Max 4K buffer: 4K-12 */ + +/* + * Defined in PAPR 2.13+ 21.6 Attestation Command Structures. + * User space pass this struct and the max size should be 4K. + */ +struct papr_phy_attest_io_block { + __u8 version; + __u8 command; + __u8 TCG_major_ver; + __u8 TCG_minor_ver; + __be32 length; + __be32 correlator; + __u8 payload[PAPR_PHYATTEST_MAX_INPUT]; +}; + +/* + * ioctl for /dev/papr-physical-attestation. Returns a attestation + * command fd handle + */ +#define PAPR_PHY_ATTEST_IOC_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 8, struct papr_phy_attest_io_block) + +#endif /* _UAPI_PAPR_PHYSICAL_ATTESTATION_H_ */ diff --git a/arch/powerpc/include/uapi/asm/papr-platform-dump.h b/arch/powerpc/include/uapi/asm/papr-platform-dump.h new file mode 100644 index 000000000000..8a1c060e89a9 --- /dev/null +++ b/arch/powerpc/include/uapi/asm/papr-platform-dump.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_PAPR_PLATFORM_DUMP_H_ +#define _UAPI_PAPR_PLATFORM_DUMP_H_ + +#include <linux/types.h> +#include <asm/ioctl.h> +#include <asm/papr-miscdev.h> + +/* + * ioctl for /dev/papr-platform-dump. Returns a platform-dump handle fd + * corresponding to dump tag. + */ +#define PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE _IOW(PAPR_MISCDEV_IOC_ID, 6, __u64) +#define PAPR_PLATFORM_DUMP_IOC_INVALIDATE _IOW(PAPR_MISCDEV_IOC_ID, 7, __u64) + +#endif /* _UAPI_PAPR_PLATFORM_DUMP_H_ */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 6ac621155ec3..9d1ab3971694 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -160,9 +160,7 @@ endif obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o -ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)(CONFIG_PPC_BOOK3S),) obj-y += ppc_save_regs.o -endif obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index df16c7f547ab..8ca49e40c473 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -290,10 +290,8 @@ static void __init fadump_show_config(void) if (!fw_dump.fadump_supported) return; - pr_debug("Fadump enabled : %s\n", - (fw_dump.fadump_enabled ? "yes" : "no")); - pr_debug("Dump Active : %s\n", - (fw_dump.dump_active ? 
"yes" : "no")); + pr_debug("Fadump enabled : %s\n", str_yes_no(fw_dump.fadump_enabled)); + pr_debug("Dump Active : %s\n", str_yes_no(fw_dump.dump_active)); pr_debug("Dump section sizes:\n"); pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size); pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size); diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c index 8f4acc55407b..e0c681d0b076 100644 --- a/arch/powerpc/kernel/interrupt.c +++ b/arch/powerpc/kernel/interrupt.c @@ -25,6 +25,10 @@ unsigned long global_dbcr0[NR_CPUS]; #endif +#if defined(CONFIG_PREEMPT_DYNAMIC) +DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +#endif + #ifdef CONFIG_PPC_BOOK3S_64 DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant); static inline bool exit_must_hard_disable(void) @@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) /* Returning to a kernel context with local irqs enabled. */ WARN_ON_ONCE(!(regs->msr & MSR_EE)); again: - if (IS_ENABLED(CONFIG_PREEMPTION)) { + if (need_irq_preemption()) { /* Return to preemptible kernel context */ if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) { if (preempt_count() == 0) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 0ebae6e4c19d..244eb4857e7f 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -16,6 +16,7 @@ #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/dma-mapping.h> #include <linux/bitmap.h> #include <linux/iommu-helper.h> @@ -769,8 +770,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, iommu_table_clear(tbl); if (!welcomed) { - printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", - novmerge ? 
"disabled" : "enabled"); + pr_info("IOMMU table initialized, virtual merging %s\n", + str_disabled_enabled(novmerge)); welcomed = 1; } diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c index 3816a2bf2b84..d083b4517065 100644 --- a/arch/powerpc/kernel/proc_powerpc.c +++ b/arch/powerpc/kernel/proc_powerpc.c @@ -9,6 +9,7 @@ #include <linux/proc_fs.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/string.h> #include <asm/machdep.h> #include <asm/vdso_datapage.h> @@ -56,7 +57,7 @@ static int __init proc_ppc64_init(void) { struct proc_dir_entry *pde; - strcpy((char *)systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); + strscpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); systemcfg->version.major = SYSTEMCFG_MAJOR; systemcfg->version.minor = SYSTEMCFG_MINOR; systemcfg->processor = mfspr(SPRN_PVR); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ef91f71e07c4..855e09886503 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1000,7 +1000,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk) WARN_ON(tm_suspend_disabled); - TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " + TM_DEBUG("---- tm_reclaim on pid %d (NIP=%lx, " "ccr=%lx, msr=%lx, trap=%lx)\n", tsk->pid, thr->regs->nip, thr->regs->ccr, thr->regs->msr, @@ -1008,7 +1008,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk) tm_reclaim_thread(thr, TM_CAUSE_RESCHED); - TM_DEBUG("--- tm_reclaim on pid %d complete\n", + TM_DEBUG("---- tm_reclaim on pid %d complete\n", tsk->pid); out_and_saveregs: @@ -2367,14 +2367,14 @@ void __no_sanitize_address show_stack(struct task_struct *tsk, (sp + STACK_INT_FRAME_REGS); lr = regs->link; - printk("%s--- interrupt: %lx at %pS\n", + printk("%s---- interrupt: %lx at %pS\n", loglvl, regs->trap, (void *)regs->nip); // Detect the case of an empty pt_regs at the very base // of the stack and suppress showing it in full. if (!empty_user_regs(regs, tsk)) { __show_regs(regs); - printk("%s--- interrupt: %lx\n", loglvl, regs->trap); + printk("%s---- interrupt: %lx\n", loglvl, regs->trap); } firstframe = 1; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index d7a738f1858d..e61245c4468e 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -92,12 +92,12 @@ struct rtas_function { * Per-function locks for sequence-based RTAS functions. 
*/ static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock); -static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock); -static DEFINE_MUTEX(rtas_ibm_get_indices_lock); static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock); -static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock); -static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock); +DEFINE_MUTEX(rtas_ibm_physical_attestation_lock); DEFINE_MUTEX(rtas_ibm_get_vpd_lock); +DEFINE_MUTEX(rtas_ibm_get_indices_lock); +DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock); +DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock); static struct rtas_function rtas_function_table[] __ro_after_init = { [RTAS_FNIDX__CHECK_EXCEPTION] = { diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S index 2c1b24100eca..3565c67fc638 100644 --- a/arch/powerpc/kernel/trace/ftrace_entry.S +++ b/arch/powerpc/kernel/trace/ftrace_entry.S @@ -212,10 +212,10 @@ bne- 1f mr r3, r15 +1: mtlr r3 .if \allregs == 0 REST_GPR(15, r1) .endif -1: mtlr r3 #endif /* Restore gprs */ diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index 9ac3266e4965..a325c1c02f96 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -359,7 +359,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs) if (TRAP(regs) == INTERRUPT_SYSTEM_RESET) is_via_system_reset = 1; - crash_smp_send_stop(); + if (IS_ENABLED(CONFIG_SMP)) + crash_smp_send_stop(); + else + crash_kexec_prepare(); crash_save_cpu(regs, crashing_cpu); diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index dbfdc126bf14..2f2702c867f7 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -83,6 +83,7 @@ config KVM_BOOK3S_64_HV depends on KVM_BOOK3S_64 && PPC_POWERNV select KVM_BOOK3S_HV_POSSIBLE select KVM_GENERIC_MMU_NOTIFIER + select KVM_BOOK3S_HV_PMU select CMA help Support running unmodified book3s_64 guest kernels in @@ -171,6 +172,18 @@ config KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND those buggy L1s which saves the L2 state, at the cost of performance in all nested-capable guest entry/exit. +config KVM_BOOK3S_HV_PMU + tristate "Hypervisor Perf events for KVM Book3s-HV" + depends on KVM_BOOK3S_64_HV + help + Enable Book3s-HV Hypervisor Perf events PMU named 'kvm-hv'. These + Perf events give an overview of hypervisor performance overall + instead of a specific guests. Currently the PMU reports + L0-Hypervisor stats on a kvm-hv enabled PSeries LPAR like: + * Total/Used Guest-Heap + * Total/Used Guest Page-table Memory + * Total amount of Guest Page-table Memory reclaimed + config KVM_BOOKE_HV bool diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 19f4d298dd17..7667563fb9ff 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -6541,10 +6541,6 @@ static struct kvmppc_ops kvm_ops_hv = { .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, .hcall_implemented = kvmppc_hcall_impl_hv, -#ifdef CONFIG_KVM_XICS - .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv, - .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv, -#endif .configure_mmu = kvmhv_configure_mmu, .get_rmmu_info = kvmhv_get_rmmu_info, .set_smt_mode = kvmhv_set_smt_mode, @@ -6662,6 +6658,22 @@ static int kvmppc_book3s_init_hv(void) return r; } +#if defined(CONFIG_KVM_XICS) + /* + * IRQ bypass is supported only for interrupts whose EOI operations are + * handled via OPAL calls. 
Therefore, register IRQ bypass handlers + * exclusively for PowerNV KVM when booted with 'xive=off', indicating + * the use of the emulated XICS interrupt controller. + */ + if (!kvmhv_on_pseries()) { + pr_info("KVM-HV: Enabling IRQ bypass\n"); + kvm_ops_hv.irq_bypass_add_producer = + kvmppc_irq_bypass_add_producer_hv; + kvm_ops_hv.irq_bypass_del_producer = + kvmppc_irq_bypass_del_producer_hv; + } +#endif + kvm_ops_hv.owner = THIS_MODULE; kvmppc_hv_ops = &kvm_ops_hv; diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c index e5c7ce1fb761..87691cf86cae 100644 --- a/arch/powerpc/kvm/book3s_hv_nestedv2.c +++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c @@ -123,6 +123,12 @@ static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm) case KVMPPC_GSID_PROCESS_TABLE: case KVMPPC_GSID_RUN_INPUT: case KVMPPC_GSID_RUN_OUTPUT: + /* Host wide counters */ + case KVMPPC_GSID_L0_GUEST_HEAP: + case KVMPPC_GSID_L0_GUEST_HEAP_MAX: + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE: + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX: + case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM: break; default: size += kvmppc_gse_total_size(kvmppc_gsid_size(iden)); diff --git a/arch/powerpc/kvm/guest-state-buffer.c b/arch/powerpc/kvm/guest-state-buffer.c index b80dbc58621f..871cf60ddeb6 100644 --- a/arch/powerpc/kvm/guest-state-buffer.c +++ b/arch/powerpc/kvm/guest-state-buffer.c @@ -92,6 +92,10 @@ static int kvmppc_gsid_class(u16 iden) (iden <= KVMPPC_GSE_GUESTWIDE_END)) return KVMPPC_GS_CLASS_GUESTWIDE; + if ((iden >= KVMPPC_GSE_HOSTWIDE_START) && + (iden <= KVMPPC_GSE_HOSTWIDE_END)) + return KVMPPC_GS_CLASS_HOSTWIDE; + if ((iden >= KVMPPC_GSE_META_START) && (iden <= KVMPPC_GSE_META_END)) return KVMPPC_GS_CLASS_META; @@ -118,6 +122,21 @@ static int kvmppc_gsid_type(u16 iden) int type = -1; switch (kvmppc_gsid_class(iden)) { + case KVMPPC_GS_CLASS_HOSTWIDE: + switch (iden) { + case KVMPPC_GSID_L0_GUEST_HEAP: + fallthrough; + case KVMPPC_GSID_L0_GUEST_HEAP_MAX: + fallthrough; + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE: + fallthrough; + case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX: + fallthrough; + case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM: + type = KVMPPC_GSE_BE64; + break; + } + break; case KVMPPC_GS_CLASS_GUESTWIDE: switch (iden) { case KVMPPC_GSID_HOST_STATE_SIZE: @@ -187,6 +206,9 @@ unsigned long kvmppc_gsid_flags(u16 iden) case KVMPPC_GS_CLASS_GUESTWIDE: flags = KVMPPC_GS_FLAGS_WIDE; break; + case KVMPPC_GS_CLASS_HOSTWIDE: + flags = KVMPPC_GS_FLAGS_HOST_WIDE; + break; case KVMPPC_GS_CLASS_META: case KVMPPC_GS_CLASS_DWORD_REG: case KVMPPC_GS_CLASS_WORD_REG: @@ -310,6 +332,13 @@ static inline int kvmppc_gse_flatten_iden(u16 iden) bit += KVMPPC_GSE_GUESTWIDE_COUNT; + if (class == KVMPPC_GS_CLASS_HOSTWIDE) { + bit += iden - KVMPPC_GSE_HOSTWIDE_START; + return bit; + } + + bit += KVMPPC_GSE_HOSTWIDE_COUNT; + if (class == KVMPPC_GS_CLASS_META) { bit += iden - KVMPPC_GSE_META_START; return bit; @@ -356,6 +385,12 @@ static inline u16 kvmppc_gse_unflatten_iden(int bit) } bit -= KVMPPC_GSE_GUESTWIDE_COUNT; + if (bit < KVMPPC_GSE_HOSTWIDE_COUNT) { + iden = KVMPPC_GSE_HOSTWIDE_START + bit; + return iden; + } + bit -= KVMPPC_GSE_HOSTWIDE_COUNT; + if (bit < KVMPPC_GSE_META_COUNT) { iden = KVMPPC_GSE_META_START + bit; return iden; @@ -588,6 +623,8 @@ int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags) if (flags & KVMPPC_GS_FLAGS_WIDE) hflags |= H_GUEST_FLAGS_WIDE; + if (flags & KVMPPC_GS_FLAGS_HOST_WIDE) + hflags |= H_GUEST_FLAGS_HOST_WIDE; rc = plpar_guest_set_state(hflags, gsb->guest_id, 
gsb->vcpu_id, __pa(gsb->hdr), gsb->capacity, &i); @@ -613,6 +650,8 @@ int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags) if (flags & KVMPPC_GS_FLAGS_WIDE) hflags |= H_GUEST_FLAGS_WIDE; + if (flags & KVMPPC_GS_FLAGS_HOST_WIDE) + hflags |= H_GUEST_FLAGS_HOST_WIDE; rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id, __pa(gsb->hdr), gsb->capacity, &i); diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c index bfd225329a18..5ccca306997a 100644 --- a/arch/powerpc/kvm/test-guest-state-buffer.c +++ b/arch/powerpc/kvm/test-guest-state-buffer.c @@ -5,6 +5,7 @@ #include <kunit/test.h> #include <asm/guest-state-buffer.h> +#include <asm/kvm_ppc.h> static void test_creating_buffer(struct kunit *test) { @@ -141,6 +142,16 @@ static void test_gs_bitmap(struct kunit *test) i++; } + for (u16 iden = KVMPPC_GSID_L0_GUEST_HEAP; + iden <= KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM; iden++) { + kvmppc_gsbm_set(&gsbm, iden); + kvmppc_gsbm_set(&gsbm1, iden); + KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); + kvmppc_gsbm_clear(&gsbm, iden); + KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden)); + i++; + } + for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA; iden++) { kvmppc_gsbm_set(&gsbm, iden); @@ -309,12 +320,215 @@ static void test_gs_msg(struct kunit *test) kvmppc_gsm_free(gsm); } +/* Test data struct for hostwide/L0 counters */ +struct kvmppc_gs_msg_test_hostwide_data { + u64 guest_heap; + u64 guest_heap_max; + u64 guest_pgtable_size; + u64 guest_pgtable_size_max; + u64 guest_pgtable_reclaim; +}; + +static size_t test_hostwide_get_size(struct kvmppc_gs_msg *gsm) + +{ + size_t size = 0; + u16 ids[] = { + KVMPPC_GSID_L0_GUEST_HEAP, + KVMPPC_GSID_L0_GUEST_HEAP_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM + }; + + for (int i = 0; i < ARRAY_SIZE(ids); i++) + size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); + return size; +} + +static int test_hostwide_fill_info(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data; + + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP, + data->guest_heap); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP_MAX, + data->guest_heap_max); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + data->guest_pgtable_size); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + data->guest_pgtable_size_max); + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM)) + kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM, + data->guest_pgtable_reclaim); + + return 0; +} + +static int test_hostwide_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data; + struct kvmppc_gs_elem *gse; + int rc; + + rc = kvmppc_gse_parse(&gsp, gsb); + if (rc < 0) + return rc; + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP); + if (gse) + data->guest_heap = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + if (gse) + data->guest_heap_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, 
KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + if (gse) + data->guest_pgtable_size = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + if (gse) + data->guest_pgtable_size_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + if (gse) + data->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse); + + return 0; +} + +static struct kvmppc_gs_msg_ops gs_msg_test_hostwide_ops = { + .get_size = test_hostwide_get_size, + .fill_info = test_hostwide_fill_info, + .refresh_info = test_hostwide_refresh_info, +}; + +static void test_gs_hostwide_msg(struct kunit *test) +{ + struct kvmppc_gs_msg_test_hostwide_data test_data = { + .guest_heap = 0xdeadbeef, + .guest_heap_max = ~0ULL, + .guest_pgtable_size = 0xff, + .guest_pgtable_size_max = 0xffffff, + .guest_pgtable_reclaim = 0xdeadbeef, + }; + struct kvmppc_gs_msg *gsm; + struct kvmppc_gs_buff *gsb; + + gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND, + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm); + + gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + + kvmppc_gsm_fill_info(gsm, gsb); + + memset(&test_data, 0, sizeof(test_data)); + + kvmppc_gsm_refresh_info(gsm, gsb); + KUNIT_EXPECT_EQ(test, test_data.guest_heap, 0xdeadbeef); + KUNIT_EXPECT_EQ(test, test_data.guest_heap_max, ~0ULL); + KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size, 0xff); + KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size_max, 0xffffff); + KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_reclaim, 0xdeadbeef); + + kvmppc_gsm_free(gsm); +} + +/* Test if the H_GUEST_GET_STATE for hostwide counters works */ +static void test_gs_hostwide_counters(struct kunit *test) +{ + struct kvmppc_gs_msg_test_hostwide_data test_data; + struct kvmppc_gs_parser gsp = { 0 }; + + struct kvmppc_gs_msg *gsm; + struct kvmppc_gs_buff *gsb; + struct kvmppc_gs_elem *gse; + int rc; + + if (!kvmhv_on_pseries()) + kunit_skip(test, "This test need a kmv-hv guest"); + + gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND, + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm); + + gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + + kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + + kvmppc_gsm_fill_info(gsm, gsb); + + /* With HOST_WIDE flags guestid and vcpuid will be ignored */ + rc = kvmppc_gsb_recv(gsb, KVMPPC_GS_FLAGS_HOST_WIDE); + KUNIT_ASSERT_EQ(test, rc, 0); + + /* Parse the guest state buffer is successful */ + rc = kvmppc_gse_parse(&gsp, gsb); + KUNIT_ASSERT_EQ(test, rc, 0); + + /* Parse the GSB and get the counters */ + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter missing"); + kunit_info(test, "Guest Heap Size=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + 
KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter max missing"); + kunit_info(test, "Guest Heap Size Max=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size missing"); + kunit_info(test, "Guest Page-table Size=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size-max missing"); + kunit_info(test, "Guest Page-table Size Max=%llu bytes", + kvmppc_gse_get_u64(gse)); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table reclaim size missing"); + kunit_info(test, "Guest Page-table Reclaim Size=%llu bytes", + kvmppc_gse_get_u64(gse)); + + kvmppc_gsm_free(gsm); + kvmppc_gsb_free(gsb); +} + static struct kunit_case guest_state_buffer_testcases[] = { KUNIT_CASE(test_creating_buffer), KUNIT_CASE(test_adding_element), KUNIT_CASE(test_gs_bitmap), KUNIT_CASE(test_gs_parsing), KUNIT_CASE(test_gs_msg), + KUNIT_CASE(test_gs_hostwide_msg), + KUNIT_CASE(test_gs_hostwide_counters), {} }; diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h index 45817ab82bb4..14b0e23f601f 100644 --- a/arch/powerpc/kvm/timing.h +++ b/arch/powerpc/kvm/timing.h @@ -38,11 +38,7 @@ static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {} static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) { /* type has to be known at build time for optimization */ - - /* The BUILD_BUG_ON below breaks in funny ways, commented out - * for now ... -BenH BUILD_BUG_ON(!__builtin_constant_p(type)); - */ switch (type) { case EXT_INTR_EXITS: vcpu->stat.ext_intr_exits++; diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index dd8a4b52a0cc..481f968e42c7 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -3,6 +3,8 @@ # Makefile for ppc-specific library files.. 
# +obj-y += crypto/ + CFLAGS_code-patching.o += -fno-stack-protector CFLAGS_feature-fixups.o += -fno-stack-protector @@ -79,9 +81,9 @@ CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec) CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include) obj-$(CONFIG_CRC32_ARCH) += crc32-powerpc.o -crc32-powerpc-y := crc32-glue.o crc32c-vpmsum_asm.o +crc32-powerpc-y := crc32.o crc32c-vpmsum_asm.o obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-powerpc.o -crc-t10dif-powerpc-y := crc-t10dif-glue.o crct10dif-vpmsum_asm.o +crc-t10dif-powerpc-y := crc-t10dif.o crct10dif-vpmsum_asm.o obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/lib/crc-t10dif-glue.c b/arch/powerpc/lib/crc-t10dif.c index f411b0120cc5..be23ded3a9df 100644 --- a/arch/powerpc/lib/crc-t10dif-glue.c +++ b/arch/powerpc/lib/crc-t10dif.c @@ -6,22 +6,22 @@ * [based on crc32c-vpmsum_glue.c] */ -#include <linux/crc-t10dif.h> +#include <asm/switch_to.h> #include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/kernel.h> #include <linux/cpufeature.h> -#include <asm/simd.h> -#include <asm/switch_to.h> +#include <linux/crc-t10dif.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/preempt.h> +#include <linux/uaccess.h> #define VMX_ALIGN 16 #define VMX_ALIGN_MASK (VMX_ALIGN-1) #define VECTOR_BREAKPOINT 64 -static DEFINE_STATIC_KEY_FALSE(have_vec_crypto); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto); u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len); @@ -71,7 +71,7 @@ static int __init crc_t10dif_powerpc_init(void) static_branch_enable(&have_vec_crypto); return 0; } -arch_initcall(crc_t10dif_powerpc_init); +subsys_initcall(crc_t10dif_powerpc_init); static void __exit crc_t10dif_powerpc_exit(void) { diff --git a/arch/powerpc/lib/crc32-vpmsum_core.S b/arch/powerpc/lib/crc-vpmsum-template.S index b0f87f595b26..b0f87f595b26 100644 --- a/arch/powerpc/lib/crc32-vpmsum_core.S +++ b/arch/powerpc/lib/crc-vpmsum-template.S diff --git a/arch/powerpc/lib/crc32-glue.c b/arch/powerpc/lib/crc32.c index dbd10f339183..0d9befb6e7b8 100644 --- a/arch/powerpc/lib/crc32-glue.c +++ b/arch/powerpc/lib/crc32.c @@ -1,19 +1,20 @@ // SPDX-License-Identifier: GPL-2.0-only -#include <linux/crc32.h> +#include <asm/switch_to.h> #include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/kernel.h> #include <linux/cpufeature.h> -#include <asm/simd.h> -#include <asm/switch_to.h> +#include <linux/crc32.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/preempt.h> +#include <linux/uaccess.h> #define VMX_ALIGN 16 #define VMX_ALIGN_MASK (VMX_ALIGN-1) #define VECTOR_BREAKPOINT 512 -static DEFINE_STATIC_KEY_FALSE(have_vec_crypto); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto); u32 __crc32c_vpmsum(u32 crc, const u8 *p, size_t len); @@ -72,7 +73,7 @@ static int __init crc32_powerpc_init(void) static_branch_enable(&have_vec_crypto); return 0; } -arch_initcall(crc32_powerpc_init); +subsys_initcall(crc32_powerpc_init); static void __exit crc32_powerpc_exit(void) { diff --git a/arch/powerpc/lib/crc32c-vpmsum_asm.S b/arch/powerpc/lib/crc32c-vpmsum_asm.S index bf442004ea1f..1b35c55cce0a 100644 --- a/arch/powerpc/lib/crc32c-vpmsum_asm.S +++ b/arch/powerpc/lib/crc32c-vpmsum_asm.S @@ -839,4 +839,4 @@ #define CRC_FUNCTION_NAME __crc32c_vpmsum #define REFLECT -#include "crc32-vpmsum_core.S" 
+#include "crc-vpmsum-template.S" diff --git a/arch/powerpc/lib/crct10dif-vpmsum_asm.S b/arch/powerpc/lib/crct10dif-vpmsum_asm.S index f0b93a0fe168..47a6266d89a8 100644 --- a/arch/powerpc/lib/crct10dif-vpmsum_asm.S +++ b/arch/powerpc/lib/crct10dif-vpmsum_asm.S @@ -842,4 +842,4 @@ .octa 0x0000000000000000000000018bb70000 #define CRC_FUNCTION_NAME __crct10dif_vpmsum -#include "crc32-vpmsum_core.S" +#include "crc-vpmsum-template.S" diff --git a/arch/powerpc/lib/crypto/Kconfig b/arch/powerpc/lib/crypto/Kconfig new file mode 100644 index 000000000000..3f9e1bbd9905 --- /dev/null +++ b/arch/powerpc/lib/crypto/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_CHACHA20_P10 + tristate + depends on PPC64 && CPU_LITTLE_ENDIAN && VSX + default CRYPTO_LIB_CHACHA + select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_POLY1305_P10 + tristate + depends on PPC64 && CPU_LITTLE_ENDIAN && VSX + depends on BROKEN # Needs to be fixed to work in softirq context + default CRYPTO_LIB_POLY1305 + select CRYPTO_ARCH_HAVE_LIB_POLY1305 + select CRYPTO_LIB_POLY1305_GENERIC + +config CRYPTO_SHA256_PPC_SPE + tristate + depends on SPE + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 diff --git a/arch/powerpc/lib/crypto/Makefile b/arch/powerpc/lib/crypto/Makefile new file mode 100644 index 000000000000..27f231f8e334 --- /dev/null +++ b/arch/powerpc/lib/crypto/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o +chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o + +obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o +poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o + +obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o +sha256-ppc-spe-y := sha256.o sha256-spe-asm.o diff --git a/arch/powerpc/lib/crypto/chacha-p10-glue.c b/arch/powerpc/lib/crypto/chacha-p10-glue.c new file mode 100644 index 000000000000..fcd23c6f1590 --- /dev/null +++ b/arch/powerpc/lib/crypto/chacha-p10-glue.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ChaCha stream cipher (P10 accelerated) + * + * Copyright 2023- IBM Corp. All rights reserved. 
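+ *
+ * Note: chacha_crypt_arch() below falls back to chacha_crypt_generic()
+ * whenever the CPU is not Power10-capable, the request is at most one
+ * block, or SIMD is not usable in the current context.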
+ */ + +#include <crypto/chacha.h> +#include <crypto/internal/simd.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/cpufeature.h> +#include <linux/sizes.h> +#include <asm/simd.h> +#include <asm/switch_to.h> + +asmlinkage void chacha_p10le_8x(const struct chacha_state *state, u8 *dst, + const u8 *src, unsigned int len, int nrounds); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); + +static void vsx_begin(void) +{ + preempt_disable(); + enable_kernel_vsx(); +} + +static void vsx_end(void) +{ + disable_kernel_vsx(); + preempt_enable(); +} + +static void chacha_p10_do_8x(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + unsigned int l = bytes & ~0x0FF; + + if (l > 0) { + chacha_p10le_8x(state, dst, src, l, nrounds); + bytes -= l; + src += l; + dst += l; + state->x[12] += l / CHACHA_BLOCK_SIZE; + } + + if (bytes > 0) + chacha_crypt_generic(state, dst, src, bytes, nrounds); +} + +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) +{ + hchacha_block_generic(state, out, nrounds); +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE || + !crypto_simd_usable()) + return chacha_crypt_generic(state, dst, src, bytes, nrounds); + + do { + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); + + vsx_begin(); + chacha_p10_do_8x(state, dst, src, todo, nrounds); + vsx_end(); + + bytes -= todo; + src += todo; + dst += todo; + } while (bytes); +} +EXPORT_SYMBOL(chacha_crypt_arch); + +bool chacha_is_arch_optimized(void) +{ + return static_key_enabled(&have_p10); +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +static int __init chacha_p10_init(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + static_branch_enable(&have_p10); + return 0; +} +subsys_initcall(chacha_p10_init); + +static void __exit chacha_p10_exit(void) +{ +} +module_exit(chacha_p10_exit); + +MODULE_DESCRIPTION("ChaCha stream cipher (P10 accelerated)"); +MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/crypto/chacha-p10le-8x.S b/arch/powerpc/lib/crypto/chacha-p10le-8x.S index 17bedb66b822..b29562bd5d40 100644 --- a/arch/powerpc/crypto/chacha-p10le-8x.S +++ b/arch/powerpc/lib/crypto/chacha-p10le-8x.S @@ -7,9 +7,6 @@ #=================================================================================== # Written by Danny Tsen <dtsen@us.ibm.com> # -# chacha_p10le_8x(u32 *state, byte *dst, const byte *src, -# size_t len, int nrounds); -# # do rounds, 8 quarter rounds # 1. a += b; d ^= a; d <<<= 16; # 2. c += d; b ^= c; b <<<= 12; @@ -575,7 +572,8 @@ .endm # -# chacha20_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds); +# void chacha_p10le_8x(const struct chacha_state *state, u8 *dst, const u8 *src, +# unsigned int len, int nrounds); # SYM_FUNC_START(chacha_p10le_8x) .align 5 diff --git a/arch/powerpc/lib/crypto/poly1305-p10-glue.c b/arch/powerpc/lib/crypto/poly1305-p10-glue.c new file mode 100644 index 000000000000..3f1664a724b6 --- /dev/null +++ b/arch/powerpc/lib/crypto/poly1305-p10-glue.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Poly1305 authenticator algorithm, RFC7539. + * + * Copyright 2023- IBM Corp. All rights reserved. 
+ */ +#include <asm/switch_to.h> +#include <crypto/internal/poly1305.h> +#include <linux/cpufeature.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/unaligned.h> + +asmlinkage void poly1305_p10le_4blocks(struct poly1305_block_state *state, const u8 *m, u32 mlen); +asmlinkage void poly1305_64s(struct poly1305_block_state *state, const u8 *m, u32 mlen, int highbit); +asmlinkage void poly1305_emit_64(const struct poly1305_state *state, const u32 nonce[4], u8 digest[POLY1305_DIGEST_SIZE]); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); + +static void vsx_begin(void) +{ + preempt_disable(); + enable_kernel_vsx(); +} + +static void vsx_end(void) +{ + disable_kernel_vsx(); + preempt_enable(); +} + +void poly1305_block_init_arch(struct poly1305_block_state *dctx, + const u8 raw_key[POLY1305_BLOCK_SIZE]) +{ + if (!static_key_enabled(&have_p10)) + return poly1305_block_init_generic(dctx, raw_key); + + dctx->h = (struct poly1305_state){}; + dctx->core_r.key.r64[0] = get_unaligned_le64(raw_key + 0); + dctx->core_r.key.r64[1] = get_unaligned_le64(raw_key + 8); +} +EXPORT_SYMBOL_GPL(poly1305_block_init_arch); + +void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src, + unsigned int len, u32 padbit) +{ + if (!static_key_enabled(&have_p10)) + return poly1305_blocks_generic(state, src, len, padbit); + vsx_begin(); + if (len >= POLY1305_BLOCK_SIZE * 4) { + poly1305_p10le_4blocks(state, src, len); + src += len - (len % (POLY1305_BLOCK_SIZE * 4)); + len %= POLY1305_BLOCK_SIZE * 4; + } + while (len >= POLY1305_BLOCK_SIZE) { + poly1305_64s(state, src, POLY1305_BLOCK_SIZE, padbit); + len -= POLY1305_BLOCK_SIZE; + src += POLY1305_BLOCK_SIZE; + } + vsx_end(); +} +EXPORT_SYMBOL_GPL(poly1305_blocks_arch); + +void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]) +{ + if (!static_key_enabled(&have_p10)) + return poly1305_emit_generic(state, digest, nonce); + poly1305_emit_64(state, nonce, digest); +} +EXPORT_SYMBOL_GPL(poly1305_emit_arch); + +bool poly1305_is_arch_optimized(void) +{ + return static_key_enabled(&have_p10); +} +EXPORT_SYMBOL(poly1305_is_arch_optimized); + +static int __init poly1305_p10_init(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + static_branch_enable(&have_p10); + return 0; +} +subsys_initcall(poly1305_p10_init); + +static void __exit poly1305_p10_exit(void) +{ +} +module_exit(poly1305_p10_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>"); +MODULE_DESCRIPTION("Optimized Poly1305 for P10"); diff --git a/arch/powerpc/crypto/poly1305-p10le_64.S b/arch/powerpc/lib/crypto/poly1305-p10le_64.S index a3c1987f1ecd..a3c1987f1ecd 100644 --- a/arch/powerpc/crypto/poly1305-p10le_64.S +++ b/arch/powerpc/lib/crypto/poly1305-p10le_64.S diff --git a/arch/powerpc/crypto/sha256-spe-asm.S b/arch/powerpc/lib/crypto/sha256-spe-asm.S index cd99d71dae34..cd99d71dae34 100644 --- a/arch/powerpc/crypto/sha256-spe-asm.S +++ b/arch/powerpc/lib/crypto/sha256-spe-asm.S diff --git a/arch/powerpc/lib/crypto/sha256.c b/arch/powerpc/lib/crypto/sha256.c new file mode 100644 index 000000000000..6b0f079587eb --- /dev/null +++ b/arch/powerpc/lib/crypto/sha256.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256 Secure Hash Algorithm, SPE optimized + * + * Based on generic implementation. The assembler module takes care + * about the SPE registers so it can run from interrupt context. 
+ * + * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de> + */ + +#include <asm/switch_to.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/preempt.h> + +/* + * MAX_BYTES defines the number of bytes that are allowed to be processed + * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000 + * operations per 64 bytes. e500 cores can issue two arithmetic instructions + * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2). + * Thus 1KB of input data will need an estimated maximum of 18,000 cycles. + * Headroom for cache misses included. Even with the low end model clocked + * at 667 MHz this equals to a critical time window of less than 27us. + * + */ +#define MAX_BYTES 1024 + +extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks); + +static void spe_begin(void) +{ + /* We just start SPE operations and will save SPE registers later. */ + preempt_disable(); + enable_kernel_spe(); +} + +static void spe_end(void) +{ + disable_kernel_spe(); + /* reenable preemption */ + preempt_enable(); +} + +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + do { + /* cut input data into smaller blocks */ + u32 unit = min_t(size_t, nblocks, + MAX_BYTES / SHA256_BLOCK_SIZE); + + spe_begin(); + ppc_spe_sha256_transform(state, data, unit); + spe_end(); + + data += unit * SHA256_BLOCK_SIZE; + nblocks -= unit; + } while (nblocks); +} +EXPORT_SYMBOL_GPL(sha256_blocks_arch); + +bool sha256_is_arch_optimized(void) +{ + return true; +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm, SPE optimized"); diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c index 58ed6bd613a6..54340912398f 100644 --- a/arch/powerpc/lib/vmx-helper.c +++ b/arch/powerpc/lib/vmx-helper.c @@ -45,7 +45,7 @@ int exit_vmx_usercopy(void) * set and we are preemptible. The hack here is to schedule a * decrementer to fire here and reschedule for us if necessary. */ - if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched()) + if (need_irq_preemption() && need_resched()) set_dec(1); return 0; } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index c156fe0d53c3..806c74e0d5ab 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/ptrace.h> @@ -218,7 +219,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, // Read/write fault blocked by KUAP is bad, it can never succeed. if (bad_kuap_fault(regs, address, is_write)) { pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n", - is_write ? "write" : "read", address, + str_write_read(is_write), address, from_kuid(&init_user_ns, current_uid())); // Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad @@ -625,7 +626,7 @@ static void __bad_page_fault(struct pt_regs *regs, int sig) case INTERRUPT_DATA_STORAGE: case INTERRUPT_H_DATA_STORAGE: pr_alert("BUG: %s on %s at 0x%08lx\n", msg, - is_write ? 
"write" : "read", regs->dar); + str_write_read(is_write), regs->dar); break; case INTERRUPT_DATA_SEGMENT: pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar); diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 8b54f12d1889..ab1505cf42bf 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -54,20 +54,13 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa, { pmd_t *pmdp = pmd_off_k(va); pte_t *ptep; - - if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M)) - return -EINVAL; + unsigned int shift = mmu_psize_to_shift(psize); if (new) { if (WARN_ON(slab_is_available())) return -EINVAL; - if (psize == MMU_PAGE_512K) { - ptep = early_pte_alloc_kernel(pmdp, va); - /* The PTE should never be already present */ - if (WARN_ON(pte_present(*ptep) && pgprot_val(prot))) - return -EINVAL; - } else { + if (psize == MMU_PAGE_8M) { if (WARN_ON(!pmd_none(*pmdp) || !pmd_none(*(pmdp + 1)))) return -EINVAL; @@ -78,20 +71,25 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa, pmd_populate_kernel(&init_mm, pmdp + 1, ptep); ptep = (pte_t *)pmdp; + } else { + ptep = early_pte_alloc_kernel(pmdp, va); + /* The PTE should never be already present */ + if (WARN_ON(pte_present(*ptep) && pgprot_val(prot))) + return -EINVAL; } } else { - if (psize == MMU_PAGE_512K) - ptep = pte_offset_kernel(pmdp, va); - else + if (psize == MMU_PAGE_8M) ptep = (pte_t *)pmdp; + else + ptep = pte_offset_kernel(pmdp, va); } if (WARN_ON(!ptep)) return -ENOMEM; set_huge_pte_at(&init_mm, va, ptep, - pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), - 1UL << mmu_psize_to_shift(psize)); + arch_make_huge_pte(pfn_pte(pa >> PAGE_SHIFT, prot), shift, 0), + 1UL << shift); return 0; } @@ -123,14 +121,18 @@ static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, unsigned long p = offset; int err = 0; - WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K)); + WARN_ON(!IS_ALIGNED(offset, SZ_16K) || !IS_ALIGNED(top, SZ_16K)); + for (; p < ALIGN(p, SZ_512K) && p < top && !err; p += SZ_16K, v += SZ_16K) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new); for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K) err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M) err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new); for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K) err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new); + for (; p < ALIGN_DOWN(top, SZ_16K) && p < top && !err; p += SZ_16K, v += SZ_16K) + err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new); if (!new) flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top); diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 6beacaec63d3..4c26912c2e3c 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -51,8 +51,16 @@ EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \ } while (0) -/* Sign-extended 32-bit immediate load */ +/* + * Sign-extended 32-bit immediate load + * + * If this is a dummy pass (!image), account for + * maximum possible instructions. 
+ */ #define PPC_LI32(d, i) do { \ + if (!image) \ + ctx->idx += 2; \ + else { \ if ((int)(uintptr_t)(i) >= -32768 && \ (int)(uintptr_t)(i) < 32768) \ EMIT(PPC_RAW_LI(d, i)); \ @@ -60,10 +68,15 @@ EMIT(PPC_RAW_LIS(d, IMM_H(i))); \ if (IMM_L(i)) \ EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \ - } } while(0) + } \ + } } while (0) #ifdef CONFIG_PPC64 +/* If dummy pass (!image), account for maximum possible instructions */ #define PPC_LI64(d, i) do { \ + if (!image) \ + ctx->idx += 5; \ + else { \ if ((long)(i) >= -2147483648 && \ (long)(i) < 2147483648) \ PPC_LI32(d, i); \ @@ -84,7 +97,8 @@ if ((uintptr_t)(i) & 0x000000000000ffffULL) \ EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \ 0xffff)); \ - } } while (0) + } \ + } } while (0) #define PPC_LI_ADDR PPC_LI64 #ifndef CONFIG_PPC_KERNEL_PCREL diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 2991bb171a9b..c0684733e9d6 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -504,10 +504,11 @@ static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ct EMIT(PPC_RAW_ADDI(_R3, _R1, regs_off)); if (!p->jited) PPC_LI_ADDR(_R4, (unsigned long)p->insnsi); - if (!create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx], (unsigned long)p->bpf_func, - BRANCH_SET_LINK)) { - if (image) - image[ctx->idx] = ppc_inst_val(branch_insn); + /* Account for max possible instructions during dummy pass for size calculation */ + if (image && !create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx], + (unsigned long)p->bpf_func, + BRANCH_SET_LINK)) { + image[ctx->idx] = ppc_inst_val(branch_insn); ctx->idx++; } else { EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func))); @@ -889,7 +890,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off); /* Reserve space to patch branch instruction to skip fexit progs */ - im->ip_after_call = &((u32 *)ro_image)[ctx->idx]; + if (ro_image) /* image is NULL for dummy pass */ + im->ip_after_call = &((u32 *)ro_image)[ctx->idx]; EMIT(PPC_RAW_NOP()); } @@ -912,7 +914,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im } if (flags & BPF_TRAMP_F_CALL_ORIG) { - im->ip_epilogue = &((u32 *)ro_image)[ctx->idx]; + if (ro_image) /* image is NULL for dummy pass */ + im->ip_epilogue = &((u32 *)ro_image)[ctx->idx]; PPC_LI_ADDR(_R3, im); ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx, (unsigned long)__bpf_tramp_exit); @@ -973,25 +976,9 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) { struct bpf_tramp_image im; - void *image; int ret; - /* - * Allocate a temporary buffer for __arch_prepare_bpf_trampoline(). - * This will NOT cause fragmentation in direct map, as we do not - * call set_memory_*() on this buffer. - * - * We cannot use kvmalloc here, because we need image to be in - * module memory range. 
- */ - image = bpf_jit_alloc_exec(PAGE_SIZE); - if (!image) - return -ENOMEM; - - ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image, - m, flags, tlinks, func_addr); - bpf_jit_free_exec(image); - + ret = __arch_prepare_bpf_trampoline(&im, NULL, NULL, NULL, m, flags, tlinks, func_addr); return ret; } diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index c4db278dae36..0aace304dfe1 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -313,7 +313,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code u64 func_addr; u32 true_cond; u32 tmp_idx; - int j; if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) && (BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) && @@ -1099,13 +1098,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code * 16 byte instruction that uses two 'struct bpf_insn' */ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ - tmp_idx = ctx->idx; PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm); PPC_LI32(dst_reg, (u32)insn[i].imm); - /* padding to allow full 4 instructions for later patching */ - if (!image) - for (j = ctx->idx - tmp_idx; j < 4; j++) - EMIT(PPC_RAW_NOP()); /* Adjust for two bpf instructions */ addrs[++i] = ctx->idx * 4; break; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 233703b06d7c..5daa77aee7f7 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -227,7 +227,14 @@ int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context * #ifdef CONFIG_PPC_KERNEL_PCREL reladdr = func_addr - local_paca->kernelbase; - if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) { + /* + * If fimage is NULL (the initial pass to find image size), + * account for the maximum no. of instructions possible. 
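+ * The real pass may emit fewer instructions than are reserved here; the
+ * dummy-pass count only needs to be an upper bound on the final image size.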
+ */ + if (!fimage) { + ctx->idx += 7; + return 0; + } else if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) { EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase))); /* Align for subsequent prefix instruction */ if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8)) @@ -412,7 +419,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code u64 imm64; u32 true_cond; u32 tmp_idx; - int j; /* * addrs[] maps a BPF bytecode address into a real offset from @@ -1046,12 +1052,7 @@ emit_clear: case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ imm64 = ((u64)(u32) insn[i].imm) | (((u64)(u32) insn[i+1].imm) << 32); - tmp_idx = ctx->idx; PPC_LI64(dst_reg, imm64); - /* padding to allow full 5 instructions for later patching */ - if (!image) - for (j = ctx->idx - tmp_idx; j < 5; j++) - EMIT(PPC_RAW_NOP()); /* Adjust for two bpf instructions */ addrs[++i] = ctx->idx * 4; break; diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index ac2cf58d62db..7f53fcb7495a 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -18,6 +18,8 @@ obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o obj-$(CONFIG_VPA_PMU) += vpa-pmu.o +obj-$(CONFIG_KVM_BOOK3S_HV_PMU) += kvm-hv-pmu.o + obj-$(CONFIG_PPC_8xx) += 8xx-pmu.o obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index b906d28f74fd..8b0081441f85 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2239,6 +2239,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs) { u64 period = event->hw.sample_period; + const u64 last_period = event->hw.last_period; s64 prev, delta, left; int record = 0; @@ -2320,7 +2321,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (record) { struct perf_sample_data data; - perf_sample_data_init(&data, ~0ULL, event->hw.last_period); + perf_sample_data_init(&data, ~0ULL, last_period); if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE) perf_get_data_addr(event, regs, &data.addr); @@ -2343,12 +2344,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type); data.sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; } - if (perf_event_overflow(event, &data, regs)) - power_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } else if (period) { /* Account for interrupt in case of invalid SIAR */ - if (perf_event_account_interrupt(event)) - power_pmu_stop(event, 0); + perf_event_account_interrupt(event); } } diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index 1a53ab08447c..7120ab20cbfe 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c @@ -590,6 +590,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct pt_regs *regs) { u64 period = event->hw.sample_period; + const u64 last_period = event->hw.last_period; s64 prev, delta, left; int record = 0; @@ -632,10 +633,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (record) { struct perf_sample_data data; - perf_sample_data_init(&data, 0, event->hw.last_period); + perf_sample_data_init(&data, 0, last_period); - if (perf_event_overflow(event, &data, regs)) - fsl_emb_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } } diff --git a/arch/powerpc/perf/kvm-hv-pmu.c b/arch/powerpc/perf/kvm-hv-pmu.c new 
file mode 100644 index 000000000000..ae264c9080ef --- /dev/null +++ b/arch/powerpc/perf/kvm-hv-pmu.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Description: PMUs specific to running nested KVM-HV guests + * on Book3S processors (specifically POWER9 and later). + */ + +#define pr_fmt(fmt) "kvmppc-pmu: " fmt + +#include "asm-generic/local64.h" +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/ratelimit.h> +#include <linux/kvm_host.h> +#include <linux/gfp_types.h> +#include <linux/pgtable.h> +#include <linux/perf_event.h> +#include <linux/spinlock_types.h> +#include <linux/spinlock.h> + +#include <asm/types.h> +#include <asm/kvm_ppc.h> +#include <asm/kvm_book3s.h> +#include <asm/mmu.h> +#include <asm/pgalloc.h> +#include <asm/pte-walk.h> +#include <asm/reg.h> +#include <asm/plpar_wrappers.h> +#include <asm/firmware.h> + +#include "asm/guest-state-buffer.h" + +enum kvmppc_pmu_eventid { + KVMPPC_EVENT_HOST_HEAP, + KVMPPC_EVENT_HOST_HEAP_MAX, + KVMPPC_EVENT_HOST_PGTABLE, + KVMPPC_EVENT_HOST_PGTABLE_MAX, + KVMPPC_EVENT_HOST_PGTABLE_RECLAIM, + KVMPPC_EVENT_MAX, +}; + +#define KVMPPC_PMU_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, kvmppc_events_sysfs_show, _id) + +static ssize_t kvmppc_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +/* Holds the hostwide stats */ +static struct kvmppc_hostwide_stats { + u64 guest_heap; + u64 guest_heap_max; + u64 guest_pgtable_size; + u64 guest_pgtable_size_max; + u64 guest_pgtable_reclaim; +} l0_stats; + +/* Protect access to l0_stats */ +static DEFINE_SPINLOCK(lock_l0_stats); + +/* GSB related structs needed to talk to L0 */ +static struct kvmppc_gs_msg *gsm_l0_stats; +static struct kvmppc_gs_buff *gsb_l0_stats; +static struct kvmppc_gs_parser gsp_l0_stats; + +static struct attribute *kvmppc_pmu_events_attr[] = { + KVMPPC_PMU_EVENT_ATTR(host_heap, KVMPPC_EVENT_HOST_HEAP), + KVMPPC_PMU_EVENT_ATTR(host_heap_max, KVMPPC_EVENT_HOST_HEAP_MAX), + KVMPPC_PMU_EVENT_ATTR(host_pagetable, KVMPPC_EVENT_HOST_PGTABLE), + KVMPPC_PMU_EVENT_ATTR(host_pagetable_max, KVMPPC_EVENT_HOST_PGTABLE_MAX), + KVMPPC_PMU_EVENT_ATTR(host_pagetable_reclaim, KVMPPC_EVENT_HOST_PGTABLE_RECLAIM), + NULL, +}; + +static const struct attribute_group kvmppc_pmu_events_group = { + .name = "events", + .attrs = kvmppc_pmu_events_attr, +}; + +PMU_FORMAT_ATTR(event, "config:0-5"); +static struct attribute *kvmppc_pmu_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group kvmppc_pmu_format_group = { + .name = "format", + .attrs = kvmppc_pmu_format_attr, +}; + +static const struct attribute_group *kvmppc_pmu_attr_groups[] = { + &kvmppc_pmu_events_group, + &kvmppc_pmu_format_group, + NULL, +}; + +/* + * Issue the hcall to get the L0-host stats. 
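+ * The request buffer (gsb_l0_stats) and message (gsm_l0_stats) set up in
+ * kvmppc_init_hostwide() are reused for every refresh.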
+ * Should be called with l0-stat lock held + */ +static int kvmppc_update_l0_stats(void) +{ + int rc; + + /* With HOST_WIDE flags guestid and vcpuid will be ignored */ + rc = kvmppc_gsb_recv(gsb_l0_stats, KVMPPC_GS_FLAGS_HOST_WIDE); + if (rc) + goto out; + + /* Parse the guest state buffer is successful */ + rc = kvmppc_gse_parse(&gsp_l0_stats, gsb_l0_stats); + if (rc) + goto out; + + /* Update the l0 returned stats*/ + memset(&l0_stats, 0, sizeof(l0_stats)); + rc = kvmppc_gsm_refresh_info(gsm_l0_stats, gsb_l0_stats); + +out: + return rc; +} + +/* Update the value of the given perf_event */ +static int kvmppc_pmu_event_update(struct perf_event *event) +{ + int rc; + u64 curr_val, prev_val; + unsigned long flags; + unsigned int config = event->attr.config; + + /* Ensure no one else is modifying the l0_stats */ + spin_lock_irqsave(&lock_l0_stats, flags); + + rc = kvmppc_update_l0_stats(); + if (!rc) { + switch (config) { + case KVMPPC_EVENT_HOST_HEAP: + curr_val = l0_stats.guest_heap; + break; + case KVMPPC_EVENT_HOST_HEAP_MAX: + curr_val = l0_stats.guest_heap_max; + break; + case KVMPPC_EVENT_HOST_PGTABLE: + curr_val = l0_stats.guest_pgtable_size; + break; + case KVMPPC_EVENT_HOST_PGTABLE_MAX: + curr_val = l0_stats.guest_pgtable_size_max; + break; + case KVMPPC_EVENT_HOST_PGTABLE_RECLAIM: + curr_val = l0_stats.guest_pgtable_reclaim; + break; + default: + rc = -ENOENT; + break; + } + } + + spin_unlock_irqrestore(&lock_l0_stats, flags); + + /* If no error than update the perf event */ + if (!rc) { + prev_val = local64_xchg(&event->hw.prev_count, curr_val); + if (curr_val > prev_val) + local64_add(curr_val - prev_val, &event->count); + } + + return rc; +} + +static int kvmppc_pmu_event_init(struct perf_event *event) +{ + unsigned int config = event->attr.config; + + pr_debug("%s: Event(%p) id=%llu cpu=%x on_cpu=%x config=%u", + __func__, event, event->id, event->cpu, + event->oncpu, config); + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (config >= KVMPPC_EVENT_MAX) + return -EINVAL; + + local64_set(&event->hw.prev_count, 0); + local64_set(&event->count, 0); + + return 0; +} + +static void kvmppc_pmu_del(struct perf_event *event, int flags) +{ + kvmppc_pmu_event_update(event); +} + +static int kvmppc_pmu_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + return kvmppc_pmu_event_update(event); + return 0; +} + +static void kvmppc_pmu_read(struct perf_event *event) +{ + kvmppc_pmu_event_update(event); +} + +/* Return the size of the needed guest state buffer */ +static size_t hostwide_get_size(struct kvmppc_gs_msg *gsm) + +{ + size_t size = 0; + const u16 ids[] = { + KVMPPC_GSID_L0_GUEST_HEAP, + KVMPPC_GSID_L0_GUEST_HEAP_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM + }; + + for (int i = 0; i < ARRAY_SIZE(ids); i++) + size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); + return size; +} + +/* Populate the request guest state buffer */ +static int hostwide_fill_info(struct kvmppc_gs_buff *gsb, + struct kvmppc_gs_msg *gsm) +{ + int rc = 0; + struct kvmppc_hostwide_stats *stats = gsm->data; + + /* + * It doesn't matter what values are put into request buffer as + * they are going to be overwritten anyways. But for the sake of + * testcode and symmetry contents of existing stats are put + * populated into the request guest state buffer. 
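+ * The response is consumed by hostwide_refresh_info() below.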
+ */ + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_HEAP, + stats->guest_heap); + + if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_HEAP_MAX, + stats->guest_heap_max); + + if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, + stats->guest_pgtable_size); + if (!rc && + kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, + stats->guest_pgtable_size_max); + if (!rc && + kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM)) + rc = kvmppc_gse_put_u64(gsb, + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM, + stats->guest_pgtable_reclaim); + + return rc; +} + +/* Parse and update the host wide stats from returned gsb */ +static int hostwide_refresh_info(struct kvmppc_gs_msg *gsm, + struct kvmppc_gs_buff *gsb) +{ + struct kvmppc_gs_parser gsp = { 0 }; + struct kvmppc_hostwide_stats *stats = gsm->data; + struct kvmppc_gs_elem *gse; + int rc; + + rc = kvmppc_gse_parse(&gsp, gsb); + if (rc < 0) + return rc; + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP); + if (gse) + stats->guest_heap = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + if (gse) + stats->guest_heap_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + if (gse) + stats->guest_pgtable_size = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + if (gse) + stats->guest_pgtable_size_max = kvmppc_gse_get_u64(gse); + + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + if (gse) + stats->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse); + + return 0; +} + +/* gsb-message ops for setting up/parsing */ +static struct kvmppc_gs_msg_ops gsb_ops_l0_stats = { + .get_size = hostwide_get_size, + .fill_info = hostwide_fill_info, + .refresh_info = hostwide_refresh_info, +}; + +static int kvmppc_init_hostwide(void) +{ + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&lock_l0_stats, flags); + + /* already registered ? */ + if (gsm_l0_stats) { + rc = 0; + goto out; + } + + /* setup the Guest state message/buffer to talk to L0 */ + gsm_l0_stats = kvmppc_gsm_new(&gsb_ops_l0_stats, &l0_stats, + GSM_SEND, GFP_KERNEL); + if (!gsm_l0_stats) { + rc = -ENOMEM; + goto out; + } + + /* Populate the Idents */ + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP_MAX); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); + + /* allocate GSB. 
Guest/Vcpu Id is ignored */ + gsb_l0_stats = kvmppc_gsb_new(kvmppc_gsm_size(gsm_l0_stats), 0, 0, + GFP_KERNEL); + if (!gsb_l0_stats) { + rc = -ENOMEM; + goto out; + } + + /* ask the ops to fill in the info */ + rc = kvmppc_gsm_fill_info(gsm_l0_stats, gsb_l0_stats); + +out: + if (rc) { + if (gsm_l0_stats) + kvmppc_gsm_free(gsm_l0_stats); + if (gsb_l0_stats) + kvmppc_gsb_free(gsb_l0_stats); + gsm_l0_stats = NULL; + gsb_l0_stats = NULL; + } + spin_unlock_irqrestore(&lock_l0_stats, flags); + return rc; +} + +static void kvmppc_cleanup_hostwide(void) +{ + unsigned long flags; + + spin_lock_irqsave(&lock_l0_stats, flags); + + if (gsm_l0_stats) + kvmppc_gsm_free(gsm_l0_stats); + if (gsb_l0_stats) + kvmppc_gsb_free(gsb_l0_stats); + gsm_l0_stats = NULL; + gsb_l0_stats = NULL; + + spin_unlock_irqrestore(&lock_l0_stats, flags); +} + +/* L1 wide counters PMU */ +static struct pmu kvmppc_pmu = { + .module = THIS_MODULE, + .task_ctx_nr = perf_sw_context, + .name = "kvm-hv", + .event_init = kvmppc_pmu_event_init, + .add = kvmppc_pmu_add, + .del = kvmppc_pmu_del, + .read = kvmppc_pmu_read, + .attr_groups = kvmppc_pmu_attr_groups, + .type = -1, + .scope = PERF_PMU_SCOPE_SYS_WIDE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, +}; + +static int __init kvmppc_register_pmu(void) +{ + int rc = -EOPNOTSUPP; + + /* only support events for nestedv2 right now */ + if (kvmhv_is_nestedv2()) { + rc = kvmppc_init_hostwide(); + if (rc) + goto out; + + /* Register the pmu */ + rc = perf_pmu_register(&kvmppc_pmu, kvmppc_pmu.name, -1); + if (rc) + goto out; + + pr_info("Registered kvm-hv pmu"); + } + +out: + return rc; +} + +static void __exit kvmppc_unregister_pmu(void) +{ + if (kvmhv_is_nestedv2()) { + kvmppc_cleanup_hostwide(); + + if (kvmppc_pmu.type != -1) + perf_pmu_unregister(&kvmppc_pmu); + + pr_info("kvmhv_pmu unregistered.\n"); + } +} + +module_init(kvmppc_register_pmu); +module_exit(kvmppc_unregister_pmu); +MODULE_DESCRIPTION("KVM PPC Book3s-hv PMU"); +MODULE_AUTHOR("Vaibhav Jain <vaibhav@linux.ibm.com>"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/platforms/44x/gpio.c b/arch/powerpc/platforms/44x/gpio.c index e5f2319e5cbe..d540e261d85a 100644 --- a/arch/powerpc/platforms/44x/gpio.c +++ b/arch/powerpc/platforms/44x/gpio.c @@ -75,8 +75,7 @@ __ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) clrbits32(®s->or, GPIO_MASK(gpio)); } -static void -ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); unsigned long flags; @@ -88,6 +87,8 @@ ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) spin_unlock_irqrestore(&chip->lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); + + return 0; } static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -179,7 +180,7 @@ static int __init ppc4xx_add_gpiochips(void) gc->direction_input = ppc4xx_gpio_dir_in; gc->direction_output = ppc4xx_gpio_dir_out; gc->get = ppc4xx_gpio_get; - gc->set = ppc4xx_gpio_set; + gc->set_rv = ppc4xx_gpio_set; ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc); if (ret) diff --git a/arch/powerpc/platforms/44x/uic.c b/arch/powerpc/platforms/44x/uic.c index 31f760c2ec5d..85daf841fd3f 100644 --- a/arch/powerpc/platforms/44x/uic.c +++ b/arch/powerpc/platforms/44x/uic.c @@ -254,8 +254,9 @@ static struct uic * __init uic_init_one(struct device_node *node) } uic->dcrbase = *dcrreg; - uic->irqhost = 
irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops, - uic); + uic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), + NR_UIC_INTS, &uic_host_ops, + uic); if (! uic->irqhost) return NULL; /* FIXME: panic? */ @@ -327,5 +328,5 @@ unsigned int uic_get_irq(void) msr = mfdcr(primary_uic->dcrbase + UIC_MSR); src = 32 - ffs(msr); - return irq_linear_revmap(primary_uic->irqhost, src); + return irq_find_mapping(primary_uic->irqhost, src); } diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index e995eb30bf09..2cf3c6237337 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -188,7 +188,8 @@ mpc5121_ads_cpld_pic_init(void) cpld_pic_node = of_node_get(np); - cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL); + cpld_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 16, + &cpld_pic_host_ops, NULL); if (!cpld_pic_host) { printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); goto end; diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 19626cd42406..bc7f83cfec1d 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -168,7 +168,7 @@ static void __init media5200_init_irq(void) spin_lock_init(&media5200_irq.lock); - media5200_irq.irqhost = irq_domain_add_linear(fpga_np, + media5200_irq.irqhost = irq_domain_create_linear(of_fwnode_handle(fpga_np), MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq); if (!media5200_irq.irqhost) goto out; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 1ea591ec6083..bda707d848a6 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -247,9 +247,9 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) if (!cascade_virq) return; - gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt); + gpt->irqhost = irq_domain_create_linear(of_fwnode_handle(node), 1, &mpc52xx_gpt_irq_ops, gpt); if (!gpt->irqhost) { - dev_err(gpt->dev, "irq_domain_add_linear() failed\n"); + dev_err(gpt->dev, "irq_domain_create_linear() failed\n"); return; } @@ -280,7 +280,7 @@ static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio) return (in_be32(&gpt->regs->status) >> 8) & 1; } -static void +static int mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v) { struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc); @@ -293,6 +293,8 @@ mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v) raw_spin_lock_irqsave(&gpt->lock, flags); clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_GPIO_MASK, r); raw_spin_unlock_irqrestore(&gpt->lock, flags); + + return 0; } static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) @@ -334,7 +336,7 @@ static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in; gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out; gpt->gc.get = mpc52xx_gpt_gpio_get; - gpt->gc.set = mpc52xx_gpt_gpio_set; + gpt->gc.set_rv = mpc52xx_gpt_gpio_set; gpt->gc.base = -1; gpt->gc.parent = gpt->dev; @@ -369,7 +371,7 @@ struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq) mutex_lock(&mpc52xx_gpt_list_mutex); list_for_each(pos, &mpc52xx_gpt_list) { gpt = container_of(pos, struct mpc52xx_gpt_priv, list); - if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) { + if 
(gpt->irqhost && irq == irq_find_mapping(gpt->irqhost, 0)) { mutex_unlock(&mpc52xx_gpt_list_mutex); return gpt; } diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 43c881d31ca6..eb6a4e745c08 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -446,7 +446,7 @@ void __init mpc52xx_init_irq(void) * As last step, add an irq host to translate the real * hw irq information provided by the ofw to linux virq */ - mpc52xx_irqhost = irq_domain_add_linear(picnode, + mpc52xx_irqhost = irq_domain_create_linear(of_fwnode_handle(picnode), MPC52xx_IRQ_HIGHTESTHWIRQ, &mpc52xx_irqhost_ops, NULL); @@ -515,5 +515,5 @@ unsigned int mpc52xx_get_irq(void) return 0; } - return irq_linear_revmap(mpc52xx_irqhost, irq); + return irq_find_mapping(mpc52xx_irqhost, irq); } diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 4d8fa9ed1a67..6e37dfc6c5c9 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -92,10 +92,11 @@ static void mcu_power_off(void) mutex_unlock(&mcu->lock); } -static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +static int mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct mcu *mcu = gpiochip_get_data(gc); u8 bit = 1 << (4 + gpio); + int ret; mutex_lock(&mcu->lock); if (val) @@ -103,14 +104,16 @@ static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) else mcu->reg_ctrl |= bit; - i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl); + ret = i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, + mcu->reg_ctrl); mutex_unlock(&mcu->lock); + + return ret; } static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - mcu_gpio_set(gc, gpio, val); - return 0; + return mcu_gpio_set(gc, gpio, val); } static int mcu_gpiochip_add(struct mcu *mcu) @@ -123,7 +126,7 @@ static int mcu_gpiochip_add(struct mcu *mcu) gc->can_sleep = 1; gc->ngpio = MCU_NUM_GPIO; gc->base = -1; - gc->set = mcu_gpio_set; + gc->set_rv = mcu_gpio_set; gc->direction_output = mcu_gpio_dir_out; gc->parent = dev; diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 60e0b8947ce6..4b69fb321a68 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -83,7 +83,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) if (cause >> (i + 16)) break; } - return irq_linear_revmap(socrates_fpga_pic_irq_host, + return irq_find_mapping(socrates_fpga_pic_irq_host, (irq_hw_number_t)i); } @@ -278,7 +278,7 @@ void __init socrates_fpga_pic_init(struct device_node *pic) int i; /* Setup an irq_domain structure */ - socrates_fpga_pic_irq_host = irq_domain_add_linear(pic, + socrates_fpga_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(pic), SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL); if (socrates_fpga_pic_irq_host == NULL) { pr_err("FPGA PIC: Unable to allocate host\n"); diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c index a18fc7c99f83..a49d4a9ab3bc 100644 --- a/arch/powerpc/platforms/8xx/cpm1-ic.c +++ b/arch/powerpc/platforms/8xx/cpm1-ic.c @@ -59,7 +59,7 @@ static int cpm_get_irq(struct irq_desc *desc) cpm_vec = in_be16(&data->reg->cpic_civr); cpm_vec >>= 11; - return irq_linear_revmap(data->host, cpm_vec); + return 
irq_find_mapping(data->host, cpm_vec); } static void cpm_cascade(struct irq_desc *desc) @@ -110,7 +110,8 @@ static int cpm_pic_probe(struct platform_device *pdev) out_be32(&data->reg->cpic_cimr, 0); - data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data); + data->host = irq_domain_create_linear(of_fwnode_handle(dev->of_node), + 64, &cpm_pic_host_ops, data); if (!data->host) return -ENODEV; diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c index 1dc095ad48fc..7462c221115c 100644 --- a/arch/powerpc/platforms/8xx/cpm1.c +++ b/arch/powerpc/platforms/8xx/cpm1.c @@ -417,7 +417,7 @@ static void __cpm1_gpio16_set(struct cpm1_gpio16_chip *cpm1_gc, u16 pin_mask, in out_be16(&iop->dat, cpm1_gc->cpdata); } -static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) { struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; @@ -428,6 +428,8 @@ static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) __cpm1_gpio16_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; } static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio) @@ -497,7 +499,7 @@ int cpm1_gpiochip_add16(struct device *dev) gc->direction_input = cpm1_gpio16_dir_in; gc->direction_output = cpm1_gpio16_dir_out; gc->get = cpm1_gpio16_get; - gc->set = cpm1_gpio16_set; + gc->set_rv = cpm1_gpio16_set; gc->to_irq = cpm1_gpio16_to_irq; gc->parent = dev; gc->owner = THIS_MODULE; @@ -554,7 +556,7 @@ static void __cpm1_gpio32_set(struct cpm1_gpio32_chip *cpm1_gc, u32 pin_mask, in out_be32(&iop->dat, cpm1_gc->cpdata); } -static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) { struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; @@ -565,6 +567,8 @@ static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) __cpm1_gpio32_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; } static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) @@ -618,7 +622,7 @@ int cpm1_gpiochip_add32(struct device *dev) gc->direction_input = cpm1_gpio32_dir_in; gc->direction_output = cpm1_gpio32_dir_out; gc->get = cpm1_gpio32_get; - gc->set = cpm1_gpio32_set; + gc->set_rv = cpm1_gpio32_set; gc->parent = dev; gc->owner = THIS_MODULE; diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c index ea6b0e523c60..933d6ab7f512 100644 --- a/arch/powerpc/platforms/8xx/pic.c +++ b/arch/powerpc/platforms/8xx/pic.c @@ -80,7 +80,7 @@ unsigned int mpc8xx_get_irq(void) if (irq == PIC_VEC_SPURRIOUS) return 0; - return irq_linear_revmap(mpc8xx_pic_host, irq); + return irq_find_mapping(mpc8xx_pic_host, irq); } @@ -146,7 +146,8 @@ void __init mpc8xx_pic_init(void) if (!siu_reg) goto out; - mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL); + mpc8xx_pic_host = irq_domain_create_linear(of_fwnode_handle(np), 64, + &mpc8xx_pic_host_ops, NULL); if (!mpc8xx_pic_host) printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 013d66304c31..91a8f0a7086e 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ 
-149,8 +149,9 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np) __flipper_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS, - &flipper_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + FLIPPER_NR_IRQS, + &flipper_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); return NULL; @@ -172,7 +173,7 @@ unsigned int flipper_pic_get_irq(void) return 0; /* no more IRQs pending */ irq = __ffs(irq_status); - return irq_linear_revmap(flipper_irq_host, irq); + return irq_find_mapping(flipper_irq_host, irq); } /* diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 4d2d92de30af..b57e87b0b3ce 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -175,8 +175,9 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) __hlwd_quiesce(io_base); - irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS, - &hlwd_irq_domain_ops, io_base); + irq_domain = irq_domain_create_linear(of_fwnode_handle(np), + HLWD_NR_IRQS, + &hlwd_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); iounmap(io_base); @@ -189,7 +190,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np) unsigned int hlwd_pic_get_irq(void) { unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host); - return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0; + return hwirq ? irq_find_mapping(hlwd_irq_host, hwirq) : 0; } /* diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 03a7c51f2645..c37783a03d25 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -250,7 +250,7 @@ static unsigned int pmac_pic_get_irq(void) raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); if (unlikely(irq < 0)) return 0; - return irq_linear_revmap(pmac_pic_host, irq); + return irq_find_mapping(pmac_pic_host, irq); } static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, @@ -327,8 +327,9 @@ static void __init pmac_pic_probe_oldstyle(void) /* * Allocate an irq host */ - pmac_pic_host = irq_domain_add_linear(master, max_irqs, - &pmac_pic_host_ops, NULL); + pmac_pic_host = irq_domain_create_linear(of_fwnode_handle(master), + max_irqs, + &pmac_pic_host_ops, NULL); BUG_ON(pmac_pic_host == NULL); irq_set_default_domain(pmac_pic_host); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 6de1cd5d8a58..e119ced05d10 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -45,6 +45,7 @@ #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> +#include <linux/string_choices.h> #include <linux/of.h> #include <linux/of_platform.h> @@ -238,8 +239,7 @@ static void __init l2cr_init(void) _set_L2CR(0); _set_L2CR(*l2cr); pr_info("L2CR overridden (0x%x), backside cache is %s\n", - *l2cr, ((*l2cr) & 0x80000000) ? 
- "enabled" : "disabled"); + *l2cr, str_enabled_disabled((*l2cr) & 0x80000000)); } of_node_put(np); break; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 09e7fe24fac1..88e92af8acf9 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -190,7 +190,7 @@ static int __init psurge_secondary_ipi_init(void) { int rc = -ENOMEM; - psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL); + psurge_host = irq_domain_create_nomap(NULL, ~0, &psurge_host_ops, NULL); if (psurge_host) psurge_secondary_virq = irq_create_direct_mapping(psurge_host); diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 8633891b7aa5..b4426a35aca3 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/time.h> @@ -77,7 +78,7 @@ long __init pmac_time_init(void) delta |= 0xFF000000UL; dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0); printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60, - dst ? "on" : "off"); + str_on_off(dst)); #endif return delta; } diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index d92759c21fae..e180bd8e1400 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -191,7 +191,8 @@ int __init opal_event_init(void) * fall back to the legacy method (opal_event_request(...)) * anyway. */ dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event"); - opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, + opal_event_irqchip.domain = irq_domain_create_linear(of_fwnode_handle(dn), + MAX_NUM_EVENTS, &opal_event_domain_ops, &opal_event_irqchip); of_node_put(dn); if (!opal_event_irqchip.domain) { diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index ae4b549b5ca0..d8ccf2c9b98a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1897,7 +1897,7 @@ static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned return -ENOMEM; } - hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn), + hose->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(hose->dn), &pnv_msi_domain_info, hose->dev_domain); if (!hose->msi_domain) { diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index 61722133eb2d..22d91ac424dd 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/reboot.h> #include <linux/rcuwait.h> +#include <linux/string_choices.h> #include <asm/firmware.h> #include <asm/lv1call.h> @@ -724,7 +725,7 @@ static irqreturn_t ps3_notification_interrupt(int irq, void *data) static int ps3_notification_read_write(struct ps3_notification_device *dev, u64 lpar, int write) { - const char *op = write ? 
"write" : "read"; + const char *op = str_write_read(write); unsigned long flags; int res; diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 95e96bd61a20..a4ad4b49eef7 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -743,7 +743,7 @@ void __init ps3_init_IRQ(void) unsigned cpu; struct irq_domain *host; - host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); + host = irq_domain_create_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); irq_set_default_domain(host); for_each_possible_cpu(cpu) { diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 3f3e3492e436..57222678bb3f 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -3,7 +3,8 @@ ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG obj-y := lpar.o hvCall.o nvram.o reconfig.o \ of_helpers.o rtas-work-area.o papr-sysparm.o \ - papr-vpd.o \ + papr-rtas-common.o papr-vpd.o papr-indices.o \ + papr-platform-dump.o papr-phy-attest.o \ setup.o iommu.o event_sources.o ras.o \ firmware.o power.o dlpar.o mobility.o rng.o \ pci.o pci_dlpar.o eeh_pseries.o msi.o \ diff --git a/arch/powerpc/platforms/pseries/htmdump.c b/arch/powerpc/platforms/pseries/htmdump.c index 57fc1700f604..742ec52c9d4d 100644 --- a/arch/powerpc/platforms/pseries/htmdump.c +++ b/arch/powerpc/platforms/pseries/htmdump.c @@ -10,28 +10,40 @@ #include <asm/io.h> #include <asm/machdep.h> #include <asm/plpar_wrappers.h> +#include <asm/kvm_guest.h> static void *htm_buf; +static void *htm_status_buf; +static void *htm_info_buf; +static void *htm_caps_buf; static u32 nodeindex; static u32 nodalchipindex; static u32 coreindexonchip; static u32 htmtype; +static u32 htmconfigure; +static u32 htmstart; +static u32 htmsetup; +static u64 htmflags; + static struct dentry *htmdump_debugfs_dir; +#define HTM_ENABLE 1 +#define HTM_DISABLE 0 +#define HTM_NOWRAP 1 +#define HTM_WRAP 0 -static ssize_t htmdump_read(struct file *filp, char __user *ubuf, - size_t count, loff_t *ppos) +/* + * Check the return code for H_HTM hcall. + * Return non-zero value (1) if either H_PARTIAL or H_SUCCESS + * is returned. For other return codes: + * Return zero if H_NOT_AVAILABLE. + * Return -EBUSY if hcall return busy. + * Return -EINVAL if any parameter or operation is not valid. + * Return -EPERM if HTM Virtualization Engine Technology code + * is not applied. + * Return -EIO if the HTM state is not valid. 
+ */ +static ssize_t htm_return_check(long rc) { - void *htm_buf = filp->private_data; - unsigned long page, read_size, available; - loff_t offset; - long rc; - - page = ALIGN_DOWN(*ppos, PAGE_SIZE); - offset = (*ppos) % PAGE_SIZE; - - rc = htm_get_dump_hardware(nodeindex, nodalchipindex, coreindexonchip, - htmtype, virt_to_phys(htm_buf), PAGE_SIZE, page); - switch (rc) { case H_SUCCESS: /* H_PARTIAL for the case where all available data can't be @@ -65,6 +77,38 @@ static ssize_t htmdump_read(struct file *filp, char __user *ubuf, return -EPERM; } + /* + * Return 1 for H_SUCCESS/H_PARTIAL + */ + return 1; +} + +static ssize_t htmdump_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_buf = filp->private_data; + unsigned long page, read_size, available; + loff_t offset; + long rc, ret; + + page = ALIGN_DOWN(*ppos, PAGE_SIZE); + offset = (*ppos) % PAGE_SIZE; + + /* + * Invoke H_HTM call with: + * - operation as htm dump (H_HTM_OP_DUMP_DATA) + * - last three values are address, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_DATA, virt_to_phys(htm_buf), + PAGE_SIZE, page); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_DATA, returning %ld\n", ret); + return ret; + } + available = PAGE_SIZE; read_size = min(count, available); *ppos += read_size; @@ -77,6 +121,292 @@ static const struct file_operations htmdump_fops = { .open = simple_open, }; +static int htmconfigure_set(void *data, u64 val) +{ + long rc, ret; + unsigned long param1 = -1, param2 = -1; + + /* + * value as 1 : configure HTM. + * value as 0 : deconfigure HTM. Return -EINVAL for + * other values. + */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm configure (H_HTM_OP_CONFIGURE) + * - If htmflags is set, param1 and param2 will be -1 + * which is an indicator to use default htm mode reg mask + * and htm mode reg value. + * - last three values are unused, hence set to zero + */ + if (!htmflags) { + param1 = 0; + param2 = 0; + } + + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CONFIGURE, param1, param2, 0); + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm deconfigure (H_HTM_OP_DECONFIGURE) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DECONFIGURE, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmconfigure if operation succeeds */ + htmconfigure = val; + + return 0; +} + +static int htmconfigure_get(void *data, u64 *val) +{ + *val = htmconfigure; + return 0; +} + +static int htmstart_set(void *data, u64 val) +{ + long rc, ret; + + /* + * value as 1: start HTM + * value as 0: stop HTM + * Return -EINVAL for other values. 
+ */ + if (val == HTM_ENABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm start (H_HTM_OP_START) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_START, 0, 0, 0); + + } else if (val == HTM_DISABLE) { + /* + * Invoke H_HTM call with: + * - operation as htm stop (H_HTM_OP_STOP) + * - last three values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STOP, 0, 0, 0); + } else + return -EINVAL; + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed, returning %ld\n", ret); + return ret; + } + + /* Set htmstart if H_HTM_OP_START/H_HTM_OP_STOP operation succeeds */ + htmstart = val; + + return 0; +} + +static int htmstart_get(void *data, u64 *val) +{ + *val = htmstart; + return 0; +} + +static ssize_t htmstatus_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_status_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + int htmstatus_flag; + + /* + * Invoke H_HTM call with: + * - operation as htm status (H_HTM_OP_STATUS) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_STATUS, virt_to_phys(htm_status_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_STATUS, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each nest htm status + * entry is 0x6 bytes whereas each core htm status entry is + * 0x8 bytes. + * So total count to copy is: + * 32 bytes (for first 7 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_status_buf + 0x10; + if (htmtype == 0x2) + htmstatus_flag = 0x8; + else + htmstatus_flag = 0x6; + to_copy = 32 + (be64_to_cpu(*num_entries) * htmstatus_flag); + return simple_read_from_buffer(ubuf, count, ppos, htm_status_buf, to_copy); +} + +static const struct file_operations htmstatus_fops = { + .llseek = NULL, + .read = htmstatus_read, + .open = simple_open, +}; + +static ssize_t htminfo_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_info_buf = filp->private_data; + long rc, ret; + u64 *num_entries; + u64 to_copy; + + /* + * Invoke H_HTM call with: + * - operation as dump system processor configuration (H_HTM_OP_DUMP_SYSPROC_CONF) + * - last three values as addr, size and offset + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_DUMP_SYSPROC_CONF, virt_to_phys(htm_info_buf), + PAGE_SIZE, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_DUMP_SYSPROC_CONF, returning %ld\n", ret); + return ret; + } + + /* + * HTM status buffer, start of buffer + 0x10 gives the + * number of HTM entries in the buffer. Each processor entry + * is 16 bytes.
+ * + * So total count to copy is: + * 32 bytes (for first 5 fields) + (number of HTM entries * entry size) + */ + num_entries = htm_info_buf + 0x10; + to_copy = 32 + (be64_to_cpu(*num_entries) * 16); + return simple_read_from_buffer(ubuf, count, ppos, htm_info_buf, to_copy); +} + +static ssize_t htmcaps_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + void *htm_caps_buf = filp->private_data; + long rc, ret; + + /* + * Invoke H_HTM call with: + * - operation as htm capabilities (H_HTM_OP_CAPABILITIES) + * - last three values as addr, size (0x80 for the Capabilities Output Buffer) + * and zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_CAPABILITIES, virt_to_phys(htm_caps_buf), + 0x80, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_CAPABILITIES, returning %ld\n", ret); + return ret; + } + + return simple_read_from_buffer(ubuf, count, ppos, htm_caps_buf, 0x80); +} + +static const struct file_operations htminfo_fops = { + .llseek = NULL, + .read = htminfo_read, + .open = simple_open, +}; + +static const struct file_operations htmcaps_fops = { + .llseek = NULL, + .read = htmcaps_read, + .open = simple_open, +}; + +static int htmsetup_set(void *data, u64 val) +{ + long rc, ret; + + /* + * Input value: HTM buffer size as a power of 2 + * Example: hex value 0x21 (decimal: 33) is for + * 8GB + * Invoke H_HTM call with: + * - operation as htm setup (H_HTM_OP_SETUP) + * - parameter 1 set to input value. + * - last two values are unused, hence set to zero + */ + rc = htm_hcall_wrapper(htmflags, nodeindex, nodalchipindex, coreindexonchip, + htmtype, H_HTM_OP_SETUP, val, 0, 0); + + ret = htm_return_check(rc); + if (ret <= 0) { + pr_debug("H_HTM hcall failed for op: H_HTM_OP_SETUP, returning %ld\n", ret); + return ret; + } + + /* Set htmsetup if H_HTM_OP_SETUP operation succeeds */ + htmsetup = val; + + return 0; +} + +static int htmsetup_get(void *data, u64 *val) +{ + *val = htmsetup; + return 0; +} + +static int htmflags_set(void *data, u64 val) +{ + /* + * Input value: + * The currently supported flag value enables/disables + * HTM buffer wrap. The no-wrap flag is used along with "configure" + * to prevent the HTM buffer from wrapping.
+ * Writing 1 will set noWrap while configuring HTM + */ + if (val == HTM_NOWRAP) + htmflags = H_HTM_FLAGS_NOWRAP; + else if (val == HTM_WRAP) + htmflags = 0; + else + return -EINVAL; + + return 0; +} + +static int htmflags_get(void *data, u64 *val) +{ + *val = htmflags; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(htmconfigure_fops, htmconfigure_get, htmconfigure_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmstart_fops, htmstart_get, htmstart_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmsetup_fops, htmsetup_get, htmsetup_set, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(htmflags_fops, htmflags_get, htmflags_set, "%llu\n"); + static int htmdump_init_debugfs(void) { htm_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); @@ -98,11 +428,50 @@ static int htmdump_init_debugfs(void) htmdump_debugfs_dir, &htmtype); debugfs_create_file("trace", 0400, htmdump_debugfs_dir, htm_buf, &htmdump_fops); + /* + * Debugfs interface files to control HTM operations: + */ + debugfs_create_file("htmconfigure", 0600, htmdump_debugfs_dir, NULL, &htmconfigure_fops); + debugfs_create_file("htmstart", 0600, htmdump_debugfs_dir, NULL, &htmstart_fops); + debugfs_create_file("htmsetup", 0600, htmdump_debugfs_dir, NULL, &htmsetup_fops); + debugfs_create_file("htmflags", 0600, htmdump_debugfs_dir, NULL, &htmflags_fops); + + /* Debugfs interface file to present status of HTM */ + htm_status_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_status_buf) { + pr_err("Failed to allocate htmstatus buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present System Processor Configuration */ + htm_info_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_info_buf) { + pr_err("Failed to allocate htm info buf\n"); + return -ENOMEM; + } + + /* Debugfs interface file to present HTM capabilities */ + htm_caps_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!htm_caps_buf) { + pr_err("Failed to allocate htm caps buf\n"); + return -ENOMEM; + } + + debugfs_create_file("htmstatus", 0400, htmdump_debugfs_dir, htm_status_buf, &htmstatus_fops); + debugfs_create_file("htminfo", 0400, htmdump_debugfs_dir, htm_info_buf, &htminfo_fops); + debugfs_create_file("htmcaps", 0400, htmdump_debugfs_dir, htm_caps_buf, &htmcaps_fops); + return 0; } static int __init htmdump_init(void) { + /* Disable on kvm guest */ + if (is_kvm_guest()) { + pr_info("htmdump not supported inside KVM guest\n"); + return -EOPNOTSUPP; + } + if (htmdump_init_debugfs()) return -ENOMEM; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index d6ebc19fb99c..eec333dd2e59 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -197,7 +197,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl) static void tce_free_pSeries(struct iommu_table *tbl) { - if (!tbl->it_userspace) + if (tbl->it_userspace) tce_iommu_userspace_view_free(tbl); } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index f9d80111c322..ee1c8c6898a3 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -525,7 +525,12 @@ static struct msi_domain_info pseries_msi_domain_info = { static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { - __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + + if (dev->current_state == PCI_D0) + __pci_read_msi_msg(irq_data_get_msi_desc(data), msg); + else + get_cached_msi_msg(data->irq, msg); } static struct irq_chip 
pseries_msi_irq_chip = { @@ -628,7 +633,7 @@ static int __pseries_msi_allocate_domains(struct pci_controller *phb, return -ENOMEM; } - phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn), + phb->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(phb->dn), &pseries_msi_domain_info, phb->dev_domain); if (!phb->msi_domain) { diff --git a/arch/powerpc/platforms/pseries/papr-indices.c b/arch/powerpc/platforms/pseries/papr-indices.c new file mode 100644 index 000000000000..3c7545591c45 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-indices.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-indices: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-indices.h> +#include "papr-rtas-common.h" + +/* + * Function-specific return values for ibm,set-dynamic-indicator and + * ibm,get-dynamic-sensor-state RTAS calls. + * PAPR+ v2.13 7.3.18 and 7.3.19. + */ +#define RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR -3 + +/** + * struct rtas_get_indices_params - Parameters (in and out) for + * ibm,get-indices. + * @is_sensor: In: Caller-provided whether sensor or indicator. + * @indice_type:In: Caller-provided indice (sensor or indicator) token + * @work_area: In: Caller-provided work area buffer for results. + * @next: In: Sequence number. Out: Next sequence number. + * @status: Out: RTAS call status. + */ +struct rtas_get_indices_params { + u8 is_sensor; + u32 indice_type; + struct rtas_work_area *work_area; + u32 next; + s32 status; +}; + +/* + * rtas_ibm_get_indices() - Call ibm,get-indices to fill a work area buffer. + * @params: See &struct rtas_ibm_get_indices_params. + * + * Calls ibm,get-indices until it errors or successfully deposits data + * into the supplied work area. Handles RTAS retry statuses. Maps RTAS + * error statuses to reasonable errno values. + * + * The caller is expected to invoke rtas_ibm_get_indices() multiple times + * to retrieve all indices data for the provided indice type. Only one + * sequence should be in progress at any time; starting a new sequence + * will disrupt any sequence already in progress. Serialization of + * indices retrieval sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. 
+ */ +static int rtas_ibm_get_indices(struct rtas_get_indices_params *params) +{ + struct rtas_work_area *work_area = params->work_area; + const s32 token = rtas_function_token(RTAS_FN_IBM_GET_INDICES); + u32 rets; + s32 fwrc; + int ret; + + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + lockdep_assert_held(&rtas_ibm_get_indices_lock); + + do { + fwrc = rtas_call(token, 5, 2, &rets, params->is_sensor, + params->indice_type, + rtas_work_area_phys(work_area), + rtas_work_area_size(work_area), + params->next); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: /* Indicator type is not supported */ + ret = -EINVAL; + break; + case RTAS_SEQ_START_OVER: + ret = -EAGAIN; + pr_info_ratelimited("Indices changed during retrieval, retrying\n"); + params->next = 1; + break; + case RTAS_SEQ_MORE_DATA: + params->next = rets; + ret = 0; + break; + case RTAS_SEQ_COMPLETE: + params->next = 0; + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-indices status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal indices sequence APIs. A sequence is a series of calls to + * ibm,get-indices for a given location code. The sequence ends when + * an error is encountered or all indices for the input have been + * returned. + */ + +/* + * indices_sequence_begin() - Begin an indices retrieval sequence. + * + * Context: May sleep. + */ +static void indices_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_get_indices_params *param; + + param = (struct rtas_get_indices_params *)seq->params; + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. So + * allocate the work area under the lock. + */ + mutex_lock(&rtas_ibm_get_indices_lock); + param->work_area = rtas_work_area_alloc(RTAS_GET_INDICES_BUF_SIZE); + param->next = 1; + param->status = 0; +} + +/* + * indices_sequence_end() - Finalize an indices retrieval sequence. + * + * Releases resources obtained by indices_sequence_begin(). + */ +static void indices_sequence_end(struct papr_rtas_sequence *seq) +{ + struct rtas_get_indices_params *param; + + param = (struct rtas_get_indices_params *)seq->params; + rtas_work_area_free(param->work_area); + mutex_unlock(&rtas_ibm_get_indices_lock); +} + +/* + * Work function to be passed to papr_rtas_blob_generate(). + * + * The ibm,get-indices RTAS call fills the work area in a defined + * format but does not return the bytes written in the buffer. So + * instead of the kernel parsing this work area to determine the buffer + * length, copy the complete work area (RTAS_GET_INDICES_BUF_SIZE) + * to the blob and let user space obtain the data. + * This means RTAS_GET_INDICES_BUF_SIZE data will be returned for each + * read(). + */ + +static const char *indices_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) +{ + struct rtas_get_indices_params *p; + bool init_state; + + p = (struct rtas_get_indices_params *)seq->params; + init_state = (p->next == 1) ?
true : false; + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) + return NULL; + if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_indices(p))) + return NULL; + + *len = RTAS_GET_INDICES_BUF_SIZE; + return rtas_work_area_raw_buf(p->work_area); +} + +/* + * papr_indices_handle_read - returns indices blob data to user space + * + * The ibm,get-indices RTAS call fills the work area in a defined + * format but does not return the bytes written in the buffer, and + * RTAS_GET_INDICES_BUF_SIZE data is copied to the blob for each RTAS + * call. So send an RTAS_GET_INDICES_BUF_SIZE buffer to user space + * for each read(). + */ +static ssize_t papr_indices_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off) +{ + const struct papr_rtas_blob *blob = file->private_data; + + /* we should not instantiate a handle without any data attached. */ + if (!papr_rtas_blob_has_data(blob)) { + pr_err_once("handle without data\n"); + return -EIO; + } + + if (size < RTAS_GET_INDICES_BUF_SIZE) { + pr_err_once("Invalid buffer length %ld, expect %d\n", + size, RTAS_GET_INDICES_BUF_SIZE); + return -EINVAL; + } else if (size > RTAS_GET_INDICES_BUF_SIZE) + size = RTAS_GET_INDICES_BUF_SIZE; + + return simple_read_from_buffer(buf, size, off, blob->data, blob->len); +} + +static const struct file_operations papr_indices_handle_ops = { + .read = papr_indices_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, +}; + +/* + * papr_indices_create_handle() - Create a fd-based handle for reading + * indices data + * @ubuf: Input parameters to the RTAS call, such as whether sensor or + * indicator and the indice type, in user memory + * + * Handler for PAPR_INDICES_IOC_GET ioctl command. Validates @ubuf + * and instantiates an immutable indices "blob" for it. The blob is + * attached to a file descriptor for reading by user space. The memory + * backing the blob is freed when the file is released. + * + * The entire requested indices data is retrieved by this call and all + * necessary RTAS interactions are performed before returning the fd + * to user space. This keeps the read handler simple and ensures that + * the kernel can prevent interleaving of ibm,get-indices call sequences. + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_indices_create_handle(struct papr_indices_io_block __user *ubuf) +{ + struct papr_rtas_sequence seq = {}; + struct rtas_get_indices_params params = {}; + int fd; + + if (get_user(params.is_sensor, &ubuf->indices.is_sensor)) + return -EFAULT; + + if (get_user(params.indice_type, &ubuf->indices.indice_type)) + return -EFAULT; + + seq = (struct papr_rtas_sequence) { + .begin = indices_sequence_begin, + .end = indices_sequence_end, + .work = indices_sequence_fill_work_area, + }; + + seq.params = &params; + fd = papr_rtas_setup_file_interface(&seq, + &papr_indices_handle_ops, "[papr-indices]"); + + return fd; +} + +/* + * Create a work area with the input parameters. This function is used + * for both ibm,set-dynamic-indicator and ibm,get-dynamic-sensor-state + * RTAS Calls.
+ */ +static struct rtas_work_area * +papr_dynamic_indice_buf_from_user(struct papr_indices_io_block __user *ubuf, + struct papr_indices_io_block *kbuf) +{ + struct rtas_work_area *work_area; + u32 length; + __be32 len_be; + + if (copy_from_user(kbuf, ubuf, sizeof(*kbuf))) + return ERR_PTR(-EFAULT); + + + if (!string_is_terminated(kbuf->dynamic_param.location_code_str, + ARRAY_SIZE(kbuf->dynamic_param.location_code_str))) + return ERR_PTR(-EINVAL); + + /* + * The input data in the work area should be as follows: + * - 32-bit integer length of the location code string, + * including NULL. + * - Location code string, NULL terminated, identifying the + * token (sensor or indicator). + * PAPR 2.13 - R1–7.3.18–5 ibm,set-dynamic-indicator + * - R1–7.3.19–5 ibm,get-dynamic-sensor-state + */ + /* + * Length that user space passed should also include NULL + * terminator. + */ + length = strlen(kbuf->dynamic_param.location_code_str) + 1; + if (length > LOC_CODE_SIZE) + return ERR_PTR(-EINVAL); + + len_be = cpu_to_be32(length); + + work_area = rtas_work_area_alloc(LOC_CODE_SIZE + sizeof(u32)); + memcpy(rtas_work_area_raw_buf(work_area), &len_be, sizeof(u32)); + memcpy((rtas_work_area_raw_buf(work_area) + sizeof(u32)), + &kbuf->dynamic_param.location_code_str, length); + + return work_area; +} + +/** + * papr_dynamic_indicator_ioc_set - ibm,set-dynamic-indicator RTAS Call + * PAPR 2.13 7.3.18 + * + * @ubuf: Input parameters to RTAS call such as indicator token and + * new state. + * + * Returns success or -errno. + */ +static long papr_dynamic_indicator_ioc_set(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + + token = rtas_function_token(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_set_dynamic_indicator_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 3, 1, NULL, + kbuf.dynamic_param.token, + kbuf.dynamic_param.state, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,set-dynamic-indicator result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_set_dynamic_indicator_lock); + return ret; +} + +/** + * papr_dynamic_sensor_ioc_get - ibm,get-dynamic-sensor-state RTAS Call + * PAPR 2.13 7.3.19 + * + * @ubuf: Input parameters to RTAS call such as sensor token + * Copies the state in user space buffer. + * + * + * Returns success or -errno. 
+ */ + +static long papr_dynamic_sensor_ioc_get(struct papr_indices_io_block __user *ubuf) +{ + struct papr_indices_io_block kbuf; + struct rtas_work_area *work_area; + s32 fwrc, token, ret; + u32 rets; + + token = rtas_function_token(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + mutex_lock(&rtas_ibm_get_dynamic_sensor_state_lock); + work_area = papr_dynamic_indice_buf_from_user(ubuf, &kbuf); + if (IS_ERR(work_area)) { + ret = PTR_ERR(work_area); + goto out; + } + + do { + fwrc = rtas_call(token, 2, 2, &rets, + kbuf.dynamic_param.token, + rtas_work_area_phys(work_area)); + } while (rtas_busy_delay(fwrc)); + + rtas_work_area_free(work_area); + + switch (fwrc) { + case RTAS_SUCCESS: + if (put_user(rets, &ubuf->dynamic_param.state)) + ret = -EFAULT; + else + ret = 0; + break; + case RTAS_IBM_DYNAMIC_INDICE_NO_INDICATOR: /* No such indicator */ + ret = -EOPNOTSUPP; + break; + default: + pr_err("unexpected ibm,get-dynamic-sensor result %d\n", + fwrc); + fallthrough; + case RTAS_HARDWARE_ERROR: /* Hardware/platform error */ + ret = -EIO; + break; + } + +out: + mutex_unlock(&rtas_ibm_get_dynamic_sensor_state_lock); + return ret; +} + +/* + * Top-level ioctl handler for /dev/papr-indices. + */ +static long papr_indices_dev_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_INDICES_IOC_GET: + ret = papr_indices_create_handle(argp); + break; + case PAPR_DYNAMIC_SENSOR_IOC_GET: + ret = papr_dynamic_sensor_ioc_get(argp); + break; + case PAPR_DYNAMIC_INDICATOR_IOC_SET: + if (filp->f_mode & FMODE_WRITE) + ret = papr_dynamic_indicator_ioc_set(argp); + else + ret = -EBADF; + break; + default: + ret = -ENOIOCTLCMD; + break; + } + + return ret; +} + +static const struct file_operations papr_indices_ops = { + .unlocked_ioctl = papr_indices_dev_ioctl, +}; + +static struct miscdevice papr_indices_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-indices", + .fops = &papr_indices_ops, +}; + +static __init int papr_indices_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_GET_INDICES)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_SET_DYNAMIC_INDICATOR)) + return -ENODEV; + + if (!rtas_function_implemented(RTAS_FN_IBM_GET_DYNAMIC_SENSOR_STATE)) + return -ENODEV; + + return misc_register(&papr_indices_dev); +} +machine_device_initcall(pseries, papr_indices_init); diff --git a/arch/powerpc/platforms/pseries/papr-phy-attest.c b/arch/powerpc/platforms/pseries/papr-phy-attest.c new file mode 100644 index 000000000000..1907f2411567 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-phy-attest.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-phy-attest: " fmt + +#include <linux/build_bug.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/lockdep.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/string_helpers.h> +#include <linux/uaccess.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-physical-attestation.h> +#include "papr-rtas-common.h" + +/** + * struct rtas_phy_attest_params - Parameters (in and out) for + * ibm,physical-attestation. + * + * @cmd: In: Caller-provided attestation command buffer. Must be + * RTAS-addressable. 
+ * @work_area: In: Caller-provided work area buffer for attestation + * command structure + * Out: Caller-provided work area buffer for the response + * @cmd_len: In: Caller-provided attestation command structure + * length + * @sequence: In: Sequence number. Out: Next sequence number. + * @written: Out: Bytes written by ibm,physical-attestation to + * @work_area. + * @status: Out: RTAS call status. + */ +struct rtas_phy_attest_params { + struct papr_phy_attest_io_block cmd; + struct rtas_work_area *work_area; + u32 cmd_len; + u32 sequence; + u32 written; + s32 status; +}; + +/** + * rtas_physical_attestation() - Call ibm,physical-attestation to + * fill a work area buffer. + * @params: See &struct rtas_phy_attest_params. + * + * Calls ibm,physical-attestation until it errors or successfully + * deposits data into the supplied work area. Handles RTAS retry + * statuses. Maps RTAS error statuses to reasonable errno values. + * + * The caller is expected to invoke rtas_physical_attestation() + * multiple times to retrieve all the data for the provided + * attestation command. Only one sequence should be in progress at + * any time; starting a new sequence will disrupt any sequence + * already in progress. Serialization of attestation retrieval + * sequences is the responsibility of the caller. + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 otherwise. + */ +static int rtas_physical_attestation(struct rtas_phy_attest_params *params) +{ + struct rtas_work_area *work_area; + s32 fwrc, token; + u32 rets[2]; + int ret; + + work_area = params->work_area; + token = rtas_function_token(RTAS_FN_IBM_PHYSICAL_ATTESTATION); + if (token == RTAS_UNKNOWN_SERVICE) + return -ENOENT; + + lockdep_assert_held(&rtas_ibm_physical_attestation_lock); + + do { + fwrc = rtas_call(token, 3, 3, rets, + rtas_work_area_phys(work_area), + params->cmd_len, + params->sequence); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_INVALID_PARAMETER: + ret = -EINVAL; + break; + case RTAS_SEQ_MORE_DATA: + params->sequence = rets[0]; + fallthrough; + case RTAS_SEQ_COMPLETE: + params->written = rets[1]; + /* + * Kernel or firmware bug, do not continue. + */ + if (WARN(params->written > rtas_work_area_size(work_area), + "possible write beyond end of work area")) + ret = -EFAULT; + else + ret = 0; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,get-phy_attest status %d\n", fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Internal physical-attestation sequence APIs. A physical-attestation + * sequence is a series of calls to get ibm,physical-attestation + * for a given attestation command. The sequence ends when an error + * is encountered or all data for the attestation command has been + * returned. + */ + +/** + * phy_attest_sequence_begin() - Begin a response data for attestation + * command retrieval sequence. + * @seq: user specified parameters for RTAS call from seq struct. + * + * Context: May sleep. + */ +static void phy_attest_sequence_begin(struct papr_rtas_sequence *seq) +{ + struct rtas_phy_attest_params *param; + + /* + * We could allocate the work area before acquiring the + * function lock, but that would allow concurrent requests to + * exhaust the limited work area pool for no benefit. So + * allocate the work area under the lock. 
+ */ + mutex_lock(&rtas_ibm_physical_attestation_lock); + param = (struct rtas_phy_attest_params *)seq->params; + param->work_area = rtas_work_area_alloc(SZ_4K); + memcpy(rtas_work_area_raw_buf(param->work_area), ¶m->cmd, + param->cmd_len); + param->sequence = 1; + param->status = 0; +} + +/** + * phy_attest_sequence_end() - Finalize a attestation command + * response retrieval sequence. + * @seq: Sequence state. + * + * Releases resources obtained by phy_attest_sequence_begin(). + */ +static void phy_attest_sequence_end(struct papr_rtas_sequence *seq) +{ + struct rtas_phy_attest_params *param; + + param = (struct rtas_phy_attest_params *)seq->params; + rtas_work_area_free(param->work_area); + mutex_unlock(&rtas_ibm_physical_attestation_lock); + kfree(param); +} + +/* + * Generator function to be passed to papr_rtas_blob_generate(). + */ +static const char *phy_attest_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) +{ + struct rtas_phy_attest_params *p; + bool init_state; + + p = (struct rtas_phy_attest_params *)seq->params; + init_state = (p->written == 0) ? true : false; + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) + return NULL; + if (papr_rtas_sequence_set_err(seq, rtas_physical_attestation(p))) + return NULL; + *len = p->written; + return rtas_work_area_raw_buf(p->work_area); +} + +static const struct file_operations papr_phy_attest_handle_ops = { + .read = papr_rtas_common_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, +}; + +/** + * papr_phy_attest_create_handle() - Create a fd-based handle for + * reading the response for the given attestation command. + * @ulc: Attestation command in user memory; defines the scope of + * data for the attestation command to retrieve. + * + * Handler for PAPR_PHYSICAL_ATTESTATION_IOC_CREATE_HANDLE ioctl + * command. Validates @ulc and instantiates an immutable response + * "blob" for attestation command. The blob is attached to a file + * descriptor for reading by user space. The memory backing the blob + * is freed when the file is released. + * + * The entire requested response buffer for the attestation command + * retrieved by this call and all necessary RTAS interactions are + * performed before returning the fd to user space. This keeps the + * read handler simple and ensures that kernel can prevent + * interleaving ibm,physical-attestation call sequences. + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_phy_attest_create_handle(struct papr_phy_attest_io_block __user *ulc) +{ + struct rtas_phy_attest_params *params; + struct papr_rtas_sequence seq = {}; + int fd; + + /* + * Freed in phy_attest_sequence_end(). + */ + params = kzalloc(sizeof(*params), GFP_KERNEL_ACCOUNT); + if (!params) + return -ENOMEM; + + if (copy_from_user(¶ms->cmd, ulc, + sizeof(struct papr_phy_attest_io_block))) + return -EFAULT; + + params->cmd_len = be32_to_cpu(params->cmd.length); + seq = (struct papr_rtas_sequence) { + .begin = phy_attest_sequence_begin, + .end = phy_attest_sequence_end, + .work = phy_attest_sequence_fill_work_area, + }; + + seq.params = (void *)params; + + fd = papr_rtas_setup_file_interface(&seq, + &papr_phy_attest_handle_ops, + "[papr-physical-attestation]"); + + return fd; +} + +/* + * Top-level ioctl handler for /dev/papr-physical-attestation. 
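+ *
+ * Illustrative user space flow (not part of this patch; variable
+ * names are hypothetical):
+ *
+ *     devfd = open("/dev/papr-physical-attestation", O_RDONLY);
+ *     ... fill in a struct papr_phy_attest_io_block cmd ...
+ *     fd = ioctl(devfd, PAPR_PHY_ATTEST_IOC_HANDLE, &cmd);
+ *     while ((n = read(fd, buf, sizeof(buf))) > 0)
+ *             ... consume attestation response bytes ...
+ *     close(fd);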
+ */ +static long papr_phy_attest_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (__force void __user *)arg; + long ret; + + switch (ioctl) { + case PAPR_PHY_ATTEST_IOC_HANDLE: + ret = papr_phy_attest_create_handle(argp); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} + +static const struct file_operations papr_phy_attest_ops = { + .unlocked_ioctl = papr_phy_attest_dev_ioctl, +}; + +static struct miscdevice papr_phy_attest_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-physical-attestation", + .fops = &papr_phy_attest_ops, +}; + +static __init int papr_phy_attest_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_PHYSICAL_ATTESTATION)) + return -ENODEV; + + return misc_register(&papr_phy_attest_dev); +} +machine_device_initcall(pseries, papr_phy_attest_init); diff --git a/arch/powerpc/platforms/pseries/papr-platform-dump.c b/arch/powerpc/platforms/pseries/papr-platform-dump.c new file mode 100644 index 000000000000..f8d55eccdb6b --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-platform-dump.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-platform-dump: " fmt + +#include <linux/anon_inodes.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <asm/machdep.h> +#include <asm/rtas-work-area.h> +#include <asm/rtas.h> +#include <uapi/asm/papr-platform-dump.h> + +/* + * Function-specific return values for ibm,platform-dump, derived from + * PAPR+ v2.13 7.3.3.4.1 "ibm,platform-dump RTAS Call". + */ +#define RTAS_IBM_PLATFORM_DUMP_COMPLETE 0 /* Complete dump retrieved. */ +#define RTAS_IBM_PLATFORM_DUMP_CONTINUE 1 /* Continue dump */ +#define RTAS_NOT_AUTHORIZED -9002 /* Not Authorized */ + +#define RTAS_IBM_PLATFORM_DUMP_START 2 /* Linux status to start dump */ + +/** + * struct ibm_platform_dump_params - Parameters (in and out) for + * ibm,platform-dump + * @work_area: In: work area buffer for results. + * @buf_length: In: work area buffer length in bytes + * @dump_tag_hi: In: Most-significant 32 bits of a Dump_Tag representing + * an id of the dump being processed. + * @dump_tag_lo: In: Least-significant 32 bits of a Dump_Tag representing + * an id of the dump being processed. + * @sequence_hi: In: Sequence number in most-significant 32 bits. + * Out: Next sequence number in most-significant 32 bits. + * @sequence_lo: In: Sequence number in Least-significant 32 bits + * Out: Next sequence number in Least-significant 32 bits. + * @bytes_ret_hi: Out: Bytes written in most-significant 32 bits. + * @bytes_ret_lo: Out: Bytes written in Least-significant 32 bits. + * @status: Out: RTAS call status. + * @list: Maintain the list of dumps are in progress. Can + * retrieve multiple dumps with different dump IDs at + * the same time but not with the same dump ID. This list + * is used to determine whether the dump for the same ID + * is in progress. + */ +struct ibm_platform_dump_params { + struct rtas_work_area *work_area; + u32 buf_length; + u32 dump_tag_hi; + u32 dump_tag_lo; + u32 sequence_hi; + u32 sequence_lo; + u32 bytes_ret_hi; + u32 bytes_ret_lo; + s32 status; + struct list_head list; +}; + +/* + * Multiple dumps with different dump IDs can be retrieved at the same + * time, but not with dame dump ID. platform_dump_list_mutex and + * platform_dump_list are used to prevent this behavior. 
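+ *
+ * Illustrative user space flow (not part of this patch; variable
+ * names are hypothetical). Note that read() must be given a buffer
+ * of at least 1024 bytes:
+ *
+ *     devfd = open("/dev/papr-platform-dump", O_RDONLY);
+ *     fd = ioctl(devfd, PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE, &dump_tag);
+ *     while ((n = read(fd, buf, sizeof(buf))) > 0)
+ *             ... append dump data ...
+ *     ioctl(fd, PAPR_PLATFORM_DUMP_IOC_INVALIDATE, &dump_tag);
+ *     close(fd);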
+ */ +static DEFINE_MUTEX(platform_dump_list_mutex); +static LIST_HEAD(platform_dump_list); + +/** + * rtas_ibm_platform_dump() - Call ibm,platform-dump to fill a work area + * buffer. + * @params: See &struct ibm_platform_dump_params. + * @buf_addr: Address of dump buffer (work_area) + * @buf_length: Length of the buffer in bytes (min. 1024) + * + * Calls ibm,platform-dump until it errors or successfully deposits data + * into the supplied work area. Handles RTAS retry statuses. Maps RTAS + * error statuses to reasonable errno values. + * + * Can request multiple dumps with different dump IDs at the same time, + * but not with the same dump ID which is prevented with the check in + * the ioctl code (papr_platform_dump_create_handle()). + * + * The caller should inspect @params.status to determine whether more + * calls are needed to complete the sequence. + * + * Context: May sleep. + * Return: -ve on error, 0 for dump complete and 1 for continue dump + */ +static int rtas_ibm_platform_dump(struct ibm_platform_dump_params *params, + phys_addr_t buf_addr, u32 buf_length) +{ + u32 rets[4]; + s32 fwrc; + int ret = 0; + + do { + fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_PLATFORM_DUMP), + 6, 5, + rets, + params->dump_tag_hi, + params->dump_tag_lo, + params->sequence_hi, + params->sequence_lo, + buf_addr, + buf_length); + } while (rtas_busy_delay(fwrc)); + + switch (fwrc) { + case RTAS_HARDWARE_ERROR: + ret = -EIO; + break; + case RTAS_NOT_AUTHORIZED: + ret = -EPERM; + break; + case RTAS_IBM_PLATFORM_DUMP_CONTINUE: + case RTAS_IBM_PLATFORM_DUMP_COMPLETE: + params->sequence_hi = rets[0]; + params->sequence_lo = rets[1]; + params->bytes_ret_hi = rets[2]; + params->bytes_ret_lo = rets[3]; + break; + default: + ret = -EIO; + pr_err_ratelimited("unexpected ibm,platform-dump status %d\n", + fwrc); + break; + } + + params->status = fwrc; + return ret; +} + +/* + * Platform dump is used with multiple RTAS calls to retrieve the + * complete dump for the provided dump ID. Once the complete dump is + * retrieved, the hypervisor returns dump complete status (0) for the + * last RTAS call and expects the caller issues one more call with + * NULL buffer to invalidate the dump so that the hypervisor can remove + * the dump. + * + * After the specific dump is invalidated in the hypervisor, expect the + * dump complete status for the new sequence - the user space initiates + * new request for the same dump ID. + */ +static ssize_t papr_platform_dump_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off) +{ + struct ibm_platform_dump_params *params = file->private_data; + u64 total_bytes; + s32 fwrc; + + /* + * Dump already completed with the previous read calls. + * In case if the user space issues further reads, returns + * -EINVAL. + */ + if (!params->buf_length) { + pr_warn_once("Platform dump completed for dump ID %llu\n", + (u64) (((u64)params->dump_tag_hi << 32) | + params->dump_tag_lo)); + return -EINVAL; + } + + /* + * The hypervisor returns status 0 if no more data available to + * download. The dump will be invalidated with ioctl (see below). + */ + if (params->status == RTAS_IBM_PLATFORM_DUMP_COMPLETE) { + params->buf_length = 0; + /* + * Returns 0 to the user space so that user + * space read stops. + */ + return 0; + } + + if (size < SZ_1K) { + pr_err_once("Buffer length should be minimum 1024 bytes\n"); + return -EINVAL; + } else if (size > params->buf_length) { + /* + * Allocate 4K work area. So if the user requests > 4K, + * resize the buffer length. 
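+ * (That is, clamp the request to the 4K work area allocated in
+ * papr_platform_dump_create_handle().)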
+ */ + size = params->buf_length; + } + + fwrc = rtas_ibm_platform_dump(params, + rtas_work_area_phys(params->work_area), + size); + if (fwrc < 0) + return fwrc; + + total_bytes = (u64) (((u64)params->bytes_ret_hi << 32) | + params->bytes_ret_lo); + + /* + * Kernel or firmware bug, do not continue. + */ + if (WARN(total_bytes > size, "possible write beyond end of work area")) + return -EFAULT; + + if (copy_to_user(buf, rtas_work_area_raw_buf(params->work_area), + total_bytes)) + return -EFAULT; + + return total_bytes; +} + +static int papr_platform_dump_handle_release(struct inode *inode, + struct file *file) +{ + struct ibm_platform_dump_params *params = file->private_data; + + if (params->work_area) + rtas_work_area_free(params->work_area); + + mutex_lock(&platform_dump_list_mutex); + list_del(¶ms->list); + mutex_unlock(&platform_dump_list_mutex); + + kfree(params); + file->private_data = NULL; + return 0; +} + +/* + * This ioctl is used to invalidate the dump assuming the user space + * issue this ioctl after obtain the complete dump. + * Issue the last RTAS call with NULL buffer to invalidate the dump + * which means dump will be freed in the hypervisor. + */ +static long papr_platform_dump_invalidate_ioctl(struct file *file, + unsigned int ioctl, unsigned long arg) +{ + struct ibm_platform_dump_params *params; + u64 __user *argp = (void __user *)arg; + u64 param_dump_tag, dump_tag; + + if (ioctl != PAPR_PLATFORM_DUMP_IOC_INVALIDATE) + return -ENOIOCTLCMD; + + if (get_user(dump_tag, argp)) + return -EFAULT; + + /* + * private_data is freeded during release(), so should not + * happen. + */ + if (!file->private_data) { + pr_err("No valid FD to invalidate dump for the ID(%llu)\n", + dump_tag); + return -EINVAL; + } + + params = file->private_data; + param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) | + params->dump_tag_lo); + if (dump_tag != param_dump_tag) { + pr_err("Invalid dump ID(%llu) to invalidate dump\n", + dump_tag); + return -EINVAL; + } + + if (params->status != RTAS_IBM_PLATFORM_DUMP_COMPLETE) { + pr_err("Platform dump is not complete, but requested " + "to invalidate dump for ID(%llu)\n", + dump_tag); + return -EINPROGRESS; + } + + return rtas_ibm_platform_dump(params, 0, 0); +} + +static const struct file_operations papr_platform_dump_handle_ops = { + .read = papr_platform_dump_handle_read, + .release = papr_platform_dump_handle_release, + .unlocked_ioctl = papr_platform_dump_invalidate_ioctl, +}; + +/** + * papr_platform_dump_create_handle() - Create a fd-based handle for + * reading platform dump + * + * Handler for PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE ioctl command + * Allocates RTAS parameter struct and work area and attached to the + * file descriptor for reading by user space with the multiple RTAS + * calls until the dump is completed. This memory allocation is freed + * when the file is released. + * + * Multiple dump requests with different IDs are allowed at the same + * time, but not with the same dump ID. So if the user space is + * already opened file descriptor for the specific dump ID, return + * -EALREADY for the next request. + * + * @dump_tag: Dump ID for the dump requested to retrieve from the + * hypervisor + * + * Return: The installed fd number if successful, -ve errno otherwise. + */ +static long papr_platform_dump_create_handle(u64 dump_tag) +{ + struct ibm_platform_dump_params *params; + u64 param_dump_tag; + struct file *file; + long err; + int fd; + + /* + * Return failure if the user space is already opened FD for + * the specific dump ID. 
This check will prevent multiple dump + * requests for the same dump ID at the same time. Generally + * should not expect this, but in case. + */ + list_for_each_entry(params, &platform_dump_list, list) { + param_dump_tag = (u64) (((u64)params->dump_tag_hi << 32) | + params->dump_tag_lo); + if (dump_tag == param_dump_tag) { + pr_err("Platform dump for ID(%llu) is already in progress\n", + dump_tag); + return -EALREADY; + } + } + + params = kzalloc(sizeof(struct ibm_platform_dump_params), + GFP_KERNEL_ACCOUNT); + if (!params) + return -ENOMEM; + + params->work_area = rtas_work_area_alloc(SZ_4K); + params->buf_length = SZ_4K; + params->dump_tag_hi = (u32)(dump_tag >> 32); + params->dump_tag_lo = (u32)(dump_tag & 0x00000000ffffffffULL); + params->status = RTAS_IBM_PLATFORM_DUMP_START; + + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); + if (fd < 0) { + err = fd; + goto free_area; + } + + file = anon_inode_getfile_fmode("[papr-platform-dump]", + &papr_platform_dump_handle_ops, + (void *)params, O_RDONLY, + FMODE_LSEEK | FMODE_PREAD); + if (IS_ERR(file)) { + err = PTR_ERR(file); + goto put_fd; + } + + fd_install(fd, file); + + list_add(¶ms->list, &platform_dump_list); + + pr_info("%s (%d) initiated platform dump for dump tag %llu\n", + current->comm, current->pid, dump_tag); + return fd; +put_fd: + put_unused_fd(fd); +free_area: + rtas_work_area_free(params->work_area); + kfree(params); + return err; +} + +/* + * Top-level ioctl handler for /dev/papr-platform-dump. + */ +static long papr_platform_dump_dev_ioctl(struct file *filp, + unsigned int ioctl, + unsigned long arg) +{ + u64 __user *argp = (void __user *)arg; + u64 dump_tag; + long ret; + + if (get_user(dump_tag, argp)) + return -EFAULT; + + switch (ioctl) { + case PAPR_PLATFORM_DUMP_IOC_CREATE_HANDLE: + mutex_lock(&platform_dump_list_mutex); + ret = papr_platform_dump_create_handle(dump_tag); + mutex_unlock(&platform_dump_list_mutex); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} + +static const struct file_operations papr_platform_dump_ops = { + .unlocked_ioctl = papr_platform_dump_dev_ioctl, +}; + +static struct miscdevice papr_platform_dump_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "papr-platform-dump", + .fops = &papr_platform_dump_ops, +}; + +static __init int papr_platform_dump_init(void) +{ + if (!rtas_function_implemented(RTAS_FN_IBM_PLATFORM_DUMP)) + return -ENODEV; + + return misc_register(&papr_platform_dump_dev); +} +machine_device_initcall(pseries, papr_platform_dump_init); diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.c b/arch/powerpc/platforms/pseries/papr-rtas-common.c new file mode 100644 index 000000000000..33c606e3378a --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-rtas-common.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "papr-common: " fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/sched/signal.h> +#include "papr-rtas-common.h" + +/* + * Sequence based RTAS HCALL has to issue multiple times to retrieve + * complete data from the hypervisor. For some of these RTAS calls, + * the OS should not interleave calls with different input until the + * sequence is completed. So data is collected for these calls during + * ioctl handle and export to user space with read() handle. 
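+ * In practice the ioctl handler runs the complete RTAS call sequence
+ * up front, accumulates the result in a struct papr_rtas_blob, and
+ * installs an anonymous file descriptor whose read() simply serves
+ * that blob back to user space.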
+ * This file provides common functions needed for such sequence based + * RTAS calls Ex: ibm,get-vpd and ibm,get-indices. + */ + +bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob) +{ + return blob->data && blob->len; +} + +void papr_rtas_blob_free(const struct papr_rtas_blob *blob) +{ + if (blob) { + kvfree(blob->data); + kfree(blob); + } +} + +/** + * papr_rtas_blob_extend() - Append data to a &struct papr_rtas_blob. + * @blob: The blob to extend. + * @data: The new data to append to @blob. + * @len: The length of @data. + * + * Context: May sleep. + * Return: -ENOMEM on allocation failure, 0 otherwise. + */ +static int papr_rtas_blob_extend(struct papr_rtas_blob *blob, + const char *data, size_t len) +{ + const size_t new_len = blob->len + len; + const size_t old_len = blob->len; + const char *old_ptr = blob->data; + char *new_ptr; + + new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT); + if (!new_ptr) + return -ENOMEM; + + memcpy(&new_ptr[old_len], data, len); + blob->data = new_ptr; + blob->len = new_len; + return 0; +} + +/** + * papr_rtas_blob_generate() - Construct a new &struct papr_rtas_blob. + * @seq: work function of the caller that is called to obtain + * data with the caller RTAS call. + * + * The @work callback is invoked until it returns NULL. @seq is + * passed to @work in its first argument on each call. When + * @work returns data, it should store the data length in its + * second argument. + * + * Context: May sleep. + * Return: A completely populated &struct papr_rtas_blob, or NULL on error. + */ +static const struct papr_rtas_blob * +papr_rtas_blob_generate(struct papr_rtas_sequence *seq) +{ + struct papr_rtas_blob *blob; + const char *buf; + size_t len; + int err = 0; + + blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT); + if (!blob) + return NULL; + + if (!seq->work) + return ERR_PTR(-EINVAL); + + + while (err == 0 && (buf = seq->work(seq, &len))) + err = papr_rtas_blob_extend(blob, buf, len); + + if (err != 0 || !papr_rtas_blob_has_data(blob)) + goto free_blob; + + return blob; +free_blob: + papr_rtas_blob_free(blob); + return NULL; +} + +int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, int err) +{ + /* Preserve the first error recorded. */ + if (seq->error == 0) + seq->error = err; + + return seq->error; +} + +/* + * Higher-level retrieval code below. These functions use the + * papr_rtas_blob_* and sequence_* APIs defined above to create fd-based + * handles for consumption by user space. + */ + +/** + * papr_rtas_run_sequence() - Run a single retrieval sequence. + * @seq: Functions of the caller to complete the sequence + * + * Context: May sleep. Holds a mutex and an RTAS work area for its + * duration. Typically performs multiple sleepable slab + * allocations. + * + * Return: A populated &struct papr_rtas_blob on success. Encoded error + * pointer otherwise. + */ +static const struct papr_rtas_blob *papr_rtas_run_sequence(struct papr_rtas_sequence *seq) +{ + const struct papr_rtas_blob *blob; + + if (seq->begin) + seq->begin(seq); + + blob = papr_rtas_blob_generate(seq); + if (!blob) + papr_rtas_sequence_set_err(seq, -ENOMEM); + + if (seq->end) + seq->end(seq); + + + if (seq->error) { + papr_rtas_blob_free(blob); + return ERR_PTR(seq->error); + } + + return blob; +} + +/** + * papr_rtas_retrieve() - Return the data blob that is exposed to + * user space. + * @seq: RTAS call specific functions to be invoked until the + * sequence is completed. 
+ * + * Run sequences against @param until a blob is successfully + * instantiated, or a hard error is encountered, or a fatal signal is + * pending. + * + * Context: May sleep. + * Return: A fully populated data blob when successful. Encoded error + * pointer otherwise. + */ +const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq) +{ + const struct papr_rtas_blob *blob; + + /* + * EAGAIN means the sequence returns error with a -4 (data + * changed and need to start the sequence) status from RTAS calls + * and we should attempt a new sequence. PAPR+ (v2.13 R1–7.3.20–5 + * - ibm,get-vpd, R1–7.3.17–6 - ibm,get-indices) indicates that + * this should be a transient condition, not something that + * happens continuously. But we'll stop trying on a fatal signal. + */ + do { + blob = papr_rtas_run_sequence(seq); + if (!IS_ERR(blob)) /* Success. */ + break; + if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */ + break; + cond_resched(); + } while (!fatal_signal_pending(current)); + + return blob; +} + +/** + * papr_rtas_setup_file_interface - Complete the sequence and obtain + * the data and export to user space with fd-based handles. Then the + * user spave gets the data with read() handle. + * @seq: RTAS call specific functions to get the data. + * @fops: RTAS call specific file operations such as read(). + * @name: RTAS call specific char device node. + * + * Return: FD handle for consumption by user space + */ +long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq, + const struct file_operations *fops, + char *name) +{ + const struct papr_rtas_blob *blob; + struct file *file; + long ret; + int fd; + + blob = papr_rtas_retrieve(seq); + if (IS_ERR(blob)) + return PTR_ERR(blob); + + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto free_blob; + } + + file = anon_inode_getfile_fmode(name, fops, (void *)blob, + O_RDONLY, FMODE_LSEEK | FMODE_PREAD); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto put_fd; + } + + fd_install(fd, file); + return fd; + +put_fd: + put_unused_fd(fd); +free_blob: + papr_rtas_blob_free(blob); + return ret; +} + +/* + * papr_rtas_sequence_should_stop() - Determine whether RTAS retrieval + * sequence should continue. + * + * Examines the sequence error state and outputs of the last call to + * the specific RTAS to determine whether the sequence in progress + * should continue or stop. + * + * Return: True if the sequence has encountered an error or if all data + * for this sequence has been retrieved. False otherwise. + */ +bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq, + s32 status, bool init_state) +{ + bool done; + + if (seq->error) + return true; + + switch (status) { + case RTAS_SEQ_COMPLETE: + if (init_state) + done = false; /* Initial state. */ + else + done = true; /* All data consumed. */ + break; + case RTAS_SEQ_MORE_DATA: + done = false; /* More data available. */ + break; + default: + done = true; /* Error encountered. */ + break; + } + + return done; +} + +/* + * User space read to retrieve data for the corresponding RTAS call. + * papr_rtas_blob is filled with the data using the corresponding RTAS + * call sequence API. + */ +ssize_t papr_rtas_common_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off) +{ + const struct papr_rtas_blob *blob = file->private_data; + + /* We should not instantiate a handle without any data attached. 
*/ + if (!papr_rtas_blob_has_data(blob)) { + pr_err_once("handle without data\n"); + return -EIO; + } + + return simple_read_from_buffer(buf, size, off, blob->data, blob->len); +} + +int papr_rtas_common_handle_release(struct inode *inode, + struct file *file) +{ + const struct papr_rtas_blob *blob = file->private_data; + + papr_rtas_blob_free(blob); + + return 0; +} + +loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off, + int whence) +{ + const struct papr_rtas_blob *blob = file->private_data; + + return fixed_size_llseek(file, off, whence, blob->len); +} diff --git a/arch/powerpc/platforms/pseries/papr-rtas-common.h b/arch/powerpc/platforms/pseries/papr-rtas-common.h new file mode 100644 index 000000000000..4ceabcaf4905 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr-rtas-common.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_POWERPC_PAPR_RTAS_COMMON_H +#define _ASM_POWERPC_PAPR_RTAS_COMMON_H + +#include <linux/types.h> + +/* + * Return codes for sequence based RTAS calls. + * Not listed under PAPR+ v2.13 7.2.8: "Return Codes". + * But defined in the specific section of each RTAS call. + */ +#define RTAS_SEQ_COMPLETE 0 /* All data has been retrieved. */ +#define RTAS_SEQ_MORE_DATA 1 /* More data is available */ +#define RTAS_SEQ_START_OVER -4 /* Data changed, restart call sequence. */ + +/* + * Internal "blob" APIs for accumulating RTAS call results into + * an immutable buffer to be attached to a file descriptor. + */ +struct papr_rtas_blob { + const char *data; + size_t len; +}; + +/** + * struct papr_sequence - State for managing a sequence of RTAS calls. + * @error: Shall be zero as long as the sequence has not encountered an error, + * -ve errno otherwise. Use papr_rtas_sequence_set_err() to update. + * @params: Parameter block to pass to rtas_*() calls. + * @begin: Work area allocation and initialize the needed parameter + * values passed to RTAS call + * @end: Free the allocated work area + * @work: Obtain data with RTAS call and invoke it until the sequence is + * completed. 
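+ *
+ * Illustrative only (callback names are hypothetical): a client embeds
+ * its RTAS parameter block behind @params and wires the callbacks up
+ * along the lines of
+ *
+ *     struct papr_rtas_sequence seq = {
+ *             .begin = foo_sequence_begin,
+ *             .end   = foo_sequence_end,
+ *             .work  = foo_sequence_fill_work_area,
+ *     };
+ *     seq.params = &foo_params;
+ *
+ * before handing the sequence to papr_rtas_retrieve() or
+ * papr_rtas_setup_file_interface().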
+ * + */ +struct papr_rtas_sequence { + int error; + void *params; + void (*begin)(struct papr_rtas_sequence *seq); + void (*end)(struct papr_rtas_sequence *seq); + const char *(*work)(struct papr_rtas_sequence *seq, size_t *len); +}; + +extern bool papr_rtas_blob_has_data(const struct papr_rtas_blob *blob); +extern void papr_rtas_blob_free(const struct papr_rtas_blob *blob); +extern int papr_rtas_sequence_set_err(struct papr_rtas_sequence *seq, + int err); +extern const struct papr_rtas_blob *papr_rtas_retrieve(struct papr_rtas_sequence *seq); +extern long papr_rtas_setup_file_interface(struct papr_rtas_sequence *seq, + const struct file_operations *fops, char *name); +extern bool papr_rtas_sequence_should_stop(const struct papr_rtas_sequence *seq, + s32 status, bool init_state); +extern ssize_t papr_rtas_common_handle_read(struct file *file, + char __user *buf, size_t size, loff_t *off); +extern int papr_rtas_common_handle_release(struct inode *inode, + struct file *file); +extern loff_t papr_rtas_common_handle_seek(struct file *file, loff_t off, + int whence); +#endif /* _ASM_POWERPC_PAPR_RTAS_COMMON_H */ + diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c index c86950d7105a..f38c188fc4a1 100644 --- a/arch/powerpc/platforms/pseries/papr-vpd.c +++ b/arch/powerpc/platforms/pseries/papr-vpd.c @@ -2,7 +2,6 @@ #define pr_fmt(fmt) "papr-vpd: " fmt -#include <linux/anon_inodes.h> #include <linux/build_bug.h> #include <linux/file.h> #include <linux/fs.h> @@ -20,14 +19,7 @@ #include <asm/rtas-work-area.h> #include <asm/rtas.h> #include <uapi/asm/papr-vpd.h> - -/* - * Function-specific return values for ibm,get-vpd, derived from PAPR+ - * v2.13 7.3.20 "ibm,get-vpd RTAS Call". - */ -#define RTAS_IBM_GET_VPD_COMPLETE 0 /* All VPD has been retrieved. */ -#define RTAS_IBM_GET_VPD_MORE_DATA 1 /* More VPD is available. */ -#define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */ +#include "papr-rtas-common.h" /** * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd. @@ -91,13 +83,14 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) case RTAS_INVALID_PARAMETER: ret = -EINVAL; break; - case RTAS_IBM_GET_VPD_START_OVER: + case RTAS_SEQ_START_OVER: ret = -EAGAIN; + pr_info_ratelimited("VPD changed during retrieval, retrying\n"); break; - case RTAS_IBM_GET_VPD_MORE_DATA: + case RTAS_SEQ_MORE_DATA: params->sequence = rets[0]; fallthrough; - case RTAS_IBM_GET_VPD_COMPLETE: + case RTAS_SEQ_COMPLETE: params->written = rets[1]; /* * Kernel or firmware bug, do not continue. @@ -119,91 +112,6 @@ static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params) } /* - * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into - * an immutable buffer to be attached to a file descriptor. - */ -struct vpd_blob { - const char *data; - size_t len; -}; - -static bool vpd_blob_has_data(const struct vpd_blob *blob) -{ - return blob->data && blob->len; -} - -static void vpd_blob_free(const struct vpd_blob *blob) -{ - if (blob) { - kvfree(blob->data); - kfree(blob); - } -} - -/** - * vpd_blob_extend() - Append data to a &struct vpd_blob. - * @blob: The blob to extend. - * @data: The new data to append to @blob. - * @len: The length of @data. - * - * Context: May sleep. - * Return: -ENOMEM on allocation failure, 0 otherwise. 
- */ -static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len) -{ - const size_t new_len = blob->len + len; - const size_t old_len = blob->len; - const char *old_ptr = blob->data; - char *new_ptr; - - new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT); - if (!new_ptr) - return -ENOMEM; - - memcpy(&new_ptr[old_len], data, len); - blob->data = new_ptr; - blob->len = new_len; - return 0; -} - -/** - * vpd_blob_generate() - Construct a new &struct vpd_blob. - * @generator: Function that supplies the blob data. - * @arg: Context pointer supplied by caller, passed to @generator. - * - * The @generator callback is invoked until it returns NULL. @arg is - * passed to @generator in its first argument on each call. When - * @generator returns data, it should store the data length in its - * second argument. - * - * Context: May sleep. - * Return: A completely populated &struct vpd_blob, or NULL on error. - */ -static const struct vpd_blob * -vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg) -{ - struct vpd_blob *blob; - const char *buf; - size_t len; - int err = 0; - - blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT); - if (!blob) - return NULL; - - while (err == 0 && (buf = generator(arg, &len))) - err = vpd_blob_extend(blob, buf, len); - - if (err != 0 || !vpd_blob_has_data(blob)) - goto free_blob; - - return blob; -free_blob: - vpd_blob_free(blob); - return NULL; -} - -/* * Internal VPD sequence APIs. A VPD sequence is a series of calls to * ibm,get-vpd for a given location code. The sequence ends when an * error is encountered or all VPD for the location code has been @@ -211,30 +119,14 @@ free_blob: */ /** - * struct vpd_sequence - State for managing a VPD sequence. - * @error: Shall be zero as long as the sequence has not encountered an error, - * -ve errno otherwise. Use vpd_sequence_set_err() to update this. - * @params: Parameter block to pass to rtas_ibm_get_vpd(). - */ -struct vpd_sequence { - int error; - struct rtas_ibm_get_vpd_params params; -}; - -/** * vpd_sequence_begin() - Begin a VPD retrieval sequence. - * @seq: Uninitialized sequence state. - * @loc_code: Location code that defines the scope of the VPD to return. - * - * Initializes @seq with the resources necessary to carry out a VPD - * sequence. Callers must pass @seq to vpd_sequence_end() regardless - * of whether the sequence succeeds. + * @seq: vpd call parameters from sequence struct * * Context: May sleep. */ -static void vpd_sequence_begin(struct vpd_sequence *seq, - const struct papr_location_code *loc_code) +static void vpd_sequence_begin(struct papr_rtas_sequence *seq) { + struct rtas_ibm_get_vpd_params *vpd_params; /* * Use a static data structure for the location code passed to * RTAS to ensure it's in the RMA and avoid a separate work @@ -242,6 +134,7 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, */ static struct papr_location_code static_loc_code; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; /* * We could allocate the work area before acquiring the * function lock, but that would allow concurrent requests to @@ -249,14 +142,12 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, * allocate the work area under the lock. 
*/ mutex_lock(&rtas_ibm_get_vpd_lock); - static_loc_code = *loc_code; - *seq = (struct vpd_sequence) { - .params = { - .work_area = rtas_work_area_alloc(SZ_4K), - .loc_code = &static_loc_code, - .sequence = 1, - }, - }; + static_loc_code = *(struct papr_location_code *)vpd_params->loc_code; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + vpd_params->work_area = rtas_work_area_alloc(SZ_4K); + vpd_params->loc_code = &static_loc_code; + vpd_params->sequence = 1; + vpd_params->status = 0; } /** @@ -265,180 +156,39 @@ static void vpd_sequence_begin(struct vpd_sequence *seq, * * Releases resources obtained by vpd_sequence_begin(). */ -static void vpd_sequence_end(struct vpd_sequence *seq) +static void vpd_sequence_end(struct papr_rtas_sequence *seq) { - rtas_work_area_free(seq->params.work_area); - mutex_unlock(&rtas_ibm_get_vpd_lock); -} - -/** - * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence - * should continue. - * @seq: VPD sequence state. - * - * Examines the sequence error state and outputs of the last call to - * ibm,get-vpd to determine whether the sequence in progress should - * continue or stop. - * - * Return: True if the sequence has encountered an error or if all VPD for - * this sequence has been retrieved. False otherwise. - */ -static bool vpd_sequence_should_stop(const struct vpd_sequence *seq) -{ - bool done; - - if (seq->error) - return true; + struct rtas_ibm_get_vpd_params *vpd_params; - switch (seq->params.status) { - case 0: - if (seq->params.written == 0) - done = false; /* Initial state. */ - else - done = true; /* All data consumed. */ - break; - case 1: - done = false; /* More data available. */ - break; - default: - done = true; /* Error encountered. */ - break; - } - - return done; -} - -static int vpd_sequence_set_err(struct vpd_sequence *seq, int err) -{ - /* Preserve the first error recorded. */ - if (seq->error == 0) - seq->error = err; - - return seq->error; + vpd_params = (struct rtas_ibm_get_vpd_params *)seq->params; + rtas_work_area_free(vpd_params->work_area); + mutex_unlock(&rtas_ibm_get_vpd_lock); } /* - * Generator function to be passed to vpd_blob_generate(). + * Generator function to be passed to papr_rtas_blob_generate(). */ -static const char *vpd_sequence_fill_work_area(void *arg, size_t *len) +static const char *vpd_sequence_fill_work_area(struct papr_rtas_sequence *seq, + size_t *len) { - struct vpd_sequence *seq = arg; - struct rtas_ibm_get_vpd_params *p = &seq->params; + struct rtas_ibm_get_vpd_params *p; + bool init_state; - if (vpd_sequence_should_stop(seq)) + p = (struct rtas_ibm_get_vpd_params *)seq->params; + init_state = (p->written == 0) ? true : false; + + if (papr_rtas_sequence_should_stop(seq, p->status, init_state)) return NULL; - if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p))) + if (papr_rtas_sequence_set_err(seq, rtas_ibm_get_vpd(p))) return NULL; *len = p->written; return rtas_work_area_raw_buf(p->work_area); } -/* - * Higher-level VPD retrieval code below. These functions use the - * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based - * VPD handles for consumption by user space. - */ - -/** - * papr_vpd_run_sequence() - Run a single VPD retrieval sequence. - * @loc_code: Location code that defines the scope of VPD to return. - * - * Context: May sleep. Holds a mutex and an RTAS work area for its - * duration. Typically performs multiple sleepable slab - * allocations. - * - * Return: A populated &struct vpd_blob on success. Encoded error - * pointer otherwise. 
- */ -static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code) -{ - const struct vpd_blob *blob; - struct vpd_sequence seq; - - vpd_sequence_begin(&seq, loc_code); - blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq); - if (!blob) - vpd_sequence_set_err(&seq, -ENOMEM); - vpd_sequence_end(&seq); - - if (seq.error) { - vpd_blob_free(blob); - return ERR_PTR(seq.error); - } - - return blob; -} - -/** - * papr_vpd_retrieve() - Return the VPD for a location code. - * @loc_code: Location code that defines the scope of VPD to return. - * - * Run VPD sequences against @loc_code until a blob is successfully - * instantiated, or a hard error is encountered, or a fatal signal is - * pending. - * - * Context: May sleep. - * Return: A fully populated VPD blob when successful. Encoded error - * pointer otherwise. - */ -static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code) -{ - const struct vpd_blob *blob; - - /* - * EAGAIN means the sequence errored with a -4 (VPD changed) - * status from ibm,get-vpd, and we should attempt a new - * sequence. PAPR+ v2.13 R1–7.3.20–5 indicates that this - * should be a transient condition, not something that happens - * continuously. But we'll stop trying on a fatal signal. - */ - do { - blob = papr_vpd_run_sequence(loc_code); - if (!IS_ERR(blob)) /* Success. */ - break; - if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */ - break; - pr_info_ratelimited("VPD changed during retrieval, retrying\n"); - cond_resched(); - } while (!fatal_signal_pending(current)); - - return blob; -} - -static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off) -{ - const struct vpd_blob *blob = file->private_data; - - /* bug: we should not instantiate a handle without any data attached. 
*/ - if (!vpd_blob_has_data(blob)) { - pr_err_once("handle without data\n"); - return -EIO; - } - - return simple_read_from_buffer(buf, size, off, blob->data, blob->len); -} - -static int papr_vpd_handle_release(struct inode *inode, struct file *file) -{ - const struct vpd_blob *blob = file->private_data; - - vpd_blob_free(blob); - - return 0; -} - -static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence) -{ - const struct vpd_blob *blob = file->private_data; - - return fixed_size_llseek(file, off, whence, blob->len); -} - - static const struct file_operations papr_vpd_handle_ops = { - .read = papr_vpd_handle_read, - .llseek = papr_vpd_handle_seek, - .release = papr_vpd_handle_release, + .read = papr_rtas_common_handle_read, + .llseek = papr_rtas_common_handle_seek, + .release = papr_rtas_common_handle_release, }; /** @@ -460,10 +210,9 @@ static const struct file_operations papr_vpd_handle_ops = { */ static long papr_vpd_create_handle(struct papr_location_code __user *ulc) { + struct rtas_ibm_get_vpd_params vpd_params = {}; + struct papr_rtas_sequence seq = {}; struct papr_location_code klc; - const struct vpd_blob *blob; - struct file *file; - long err; int fd; if (copy_from_user(&klc, ulc, sizeof(klc))) @@ -472,30 +221,19 @@ static long papr_vpd_create_handle(struct papr_location_code __user *ulc) if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str))) return -EINVAL; - blob = papr_vpd_retrieve(&klc); - if (IS_ERR(blob)) - return PTR_ERR(blob); + seq = (struct papr_rtas_sequence) { + .begin = vpd_sequence_begin, + .end = vpd_sequence_end, + .work = vpd_sequence_fill_work_area, + }; - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); - if (fd < 0) { - err = fd; - goto free_blob; - } + vpd_params.loc_code = &klc; + seq.params = (void *)&vpd_params; + + fd = papr_rtas_setup_file_interface(&seq, &papr_vpd_handle_ops, + "[papr-vpd]"); - file = anon_inode_getfile_fmode("[papr-vpd]", &papr_vpd_handle_ops, - (void *)blob, O_RDONLY, - FMODE_LSEEK | FMODE_PREAD); - if (IS_ERR(file)) { - err = PTR_ERR(file); - goto put_fd; - } - fd_install(fd, file); return fd; -put_fd: - put_unused_fd(fd); -free_blob: - vpd_blob_free(blob); - return err; } /* diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index e14493685fe8..4a59ed1d62ce 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -207,7 +207,7 @@ unsigned int cpm2_get_irq(void) if (irq == 0) return(-1); - return irq_linear_revmap(cpm2_pic_host, irq); + return irq_find_mapping(cpm2_pic_host, irq); } static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq, @@ -259,7 +259,8 @@ void cpm2_pic_init(struct device_node *node) out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); /* create a legacy host */ - cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL); + cpm2_pic_host = irq_domain_create_linear(of_fwnode_handle(node), 64, + &cpm2_pic_host_ops, NULL); if (cpm2_pic_host == NULL) { printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); return; diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index 47db732981a8..e22fc638dbc7 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -138,7 +138,7 @@ static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, out_be32(&iop->dat, cpm2_gc->cpdata); } -static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) { struct 
of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc); @@ -150,6 +150,8 @@ static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) __cpm2_gpio32_set(mm_gc, pin_mask, value); spin_unlock_irqrestore(&cpm2_gc->lock, flags); + + return 0; } static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) @@ -208,7 +210,7 @@ int cpm2_gpiochip_add32(struct device *dev) gc->direction_input = cpm2_gpio32_dir_in; gc->direction_output = cpm2_gpio32_dir_out; gc->get = cpm2_gpio32_get; - gc->set = cpm2_gpio32_set; + gc->set_rv = cpm2_gpio32_set; gc->parent = dev; gc->owner = THIS_MODULE; diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index fb502b72fca1..b6f9774038e1 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c @@ -175,7 +175,7 @@ unsigned int ehv_pic_get_irq(void) * this will also setup revmap[] in the slow path for the first * time, next calls will always use fast path by indexing revmap */ - return irq_linear_revmap(global_ehv_pic->irqhost, irq); + return irq_find_mapping(global_ehv_pic->irqhost, irq); } static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, @@ -269,8 +269,9 @@ void __init ehv_pic_init(void) return; } - ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS, - &ehv_pic_host_ops, ehv_pic); + ehv_pic->irqhost = irq_domain_create_linear(of_fwnode_handle(np), + NR_EHV_PIC_INTS, + &ehv_pic_host_ops, ehv_pic); if (!ehv_pic->irqhost) { of_node_put(np); kfree(ehv_pic); diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 7b9a5ea9cad9..4fe8a7b1b288 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -412,7 +412,7 @@ static int fsl_of_msi_probe(struct platform_device *dev) } platform_set_drvdata(dev, msi); - msi->irqhost = irq_domain_add_linear(dev->dev.of_node, + msi->irqhost = irq_domain_create_linear(of_fwnode_handle(dev->dev.of_node), NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi); if (msi->irqhost == NULL) { diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c index a6c424680c37..0bc3f0b36528 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.c +++ b/arch/powerpc/sysdev/ge/ge_pic.c @@ -214,8 +214,9 @@ void __init gef_pic_init(struct device_node *np) } /* Setup an irq_domain structure */ - gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS, - &gef_pic_host_ops, NULL); + gef_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(np), + GEF_PIC_NUM_IRQS, + &gef_pic_host_ops, NULL); if (gef_pic_irq_host == NULL) return; @@ -244,7 +245,7 @@ unsigned int gef_pic_get_irq(void) if (active & (0x1 << hwirq)) break; } - virq = irq_linear_revmap(gef_pic_irq_host, + virq = irq_find_mapping(gef_pic_irq_host, (irq_hw_number_t)hwirq); } diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 06e391485da7..99bb2b916949 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -260,8 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr) raw_spin_unlock_irqrestore(&i8259_lock, flags); /* create a legacy host */ - i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, - &i8259_host_ops, NULL); + i8259_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0, + &i8259_host_ops, NULL); if (i8259_host == NULL) { printk(KERN_ERR "i8259: failed to allocate irq host !\n"); return; diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 
a35be0232978..70be2105865d 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -711,8 +711,9 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) if (ipic == NULL) return NULL; - ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS, - &ipic_host_ops, ipic); + ipic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), + NR_IPIC_INTS, + &ipic_host_ops, ipic); if (ipic->irqhost == NULL) { kfree(ipic); return NULL; @@ -800,7 +801,7 @@ unsigned int ipic_get_irq(void) if (irq == 0) /* 0 --> no irq is pending */ return 0; - return irq_linear_revmap(primary_ipic->irqhost, irq); + return irq_find_mapping(primary_ipic->irqhost, irq); } #ifdef CONFIG_SUSPEND diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 4afbab83a2e2..ad7310bba00b 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -27,6 +27,7 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/syscore_ops.h> #include <linux/ratelimit.h> #include <linux/pgtable.h> @@ -474,9 +475,9 @@ static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); } - printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", - PCI_SLOT(devfn), PCI_FUNC(devfn), - flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr); + pr_debug("mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", + PCI_SLOT(devfn), PCI_FUNC(devfn), + str_enabled_disabled(flags & HT_MSI_FLAGS_ENABLE), addr); if (!(flags & HT_MSI_FLAGS_ENABLE)) writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS); @@ -1483,9 +1484,9 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; - mpic->irqhost = irq_domain_add_linear(mpic->node, - intvec_top, - &mpic_host_ops, mpic); + mpic->irqhost = irq_domain_create_linear(of_fwnode_handle(mpic->node), + intvec_top, + &mpic_host_ops, mpic); /* * FIXME: The code leaks the MPIC object and mappings here; this @@ -1785,7 +1786,7 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) return 0; } - return irq_linear_revmap(mpic->irqhost, src); + return irq_find_mapping(mpic->irqhost, src); } unsigned int mpic_get_one_irq(struct mpic *mpic) @@ -1823,7 +1824,7 @@ unsigned int mpic_get_coreint_irq(void) return 0; } - return irq_linear_revmap(mpic->irqhost, src); + return irq_find_mapping(mpic->irqhost, src); #else return 0; #endif diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 0e42f7bad7db..07d0f6a83879 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -404,8 +404,8 @@ void __init tsi108_pci_int_init(struct device_node *node) { DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); - pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, - &pci_irq_domain_ops, NULL); + pci_irq_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0, + &pci_irq_domain_ops, NULL); if (pci_irq_host == NULL) { printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n"); return; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index dc2e61837396..f10592405024 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1464,7 +1464,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = { static void __init xive_init_host(struct 
device_node *np) { - xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL); + xive_irq_domain = irq_domain_create_tree(of_fwnode_handle(np), &xive_irq_domain_ops, NULL); if (WARN_ON(xive_irq_domain == NULL)) return; irq_set_default_domain(xive_irq_domain); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 88abffa8b54c..cb3a3244ae6f 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1770,7 +1770,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr, sp + STACK_INT_FRAME_REGS); break; } - printf("--- Exception: %lx %s at ", regs.trap, + printf("---- Exception: %lx %s at ", regs.trap, getvecname(TRAP(®s))); pc = regs.nip; lr = regs.link; diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig index c67095a3d669..cd9b776602f8 100644 --- a/arch/riscv/crypto/Kconfig +++ b/arch/riscv/crypto/Kconfig @@ -18,16 +18,6 @@ config CRYPTO_AES_RISCV64 - Zvkb vector crypto extension (CTR) - Zvkg vector crypto extension (XTS) -config CRYPTO_CHACHA_RISCV64 - tristate "Ciphers: ChaCha" - depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO - select CRYPTO_SKCIPHER - help - Length-preserving ciphers: ChaCha20 stream cipher algorithm - - Architecture: riscv64 using: - - Zvkb vector crypto extension - config CRYPTO_GHASH_RISCV64 tristate "Hash functions: GHASH" depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO @@ -38,17 +28,6 @@ config CRYPTO_GHASH_RISCV64 Architecture: riscv64 using: - Zvkg vector crypto extension -config CRYPTO_SHA256_RISCV64 - tristate "Hash functions: SHA-224 and SHA-256" - depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO - select CRYPTO_SHA256 - help - SHA-224 and SHA-256 secure hash algorithm (FIPS 180) - - Architecture: riscv64 using: - - Zvknha or Zvknhb vector crypto extensions - - Zvkb vector crypto extension - config CRYPTO_SHA512_RISCV64 tristate "Hash functions: SHA-384 and SHA-512" depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO @@ -64,7 +43,7 @@ config CRYPTO_SM3_RISCV64 tristate "Hash functions: SM3 (ShangMi 3)" depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO select CRYPTO_HASH - select CRYPTO_SM3 + select CRYPTO_LIB_SM3 help SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012) diff --git a/arch/riscv/crypto/Makefile b/arch/riscv/crypto/Makefile index 247c7bc7288c..e10e8257734e 100644 --- a/arch/riscv/crypto/Makefile +++ b/arch/riscv/crypto/Makefile @@ -4,15 +4,9 @@ obj-$(CONFIG_CRYPTO_AES_RISCV64) += aes-riscv64.o aes-riscv64-y := aes-riscv64-glue.o aes-riscv64-zvkned.o \ aes-riscv64-zvkned-zvbb-zvkg.o aes-riscv64-zvkned-zvkb.o -obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o -chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o - obj-$(CONFIG_CRYPTO_GHASH_RISCV64) += ghash-riscv64.o ghash-riscv64-y := ghash-riscv64-glue.o ghash-riscv64-zvkg.o -obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o -sha256-riscv64-y := sha256-riscv64-glue.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o - obj-$(CONFIG_CRYPTO_SHA512_RISCV64) += sha512-riscv64.o sha512-riscv64-y := sha512-riscv64-glue.o sha512-riscv64-zvknhb-zvkb.o diff --git a/arch/riscv/crypto/chacha-riscv64-glue.c b/arch/riscv/crypto/chacha-riscv64-glue.c deleted file mode 100644 index 10b46f36375a..000000000000 --- a/arch/riscv/crypto/chacha-riscv64-glue.c +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * ChaCha20 using the RISC-V vector crypto extensions - * - * Copyright (C) 2023 SiFive, Inc. 
- * Author: Jerry Shih <jerry.shih@sifive.com> - */ - -#include <asm/simd.h> -#include <asm/vector.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/skcipher.h> -#include <linux/linkage.h> -#include <linux/module.h> - -asmlinkage void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out, - size_t len, const u32 iv[4]); - -static int riscv64_chacha20_crypt(struct skcipher_request *req) -{ - u32 iv[CHACHA_IV_SIZE / sizeof(u32)]; - u8 block_buffer[CHACHA_BLOCK_SIZE]; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - unsigned int nbytes; - unsigned int tail_bytes; - int err; - - iv[0] = get_unaligned_le32(req->iv); - iv[1] = get_unaligned_le32(req->iv + 4); - iv[2] = get_unaligned_le32(req->iv + 8); - iv[3] = get_unaligned_le32(req->iv + 12); - - err = skcipher_walk_virt(&walk, req, false); - while (walk.nbytes) { - nbytes = walk.nbytes & ~(CHACHA_BLOCK_SIZE - 1); - tail_bytes = walk.nbytes & (CHACHA_BLOCK_SIZE - 1); - kernel_vector_begin(); - if (nbytes) { - chacha20_zvkb(ctx->key, walk.src.virt.addr, - walk.dst.virt.addr, nbytes, iv); - iv[0] += nbytes / CHACHA_BLOCK_SIZE; - } - if (walk.nbytes == walk.total && tail_bytes > 0) { - memcpy(block_buffer, walk.src.virt.addr + nbytes, - tail_bytes); - chacha20_zvkb(ctx->key, block_buffer, block_buffer, - CHACHA_BLOCK_SIZE, iv); - memcpy(walk.dst.virt.addr + nbytes, block_buffer, - tail_bytes); - tail_bytes = 0; - } - kernel_vector_end(); - - err = skcipher_walk_done(&walk, tail_bytes); - } - - return err; -} - -static struct skcipher_alg riscv64_chacha_alg = { - .setkey = chacha20_setkey, - .encrypt = riscv64_chacha20_crypt, - .decrypt = riscv64_chacha20_crypt, - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 4 * CHACHA_BLOCK_SIZE, - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct chacha_ctx), - .cra_priority = 300, - .cra_name = "chacha20", - .cra_driver_name = "chacha20-riscv64-zvkb", - .cra_module = THIS_MODULE, - }, -}; - -static int __init riscv64_chacha_mod_init(void) -{ - if (riscv_isa_extension_available(NULL, ZVKB) && - riscv_vector_vlen() >= 128) - return crypto_register_skcipher(&riscv64_chacha_alg); - - return -ENODEV; -} - -static void __exit riscv64_chacha_mod_exit(void) -{ - crypto_unregister_skcipher(&riscv64_chacha_alg); -} - -module_init(riscv64_chacha_mod_init); -module_exit(riscv64_chacha_mod_exit); - -MODULE_DESCRIPTION("ChaCha20 (RISC-V accelerated)"); -MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("chacha20"); diff --git a/arch/riscv/crypto/ghash-riscv64-glue.c b/arch/riscv/crypto/ghash-riscv64-glue.c index 312e7891fd0a..d86073d25387 100644 --- a/arch/riscv/crypto/ghash-riscv64-glue.c +++ b/arch/riscv/crypto/ghash-riscv64-glue.c @@ -11,11 +11,16 @@ #include <asm/simd.h> #include <asm/vector.h> +#include <crypto/b128ops.h> +#include <crypto/gf128mul.h> #include <crypto/ghash.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> -#include <linux/linkage.h> +#include <crypto/utils.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> asmlinkage void ghash_zvkg(be128 *accumulator, const be128 *key, const u8 *data, size_t len); @@ -26,8 +31,6 @@ struct riscv64_ghash_tfm_ctx { struct riscv64_ghash_desc_ctx { be128 accumulator; - u8 buffer[GHASH_BLOCK_SIZE]; - u32 bytes; }; static int 
riscv64_ghash_setkey(struct crypto_shash *tfm, const u8 *key, @@ -78,50 +81,24 @@ static int riscv64_ghash_update(struct shash_desc *desc, const u8 *src, { const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int len; - - if (dctx->bytes) { - if (dctx->bytes + srclen < GHASH_BLOCK_SIZE) { - memcpy(dctx->buffer + dctx->bytes, src, srclen); - dctx->bytes += srclen; - return 0; - } - memcpy(dctx->buffer + dctx->bytes, src, - GHASH_BLOCK_SIZE - dctx->bytes); - riscv64_ghash_blocks(tctx, dctx, dctx->buffer, - GHASH_BLOCK_SIZE); - src += GHASH_BLOCK_SIZE - dctx->bytes; - srclen -= GHASH_BLOCK_SIZE - dctx->bytes; - dctx->bytes = 0; - } - - len = round_down(srclen, GHASH_BLOCK_SIZE); - if (len) { - riscv64_ghash_blocks(tctx, dctx, src, len); - src += len; - srclen -= len; - } - if (srclen) { - memcpy(dctx->buffer, src, srclen); - dctx->bytes = srclen; - } - - return 0; + riscv64_ghash_blocks(tctx, dctx, src, + round_down(srclen, GHASH_BLOCK_SIZE)); + return srclen - round_down(srclen, GHASH_BLOCK_SIZE); } -static int riscv64_ghash_final(struct shash_desc *desc, u8 *out) +static int riscv64_ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { const struct riscv64_ghash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct riscv64_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - int i; - if (dctx->bytes) { - for (i = dctx->bytes; i < GHASH_BLOCK_SIZE; i++) - dctx->buffer[i] = 0; + if (len) { + u8 buf[GHASH_BLOCK_SIZE] = {}; - riscv64_ghash_blocks(tctx, dctx, dctx->buffer, - GHASH_BLOCK_SIZE); + memcpy(buf, src, len); + riscv64_ghash_blocks(tctx, dctx, buf, GHASH_BLOCK_SIZE); + memzero_explicit(buf, sizeof(buf)); } memcpy(out, &dctx->accumulator, GHASH_DIGEST_SIZE); @@ -131,7 +108,7 @@ static int riscv64_ghash_final(struct shash_desc *desc, u8 *out) static struct shash_alg riscv64_ghash_alg = { .init = riscv64_ghash_init, .update = riscv64_ghash_update, - .final = riscv64_ghash_final, + .finup = riscv64_ghash_finup, .setkey = riscv64_ghash_setkey, .descsize = sizeof(struct riscv64_ghash_desc_ctx), .digestsize = GHASH_DIGEST_SIZE, @@ -139,6 +116,7 @@ static struct shash_alg riscv64_ghash_alg = { .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct riscv64_ghash_tfm_ctx), .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_name = "ghash", .cra_driver_name = "ghash-riscv64-zvkg", .cra_module = THIS_MODULE, diff --git a/arch/riscv/crypto/sha256-riscv64-glue.c b/arch/riscv/crypto/sha256-riscv64-glue.c deleted file mode 100644 index 71e051e40a64..000000000000 --- a/arch/riscv/crypto/sha256-riscv64-glue.c +++ /dev/null @@ -1,137 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * SHA-256 and SHA-224 using the RISC-V vector crypto extensions - * - * Copyright (C) 2022 VRULL GmbH - * Author: Heiko Stuebner <heiko.stuebner@vrull.eu> - * - * Copyright (C) 2023 SiFive, Inc. - * Author: Jerry Shih <jerry.shih@sifive.com> - */ - -#include <asm/simd.h> -#include <asm/vector.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <crypto/sha256_base.h> -#include <linux/linkage.h> -#include <linux/module.h> - -/* - * Note: the asm function only uses the 'state' field of struct sha256_state. - * It is assumed to be the first field. 
- */ -asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb( - struct sha256_state *state, const u8 *data, int num_blocks); - -static int riscv64_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - /* - * Ensure struct sha256_state begins directly with the SHA-256 - * 256-bit internal state, as this is what the asm function expects. - */ - BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); - - if (crypto_simd_usable()) { - kernel_vector_begin(); - sha256_base_do_update(desc, data, len, - sha256_transform_zvknha_or_zvknhb_zvkb); - kernel_vector_end(); - } else { - crypto_sha256_update(desc, data, len); - } - return 0; -} - -static int riscv64_sha256_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - if (crypto_simd_usable()) { - kernel_vector_begin(); - if (len) - sha256_base_do_update( - desc, data, len, - sha256_transform_zvknha_or_zvknhb_zvkb); - sha256_base_do_finalize( - desc, sha256_transform_zvknha_or_zvknhb_zvkb); - kernel_vector_end(); - - return sha256_base_finish(desc, out); - } - - return crypto_sha256_finup(desc, data, len, out); -} - -static int riscv64_sha256_final(struct shash_desc *desc, u8 *out) -{ - return riscv64_sha256_finup(desc, NULL, 0, out); -} - -static int riscv64_sha256_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_base_init(desc) ?: - riscv64_sha256_finup(desc, data, len, out); -} - -static struct shash_alg riscv64_sha256_algs[] = { - { - .init = sha256_base_init, - .update = riscv64_sha256_update, - .final = riscv64_sha256_final, - .finup = riscv64_sha256_finup, - .digest = riscv64_sha256_digest, - .descsize = sizeof(struct sha256_state), - .digestsize = SHA256_DIGEST_SIZE, - .base = { - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_priority = 300, - .cra_name = "sha256", - .cra_driver_name = "sha256-riscv64-zvknha_or_zvknhb-zvkb", - .cra_module = THIS_MODULE, - }, - }, { - .init = sha224_base_init, - .update = riscv64_sha256_update, - .final = riscv64_sha256_final, - .finup = riscv64_sha256_finup, - .descsize = sizeof(struct sha256_state), - .digestsize = SHA224_DIGEST_SIZE, - .base = { - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_priority = 300, - .cra_name = "sha224", - .cra_driver_name = "sha224-riscv64-zvknha_or_zvknhb-zvkb", - .cra_module = THIS_MODULE, - }, - }, -}; - -static int __init riscv64_sha256_mod_init(void) -{ - /* Both zvknha and zvknhb provide the SHA-256 instructions. 
*/ - if ((riscv_isa_extension_available(NULL, ZVKNHA) || - riscv_isa_extension_available(NULL, ZVKNHB)) && - riscv_isa_extension_available(NULL, ZVKB) && - riscv_vector_vlen() >= 128) - return crypto_register_shashes(riscv64_sha256_algs, - ARRAY_SIZE(riscv64_sha256_algs)); - - return -ENODEV; -} - -static void __exit riscv64_sha256_mod_exit(void) -{ - crypto_unregister_shashes(riscv64_sha256_algs, - ARRAY_SIZE(riscv64_sha256_algs)); -} - -module_init(riscv64_sha256_mod_init); -module_exit(riscv64_sha256_mod_exit); - -MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)"); -MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha224"); diff --git a/arch/riscv/crypto/sha512-riscv64-glue.c b/arch/riscv/crypto/sha512-riscv64-glue.c index 43b56a08aeb5..4634fca78ae2 100644 --- a/arch/riscv/crypto/sha512-riscv64-glue.c +++ b/arch/riscv/crypto/sha512-riscv64-glue.c @@ -14,7 +14,7 @@ #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <crypto/sha512_base.h> -#include <linux/linkage.h> +#include <linux/kernel.h> #include <linux/module.h> /* @@ -24,8 +24,8 @@ asmlinkage void sha512_transform_zvknhb_zvkb( struct sha512_state *state, const u8 *data, int num_blocks); -static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static void sha512_block(struct sha512_state *state, const u8 *data, + int num_blocks) { /* * Ensure struct sha512_state begins directly with the SHA-512 @@ -35,35 +35,24 @@ static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data, if (crypto_simd_usable()) { kernel_vector_begin(); - sha512_base_do_update(desc, data, len, - sha512_transform_zvknhb_zvkb); + sha512_transform_zvknhb_zvkb(state, data, num_blocks); kernel_vector_end(); } else { - crypto_sha512_update(desc, data, len); + sha512_generic_block_fn(state, data, num_blocks); } - return 0; } -static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) +static int riscv64_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - if (crypto_simd_usable()) { - kernel_vector_begin(); - if (len) - sha512_base_do_update(desc, data, len, - sha512_transform_zvknhb_zvkb); - sha512_base_do_finalize(desc, sha512_transform_zvknhb_zvkb); - kernel_vector_end(); - - return sha512_base_finish(desc, out); - } - - return crypto_sha512_finup(desc, data, len, out); + return sha512_base_do_update_blocks(desc, data, len, sha512_block); } -static int riscv64_sha512_final(struct shash_desc *desc, u8 *out) +static int riscv64_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - return riscv64_sha512_finup(desc, NULL, 0, out); + sha512_base_do_finup(desc, data, len, sha512_block); + return sha512_base_finish(desc, out); } static int riscv64_sha512_digest(struct shash_desc *desc, const u8 *data, @@ -77,14 +66,15 @@ static struct shash_alg riscv64_sha512_algs[] = { { .init = sha512_base_init, .update = riscv64_sha512_update, - .final = riscv64_sha512_final, .finup = riscv64_sha512_finup, .digest = riscv64_sha512_digest, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA512_DIGEST_SIZE, .base = { .cra_blocksize = SHA512_BLOCK_SIZE, .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_name = "sha512", .cra_driver_name = "sha512-riscv64-zvknhb-zvkb", .cra_module = THIS_MODULE, @@ -92,13 +82,14 @@ static struct 
shash_alg riscv64_sha512_algs[] = { }, { .init = sha384_base_init, .update = riscv64_sha512_update, - .final = riscv64_sha512_final, .finup = riscv64_sha512_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .digestsize = SHA384_DIGEST_SIZE, .base = { .cra_blocksize = SHA384_BLOCK_SIZE, .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_name = "sha384", .cra_driver_name = "sha384-riscv64-zvknhb-zvkb", .cra_module = THIS_MODULE, diff --git a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S index 3a9ae210f915..89f4a10d12dd 100644 --- a/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S +++ b/arch/riscv/crypto/sha512-riscv64-zvknhb-zvkb.S @@ -43,7 +43,7 @@ // - RISC-V Vector SHA-2 Secure Hash extension ('Zvknhb') // - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb') -#include <linux/cfi_types.h> +#include <linux/linkage.h> .text .option arch, +zvknhb, +zvkb @@ -95,7 +95,7 @@ // void sha512_transform_zvknhb_zvkb(u64 state[8], const u8 *data, // int num_blocks); -SYM_TYPED_FUNC_START(sha512_transform_zvknhb_zvkb) +SYM_FUNC_START(sha512_transform_zvknhb_zvkb) // Setup mask for the vmerge to replace the first word (idx==0) in // message scheduling. There are 4 words, so an 8-bit mask suffices. diff --git a/arch/riscv/crypto/sm3-riscv64-glue.c b/arch/riscv/crypto/sm3-riscv64-glue.c index e1737a970c7c..abdfe4a63a27 100644 --- a/arch/riscv/crypto/sm3-riscv64-glue.c +++ b/arch/riscv/crypto/sm3-riscv64-glue.c @@ -13,8 +13,9 @@ #include <asm/vector.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> +#include <crypto/sm3.h> #include <crypto/sm3_base.h> -#include <linux/linkage.h> +#include <linux/kernel.h> #include <linux/module.h> /* @@ -24,8 +25,8 @@ asmlinkage void sm3_transform_zvksh_zvkb( struct sm3_state *state, const u8 *data, int num_blocks); -static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static void sm3_block(struct sm3_state *state, const u8 *data, + int num_blocks) { /* * Ensure struct sm3_state begins directly with the SM3 @@ -35,52 +36,36 @@ static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data, if (crypto_simd_usable()) { kernel_vector_begin(); - sm3_base_do_update(desc, data, len, sm3_transform_zvksh_zvkb); + sm3_transform_zvksh_zvkb(state, data, num_blocks); kernel_vector_end(); } else { - sm3_update(shash_desc_ctx(desc), data, len); + sm3_block_generic(state, data, num_blocks); } - return 0; } -static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) +static int riscv64_sm3_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct sm3_state *ctx; - - if (crypto_simd_usable()) { - kernel_vector_begin(); - if (len) - sm3_base_do_update(desc, data, len, - sm3_transform_zvksh_zvkb); - sm3_base_do_finalize(desc, sm3_transform_zvksh_zvkb); - kernel_vector_end(); - - return sm3_base_finish(desc, out); - } - - ctx = shash_desc_ctx(desc); - if (len) - sm3_update(ctx, data, len); - sm3_final(ctx, out); - - return 0; + return sm3_base_do_update_blocks(desc, data, len, sm3_block); } -static int riscv64_sm3_final(struct shash_desc *desc, u8 *out) +static int riscv64_sm3_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - return riscv64_sm3_finup(desc, NULL, 0, out); + sm3_base_do_finup(desc, data, len, sm3_block); + return sm3_base_finish(desc, out); } static struct shash_alg riscv64_sm3_alg = 
{ .init = sm3_base_init, .update = riscv64_sm3_update, - .final = riscv64_sm3_final, .finup = riscv64_sm3_finup, - .descsize = sizeof(struct sm3_state), + .descsize = SM3_STATE_SIZE, .digestsize = SM3_DIGEST_SIZE, .base = { .cra_blocksize = SM3_BLOCK_SIZE, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_priority = 300, .cra_name = "sm3", .cra_driver_name = "sm3-riscv64-zvksh-zvkb", diff --git a/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S index a2b65d961c04..4fe754846f65 100644 --- a/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S +++ b/arch/riscv/crypto/sm3-riscv64-zvksh-zvkb.S @@ -43,7 +43,7 @@ // - RISC-V Vector SM3 Secure Hash extension ('Zvksh') // - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb') -#include <linux/cfi_types.h> +#include <linux/linkage.h> .text .option arch, +zvksh, +zvkb @@ -81,7 +81,7 @@ .endm // void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks); -SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb) +SYM_FUNC_START(sm3_transform_zvksh_zvkb) // Load the state and endian-swap each 32-bit word. vsetivli zero, 8, e32, m2, ta, ma diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index cd627ec289f1..bfc8ea5f9319 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -52,6 +52,8 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s); DECLARE_DO_ERROR_INFO(do_trap_ecall_m); DECLARE_DO_ERROR_INFO(do_trap_break); +asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs); +asmlinkage void ret_from_fork_user(struct pt_regs *regs); asmlinkage void handle_bad_stack(struct pt_regs *regs); asmlinkage void do_page_fault(struct pt_regs *regs); asmlinkage void do_irq(struct pt_regs *regs); diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 33a5a9f2a0d4..0fb338000c6d 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -319,17 +319,21 @@ SYM_CODE_END(handle_kernel_stack_overflow) ASM_NOKPROBE(handle_kernel_stack_overflow) #endif -SYM_CODE_START(ret_from_fork) +SYM_CODE_START(ret_from_fork_kernel_asm) + call schedule_tail + move a0, s1 /* fn_arg */ + move a1, s0 /* fn */ + move a2, sp /* pt_regs */ + call ret_from_fork_kernel + j ret_from_exception +SYM_CODE_END(ret_from_fork_kernel_asm) + +SYM_CODE_START(ret_from_fork_user_asm) call schedule_tail - beqz s0, 1f /* not from kernel thread */ - /* Call fn(arg) */ - move a0, s1 - jalr s0 -1: move a0, sp /* pt_regs */ - call syscall_exit_to_user_mode + call ret_from_fork_user j ret_from_exception -SYM_CODE_END(ret_from_fork) +SYM_CODE_END(ret_from_fork_user_asm) #ifdef CONFIG_IRQ_STACKS /* diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 15d8f75902f8..bbf7ec6a75c0 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -17,7 +17,9 @@ #include <linux/ptrace.h> #include <linux/uaccess.h> #include <linux/personality.h> +#include <linux/entry-common.h> +#include <asm/asm-prototypes.h> #include <asm/unistd.h> #include <asm/processor.h> #include <asm/csr.h> @@ -36,7 +38,8 @@ unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif -extern asmlinkage void ret_from_fork(void); +extern asmlinkage void ret_from_fork_kernel_asm(void); +extern asmlinkage void ret_from_fork_user_asm(void); void noinstr arch_cpu_idle(void) { @@ -206,6 +209,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 
return 0; } +asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs) +{ + fn(fn_arg); + + syscall_exit_to_user_mode(regs); +} + +asmlinkage void ret_from_fork_user(struct pt_regs *regs) +{ + syscall_exit_to_user_mode(regs); +} + int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long clone_flags = args->flags; @@ -228,6 +243,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) p->thread.s[0] = (unsigned long)args->fn; p->thread.s[1] = (unsigned long)args->fn_arg; + p->thread.ra = (unsigned long)ret_from_fork_kernel_asm; } else { *childregs = *(current_pt_regs()); /* Turn off status.VS */ @@ -237,12 +253,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) if (clone_flags & CLONE_SETTLS) childregs->tp = tls; childregs->a0 = 0; /* Return value of fork() */ - p->thread.s[0] = 0; + p->thread.ra = (unsigned long)ret_from_fork_user_asm; } p->thread.riscv_v_flags = 0; if (has_vector() || has_xtheadvector()) riscv_v_thread_alloc(p); - p->thread.ra = (unsigned long)ret_from_fork; p->thread.sp = (unsigned long)childregs; /* kernel sp */ return 0; } diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index b1c46153606a..0baec92d2f55 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-y += crypto/ lib-y += delay.o lib-y += memcpy.o lib-y += memset.o diff --git a/arch/riscv/lib/crypto/Kconfig b/arch/riscv/lib/crypto/Kconfig new file mode 100644 index 000000000000..47c99ea97ce2 --- /dev/null +++ b/arch/riscv/lib/crypto/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_CHACHA_RISCV64 + tristate + depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + default CRYPTO_LIB_CHACHA + select CRYPTO_ARCH_HAVE_LIB_CHACHA + select CRYPTO_LIB_CHACHA_GENERIC + +config CRYPTO_SHA256_RISCV64 + tristate + depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD + select CRYPTO_LIB_SHA256_GENERIC diff --git a/arch/riscv/lib/crypto/Makefile b/arch/riscv/lib/crypto/Makefile new file mode 100644 index 000000000000..b7cb877a2c07 --- /dev/null +++ b/arch/riscv/lib/crypto/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_CHACHA_RISCV64) += chacha-riscv64.o +chacha-riscv64-y := chacha-riscv64-glue.o chacha-riscv64-zvkb.o + +obj-$(CONFIG_CRYPTO_SHA256_RISCV64) += sha256-riscv64.o +sha256-riscv64-y := sha256.o sha256-riscv64-zvknha_or_zvknhb-zvkb.o diff --git a/arch/riscv/lib/crypto/chacha-riscv64-glue.c b/arch/riscv/lib/crypto/chacha-riscv64-glue.c new file mode 100644 index 000000000000..8c3f11d79be3 --- /dev/null +++ b/arch/riscv/lib/crypto/chacha-riscv64-glue.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ChaCha stream cipher (RISC-V optimized) + * + * Copyright (C) 2023 SiFive, Inc. 
+ * Author: Jerry Shih <jerry.shih@sifive.com> + */ + +#include <asm/simd.h> +#include <asm/vector.h> +#include <crypto/chacha.h> +#include <crypto/internal/simd.h> +#include <linux/linkage.h> +#include <linux/module.h> + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_zvkb); + +asmlinkage void chacha_zvkb(struct chacha_state *state, const u8 *in, u8 *out, + size_t nblocks, int nrounds); + +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) +{ + hchacha_block_generic(state, out, nrounds); +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + u8 block_buffer[CHACHA_BLOCK_SIZE]; + unsigned int full_blocks = bytes / CHACHA_BLOCK_SIZE; + unsigned int tail_bytes = bytes % CHACHA_BLOCK_SIZE; + + if (!static_branch_likely(&use_zvkb) || !crypto_simd_usable()) + return chacha_crypt_generic(state, dst, src, bytes, nrounds); + + kernel_vector_begin(); + if (full_blocks) { + chacha_zvkb(state, src, dst, full_blocks, nrounds); + src += full_blocks * CHACHA_BLOCK_SIZE; + dst += full_blocks * CHACHA_BLOCK_SIZE; + } + if (tail_bytes) { + memcpy(block_buffer, src, tail_bytes); + chacha_zvkb(state, block_buffer, block_buffer, 1, nrounds); + memcpy(dst, block_buffer, tail_bytes); + } + kernel_vector_end(); +} +EXPORT_SYMBOL(chacha_crypt_arch); + +bool chacha_is_arch_optimized(void) +{ + return static_key_enabled(&use_zvkb); +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +static int __init riscv64_chacha_mod_init(void) +{ + if (riscv_isa_extension_available(NULL, ZVKB) && + riscv_vector_vlen() >= 128) + static_branch_enable(&use_zvkb); + return 0; +} +subsys_initcall(riscv64_chacha_mod_init); + +static void __exit riscv64_chacha_mod_exit(void) +{ +} +module_exit(riscv64_chacha_mod_exit); + +MODULE_DESCRIPTION("ChaCha stream cipher (RISC-V optimized)"); +MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>"); +MODULE_LICENSE("GPL"); diff --git a/arch/riscv/crypto/chacha-riscv64-zvkb.S b/arch/riscv/lib/crypto/chacha-riscv64-zvkb.S index bf057737ac69..b777d0b4e379 100644 --- a/arch/riscv/crypto/chacha-riscv64-zvkb.S +++ b/arch/riscv/lib/crypto/chacha-riscv64-zvkb.S @@ -46,11 +46,11 @@ .text .option arch, +zvkb -#define KEYP a0 +#define STATEP a0 #define INP a1 #define OUTP a2 -#define LEN a3 -#define IVP a4 +#define NBLOCKS a3 +#define NROUNDS a4 #define CONSTS0 a5 #define CONSTS1 a6 @@ -59,7 +59,7 @@ #define TMP t1 #define VL t2 #define STRIDE t3 -#define NROUNDS t4 +#define ROUND_CTR t4 #define KEY0 s0 #define KEY1 s1 #define KEY2 s2 @@ -132,14 +132,16 @@ vror.vi \b3, \b3, 32 - 7 .endm -// void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out, size_t len, -// const u32 iv[4]); +// void chacha_zvkb(struct chacha_state *state, const u8 *in, u8 *out, +// size_t nblocks, int nrounds); // -// |len| must be nonzero and a multiple of 64 (CHACHA_BLOCK_SIZE). -// The counter is treated as 32-bit, following the RFC7539 convention. -SYM_FUNC_START(chacha20_zvkb) - srli LEN, LEN, 6 // Bytes to blocks - +// |nblocks| is the number of 64-byte blocks to process, and must be nonzero. +// +// |state| gives the ChaCha state matrix, including the 32-bit counter in +// state->x[12] following the RFC7539 convention; note that this differs from +// the original ChaCha paper which uses a 64-bit counter in state->x[12..13]. +// The updated 32-bit counter is written back to state->x[12] before returning.
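[Editor's note] The comment above describes the RFC 7539 state layout that chacha_zvkb() and the rewritten glue code now consume directly instead of a separate key/IV pair. As a reference, here is a minimal, self-contained userspace sketch of building that state matrix (constants, key, 32-bit block counter, 96-bit nonce); the struct and helper names are illustrative only and are not the kernel's chacha API.

#include <stdint.h>
#include <string.h>

/* Illustrative only: a 16-word ChaCha state in the RFC 7539 layout. */
struct example_chacha_state {
	uint32_t x[16];
};

static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/*
 * x[0..3]   = "expand 32-byte k" constants
 * x[4..11]  = 256-bit key as little-endian words
 * x[12]     = 32-bit block counter (advanced once per 64-byte block)
 * x[13..15] = 96-bit nonce
 */
static void example_chacha_init(struct example_chacha_state *st,
				const uint8_t key[32],
				const uint8_t nonce[12], uint32_t counter)
{
	static const uint32_t consts[4] = {
		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
	};
	int i;

	memcpy(st->x, consts, sizeof(consts));
	for (i = 0; i < 8; i++)
		st->x[4 + i] = load_le32(key + 4 * i);
	st->x[12] = counter;
	for (i = 0; i < 3; i++)
		st->x[13 + i] = load_le32(nonce + 4 * i);
}

With this layout, advancing the keystream by one 64-byte block is simply incrementing x[12], which is the counter the assembly writes back to the state before returning.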
+SYM_FUNC_START(chacha_zvkb) addi sp, sp, -96 sd s0, 0(sp) sd s1, 8(sp) @@ -157,26 +159,26 @@ SYM_FUNC_START(chacha20_zvkb) li STRIDE, 64 // Set up the initial state matrix in scalar registers. - li CONSTS0, 0x61707865 // "expa" little endian - li CONSTS1, 0x3320646e // "nd 3" little endian - li CONSTS2, 0x79622d32 // "2-by" little endian - li CONSTS3, 0x6b206574 // "te k" little endian - lw KEY0, 0(KEYP) - lw KEY1, 4(KEYP) - lw KEY2, 8(KEYP) - lw KEY3, 12(KEYP) - lw KEY4, 16(KEYP) - lw KEY5, 20(KEYP) - lw KEY6, 24(KEYP) - lw KEY7, 28(KEYP) - lw COUNTER, 0(IVP) - lw NONCE0, 4(IVP) - lw NONCE1, 8(IVP) - lw NONCE2, 12(IVP) + lw CONSTS0, 0(STATEP) + lw CONSTS1, 4(STATEP) + lw CONSTS2, 8(STATEP) + lw CONSTS3, 12(STATEP) + lw KEY0, 16(STATEP) + lw KEY1, 20(STATEP) + lw KEY2, 24(STATEP) + lw KEY3, 28(STATEP) + lw KEY4, 32(STATEP) + lw KEY5, 36(STATEP) + lw KEY6, 40(STATEP) + lw KEY7, 44(STATEP) + lw COUNTER, 48(STATEP) + lw NONCE0, 52(STATEP) + lw NONCE1, 56(STATEP) + lw NONCE2, 60(STATEP) .Lblock_loop: // Set vl to the number of blocks to process in this iteration. - vsetvli VL, LEN, e32, m1, ta, ma + vsetvli VL, NBLOCKS, e32, m1, ta, ma // Set up the initial state matrix for the next VL blocks in v0-v15. // v{i} holds the i'th 32-bit word of the state matrix for all blocks. @@ -203,16 +205,16 @@ SYM_FUNC_START(chacha20_zvkb) // v{16+i} holds the i'th 32-bit word for all blocks. vlsseg8e32.v v16, (INP), STRIDE - li NROUNDS, 20 + mv ROUND_CTR, NROUNDS .Lnext_doubleround: - addi NROUNDS, NROUNDS, -2 + addi ROUND_CTR, ROUND_CTR, -2 // column round chacha_round v0, v4, v8, v12, v1, v5, v9, v13, \ v2, v6, v10, v14, v3, v7, v11, v15 // diagonal round chacha_round v0, v5, v10, v15, v1, v6, v11, v12, \ v2, v7, v8, v13, v3, v4, v9, v14 - bnez NROUNDS, .Lnext_doubleround + bnez ROUND_CTR, .Lnext_doubleround // Load the second half of the input data for each block into v24-v31. // v{24+i} holds the {8+i}'th 32-bit word for all blocks. @@ -271,12 +273,13 @@ SYM_FUNC_START(chacha20_zvkb) // Update the counter, the remaining number of blocks, and the input and // output pointers according to the number of blocks processed (VL). add COUNTER, COUNTER, VL - sub LEN, LEN, VL + sub NBLOCKS, NBLOCKS, VL slli TMP, VL, 6 add OUTP, OUTP, TMP add INP, INP, TMP - bnez LEN, .Lblock_loop + bnez NBLOCKS, .Lblock_loop + sw COUNTER, 48(STATEP) ld s0, 0(sp) ld s1, 8(sp) ld s2, 16(sp) @@ -291,4 +294,4 @@ SYM_FUNC_START(chacha20_zvkb) ld s11, 88(sp) addi sp, sp, 96 ret -SYM_FUNC_END(chacha20_zvkb) +SYM_FUNC_END(chacha_zvkb) diff --git a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S b/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S index 8ebcc17de4dc..fad501ad0617 100644 --- a/arch/riscv/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S +++ b/arch/riscv/lib/crypto/sha256-riscv64-zvknha_or_zvknhb-zvkb.S @@ -43,7 +43,7 @@ // - RISC-V Vector SHA-2 Secure Hash extension ('Zvknha' or 'Zvknhb') // - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb') -#include <linux/cfi_types.h> +#include <linux/linkage.h> .text .option arch, +zvknha, +zvkb @@ -106,9 +106,9 @@ sha256_4rounds \last, \k3, W3, W0, W1, W2 .endm -// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[8], const u8 *data, -// int num_blocks); -SYM_TYPED_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb) +// void sha256_transform_zvknha_or_zvknhb_zvkb(u32 state[SHA256_STATE_WORDS], +// const u8 *data, size_t nblocks); +SYM_FUNC_START(sha256_transform_zvknha_or_zvknhb_zvkb) // Load the round constants into K0-K15. 
vsetivli zero, 4, e32, m1, ta, ma diff --git a/arch/riscv/lib/crypto/sha256.c b/arch/riscv/lib/crypto/sha256.c new file mode 100644 index 000000000000..71808397dff4 --- /dev/null +++ b/arch/riscv/lib/crypto/sha256.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256 (RISC-V accelerated) + * + * Copyright (C) 2022 VRULL GmbH + * Author: Heiko Stuebner <heiko.stuebner@vrull.eu> + * + * Copyright (C) 2023 SiFive, Inc. + * Author: Jerry Shih <jerry.shih@sifive.com> + */ + +#include <asm/vector.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb( + u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions); + +void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + if (static_branch_likely(&have_extensions)) { + kernel_vector_begin(); + sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks); + kernel_vector_end(); + } else { + sha256_blocks_generic(state, data, nblocks); + } +} +EXPORT_SYMBOL_GPL(sha256_blocks_simd); + +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + sha256_blocks_generic(state, data, nblocks); +} +EXPORT_SYMBOL_GPL(sha256_blocks_arch); + +bool sha256_is_arch_optimized(void) +{ + return static_key_enabled(&have_extensions); +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +static int __init riscv64_sha256_mod_init(void) +{ + /* Both zvknha and zvknhb provide the SHA-256 instructions. */ + if ((riscv_isa_extension_available(NULL, ZVKNHA) || + riscv_isa_extension_available(NULL, ZVKNHB)) && + riscv_isa_extension_available(NULL, ZVKB) && + riscv_vector_vlen() >= 128) + static_branch_enable(&have_extensions); + return 0; +} +subsys_initcall(riscv64_sha256_mod_init); + +static void __exit riscv64_sha256_mod_exit(void) +{ +} +module_exit(riscv64_sha256_mod_exit); + +MODULE_DESCRIPTION("SHA-256 (RISC-V accelerated)"); +MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@vrull.eu>"); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 99fb986fca6e..0c16dc443e2f 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -146,6 +146,7 @@ config S390 select ARCH_WANTS_NO_INSTR select ARCH_WANT_DEFAULT_BPF_JIT select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_IRQS_OFF_ACTIVATE_MM select ARCH_WANT_KERNEL_PMD_MKWRITE select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index d04e9b89d14a..f584d7da29cb 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -179,7 +179,7 @@ void setup_boot_command_line(void) if (has_ebcdic_char(parmarea.command_line)) EBCASC(parmarea.command_line, COMMAND_LINE_SIZE); /* copy arch command line */ - strcpy(early_command_line, strim(parmarea.command_line)); + strscpy(early_command_line, strim(parmarea.command_line)); /* append IPL PARM data to the boot command line */ if (!is_prot_virt_guest() && ipl_block_valid) @@ -253,7 +253,8 @@ void parse_boot_command_line(void) int rc; __kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE); - args = strcpy(command_line_buf, early_command_line); + strscpy(command_line_buf, early_command_line); + args = command_line_buf; while (*args) { args = next_arg(args, ¶m, &val); @@ -309,7 +310,7 @@ void parse_boot_command_line(void) if (!strcmp(param, "bootdebug")) { bootdebug = true; if (val) - strncpy(bootdebug_filter, 
val, sizeof(bootdebug_filter) - 1); + strscpy(bootdebug_filter, val); } if (!strcmp(param, "quiet")) boot_console_loglevel = CONSOLE_LOGLEVEL_QUIET; diff --git a/arch/s390/boot/printk.c b/arch/s390/boot/printk.c index 8cf6331bc060..4bb6bc95704e 100644 --- a/arch/s390/boot/printk.c +++ b/arch/s390/boot/printk.c @@ -29,7 +29,8 @@ static void boot_rb_add(const char *str, size_t len) /* store strings separated by '\0' */ if (len + 1 > avail) boot_rb_off = 0; - strcpy(boot_rb + boot_rb_off, str); + avail = sizeof(boot_rb) - boot_rb_off - 1; + strscpy(boot_rb + boot_rb_off, str, avail); boot_rb_off += len + 1; } @@ -158,10 +159,10 @@ static noinline char *strsym(char *buf, void *ip) p = findsym((unsigned long)ip, &off, &len); if (p) { - strncpy(buf, p, MAX_SYMLEN); + strscpy(buf, p, MAX_SYMLEN); /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ p = buf + strnlen(buf, MAX_SYMLEN - 15); - strcpy(p, "+0x"); + strscpy(p, "+0x", MAX_SYMLEN - (p - buf)); as_hex(p + 3, off, 0); strcat(p, "/0x"); as_hex(p + strlen(p), len, 0); diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 06316fb8e0fa..da8337e63a3e 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -6,6 +6,7 @@ #include <asm/boot_data.h> #include <asm/extmem.h> #include <asm/sections.h> +#include <asm/diag288.h> #include <asm/maccess.h> #include <asm/machine.h> #include <asm/sysinfo.h> @@ -71,6 +72,20 @@ static void detect_machine_type(void) set_machine_feature(MFEATURE_VM); } +static void detect_diag288(void) +{ + /* "BEGIN" in EBCDIC character set */ + static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5"; + unsigned long action, len; + + action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART; + len = machine_is_vm() ? sizeof(cmd) : 0; + if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len)) + return; + __diag288(WDT_FUNC_CANCEL, 0, 0, 0); + set_machine_feature(MFEATURE_DIAG288); +} + static void detect_diag9c(void) { unsigned int cpu; @@ -519,6 +534,8 @@ void startup_kernel(void) detect_facilities(); detect_diag9c(); detect_machine_type(); + /* detect_diag288() needs machine type */ + detect_diag288(); cmma_init(); sanitize_prot_virt_host(); max_physmem_end = detect_max_physmem_end(); diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c index f6b9b1df48a8..bd68161434a6 100644 --- a/arch/s390/boot/string.c +++ b/arch/s390/boot/string.c @@ -29,6 +29,18 @@ int strncmp(const char *cs, const char *ct, size_t count) return 0; } +ssize_t sized_strscpy(char *dst, const char *src, size_t count) +{ + size_t len; + + if (count == 0) + return -E2BIG; + len = strnlen(src, count - 1); + memcpy(dst, src, len); + dst[len] = '\0'; + return src[len] ? 
-E2BIG : len; +} + void *memset64(uint64_t *s, uint64_t v, size_t count) { uint64_t *xs = s; diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 24b22f6a9e99..8ecad727497e 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -755,10 +755,10 @@ CONFIG_FORTIFY_SOURCE=y CONFIG_HARDENED_USERCOPY=y CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SELFTESTS=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m @@ -806,7 +806,6 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA1_S390=m -CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA3_256_S390=m CONFIG_CRYPTO_SHA3_512_S390=m CONFIG_CRYPTO_GHASH_S390=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 2b8b42d569bc..c13a77765162 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -741,10 +741,10 @@ CONFIG_IMA_APPRAISE=y CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SELFTESTS=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDSA=m @@ -793,7 +793,6 @@ CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA1_S390=m -CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA3_256_S390=m CONFIG_CRYPTO_SHA3_512_S390=m CONFIG_CRYPTO_GHASH_S390=m diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig index 8c4db8b64fa2..e2c27588b21a 100644 --- a/arch/s390/crypto/Kconfig +++ b/arch/s390/crypto/Kconfig @@ -4,7 +4,6 @@ menu "Accelerated Cryptographic Algorithms for CPU (s390)" config CRYPTO_SHA512_S390 tristate "Hash functions: SHA-384 and SHA-512" - depends on S390 select CRYPTO_HASH help SHA-384 and SHA-512 secure hash algorithms (FIPS 180) @@ -15,7 +14,6 @@ config CRYPTO_SHA512_S390 config CRYPTO_SHA1_S390 tristate "Hash functions: SHA-1" - depends on S390 select CRYPTO_HASH help SHA-1 secure hash algorithm (FIPS 180) @@ -24,20 +22,8 @@ config CRYPTO_SHA1_S390 It is available as of z990. -config CRYPTO_SHA256_S390 - tristate "Hash functions: SHA-224 and SHA-256" - depends on S390 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: s390 - - It is available as of z9. 
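[Editor's note] The boot-time sized_strscpy() added above gives the decompressor the truncating-copy contract that the strscpy() conversions in ipl_parm.c and printk.c rely on. A small userspace sketch of that contract follows, assuming the usual semantics: copy at most count - 1 characters, always NUL-terminate, and report truncation with -E2BIG. The example_strscpy name is made up for the sketch.

#include <assert.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>

/* Same contract as the boot sized_strscpy() shown above. */
static ssize_t example_strscpy(char *dst, const char *src, size_t count)
{
	size_t len;

	if (count == 0)
		return -E2BIG;
	len = strnlen(src, count - 1);
	memcpy(dst, src, len);
	dst[len] = '\0';
	return src[len] ? -E2BIG : (ssize_t)len;
}

int main(void)
{
	char buf[8];

	assert(example_strscpy(buf, "quiet", sizeof(buf)) == 5);          /* fits */
	assert(example_strscpy(buf, "bootdebug", sizeof(buf)) == -E2BIG); /* truncated */
	assert(buf[7] == '\0');  /* still NUL-terminated after truncation */
	return 0;
}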
- config CRYPTO_SHA3_256_S390 tristate "Hash functions: SHA3-224 and SHA3-256" - depends on S390 select CRYPTO_HASH help SHA3-224 and SHA3-256 secure hash algorithms (FIPS 202) @@ -48,7 +34,6 @@ config CRYPTO_SHA3_256_S390 config CRYPTO_SHA3_512_S390 tristate "Hash functions: SHA3-384 and SHA3-512" - depends on S390 select CRYPTO_HASH help SHA3-384 and SHA3-512 secure hash algorithms (FIPS 202) @@ -59,7 +44,6 @@ config CRYPTO_SHA3_512_S390 config CRYPTO_GHASH_S390 tristate "Hash functions: GHASH" - depends on S390 select CRYPTO_HASH help GCM GHASH hash function (NIST SP800-38D) @@ -70,7 +54,6 @@ config CRYPTO_GHASH_S390 config CRYPTO_AES_S390 tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS, GCM" - depends on S390 select CRYPTO_ALGAPI select CRYPTO_SKCIPHER help @@ -92,7 +75,6 @@ config CRYPTO_AES_S390 config CRYPTO_DES_S390 tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR" - depends on S390 select CRYPTO_ALGAPI select CRYPTO_SKCIPHER select CRYPTO_LIB_DES @@ -107,23 +89,8 @@ config CRYPTO_DES_S390 As of z990 the ECB and CBC mode are hardware accelerated. As of z196 the CTR mode is hardware accelerated. -config CRYPTO_CHACHA_S390 - tristate - depends on S390 - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - default CRYPTO_LIB_CHACHA_INTERNAL - help - Length-preserving cipher: ChaCha20 stream cipher (RFC 7539) - - Architecture: s390 - - It is available as of z13. - config CRYPTO_HMAC_S390 tristate "Keyed-hash message authentication code: HMAC" - depends on S390 select CRYPTO_HASH help s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index 14dafadbcbed..21757d86cd49 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile @@ -4,17 +4,13 @@ # obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o -obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o obj-$(CONFIG_CRYPTO_SHA3_256_S390) += sha3_256_s390.o sha_common.o obj-$(CONFIG_CRYPTO_SHA3_512_S390) += sha3_512_s390.o sha_common.o obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o -obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o obj-$(CONFIG_S390_PRNG) += prng.o obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o obj-y += arch_random.o - -chacha_s390-y := chacha-glue.o chacha-s390.o diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c deleted file mode 100644 index 920e9f0941e7..000000000000 --- a/arch/s390/crypto/chacha-glue.c +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * s390 ChaCha stream cipher. - * - * Copyright IBM Corp. 
2021 - */ - -#define KMSG_COMPONENT "chacha_s390" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include <crypto/internal/chacha.h> -#include <crypto/internal/skcipher.h> -#include <crypto/algapi.h> -#include <linux/cpufeature.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sizes.h> -#include <asm/fpu.h> -#include "chacha-s390.h" - -static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src, - unsigned int nbytes, const u32 *key, - u32 *counter) -{ - DECLARE_KERNEL_FPU_ONSTACK32(vxstate); - - kernel_fpu_begin(&vxstate, KERNEL_VXR); - chacha20_vx(dst, src, nbytes, key, counter); - kernel_fpu_end(&vxstate, KERNEL_VXR); - - *counter += round_up(nbytes, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE; -} - -static int chacha20_s390(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 state[CHACHA_STATE_WORDS] __aligned(16); - struct skcipher_walk walk; - unsigned int nbytes; - int rc; - - rc = skcipher_walk_virt(&walk, req, false); - chacha_init(state, ctx->key, req->iv); - - while (walk.nbytes > 0) { - nbytes = walk.nbytes; - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - if (nbytes <= CHACHA_BLOCK_SIZE) { - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); - } else { - chacha20_crypt_s390(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - &state[4], &state[12]); - } - rc = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - return rc; -} - -void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -{ - /* TODO: implement hchacha_block_arch() in assembly */ - hchacha_block_generic(state, stream, nrounds); -} -EXPORT_SYMBOL(hchacha_block_arch); - -void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds) -{ - /* s390 chacha20 implementation has 20 rounds hard-coded, - * it cannot handle a block of data or less, but otherwise - * it can handle data of arbitrary size - */ - if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !cpu_has_vx()) - chacha_crypt_generic(state, dst, src, bytes, nrounds); - else - chacha20_crypt_s390(state, dst, src, bytes, - &state[4], &state[12]); -} -EXPORT_SYMBOL(chacha_crypt_arch); - -static struct skcipher_alg chacha_algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-s390", - .base.cra_priority = 900, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha20_s390, - .decrypt = chacha20_s390, - } -}; - -static int __init chacha_mod_init(void) -{ - return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ? 
- crypto_register_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs)) : 0; -} - -static void __exit chacha_mod_fini(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) - crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs)); -} - -module_cpu_feature_match(S390_CPU_FEATURE_VXRS, chacha_mod_init); -module_exit(chacha_mod_fini); - -MODULE_DESCRIPTION("ChaCha20 stream cipher"); -MODULE_LICENSE("GPL v2"); - -MODULE_ALIAS_CRYPTO("chacha20"); diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c index 0800a2a5799f..dcbcee37cb63 100644 --- a/arch/s390/crypto/ghash_s390.c +++ b/arch/s390/crypto/ghash_s390.c @@ -8,29 +8,28 @@ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com> */ +#include <asm/cpacf.h> +#include <crypto/ghash.h> #include <crypto/internal/hash.h> -#include <linux/module.h> #include <linux/cpufeature.h> -#include <asm/cpacf.h> - -#define GHASH_BLOCK_SIZE 16 -#define GHASH_DIGEST_SIZE 16 +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> -struct ghash_ctx { +struct s390_ghash_ctx { u8 key[GHASH_BLOCK_SIZE]; }; -struct ghash_desc_ctx { +struct s390_ghash_desc_ctx { u8 icv[GHASH_BLOCK_SIZE]; u8 key[GHASH_BLOCK_SIZE]; - u8 buffer[GHASH_BLOCK_SIZE]; - u32 bytes; }; static int ghash_init(struct shash_desc *desc) { - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + struct s390_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc); memset(dctx, 0, sizeof(*dctx)); memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE); @@ -41,7 +40,7 @@ static int ghash_init(struct shash_desc *desc) static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { - struct ghash_ctx *ctx = crypto_shash_ctx(tfm); + struct s390_ghash_ctx *ctx = crypto_shash_ctx(tfm); if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; @@ -54,80 +53,71 @@ static int ghash_setkey(struct crypto_shash *tfm, static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc); unsigned int n; - u8 *buf = dctx->buffer; - - if (dctx->bytes) { - u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes); - n = min(srclen, dctx->bytes); - dctx->bytes -= n; - srclen -= n; - - memcpy(pos, src, n); - src += n; + n = srclen & ~(GHASH_BLOCK_SIZE - 1); + cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n); + return srclen - n; +} - if (!dctx->bytes) { - cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, - GHASH_BLOCK_SIZE); - } - } +static void ghash_flush(struct s390_ghash_desc_ctx *dctx, const u8 *src, + unsigned int len) +{ + if (len) { + u8 buf[GHASH_BLOCK_SIZE] = {}; - n = srclen & ~(GHASH_BLOCK_SIZE - 1); - if (n) { - cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n); - src += n; - srclen -= n; + memcpy(buf, src, len); + cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE); + memzero_explicit(buf, sizeof(buf)); } +} - if (srclen) { - dctx->bytes = GHASH_BLOCK_SIZE - srclen; - memcpy(buf, src, srclen); - } +static int ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) +{ + struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + ghash_flush(dctx, src, len); + memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE); return 0; } -static int ghash_flush(struct ghash_desc_ctx *dctx) +static int ghash_export(struct shash_desc *desc, void *out) { - u8 *buf = dctx->buffer; - - if (dctx->bytes) { - u8 *pos = buf + 
(GHASH_BLOCK_SIZE - dctx->bytes); - - memset(pos, 0, dctx->bytes); - cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE); - dctx->bytes = 0; - } + struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + memcpy(out, dctx->icv, GHASH_DIGEST_SIZE); return 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_import(struct shash_desc *desc, const void *in) { - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - int ret; + struct s390_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + struct s390_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - ret = ghash_flush(dctx); - if (!ret) - memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE); - return ret; + memcpy(dctx->icv, in, GHASH_DIGEST_SIZE); + memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE); + return 0; } static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, - .final = ghash_final, + .finup = ghash_finup, .setkey = ghash_setkey, - .descsize = sizeof(struct ghash_desc_ctx), + .export = ghash_export, + .import = ghash_import, + .statesize = sizeof(struct ghash_desc_ctx), + .descsize = sizeof(struct s390_ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-s390", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = GHASH_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct ghash_ctx), + .cra_ctxsize = sizeof(struct s390_ghash_ctx), .cra_module = THIS_MODULE, }, }; diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c index bba9a818dfdc..93a1098d9f8d 100644 --- a/arch/s390/crypto/hmac_s390.c +++ b/arch/s390/crypto/hmac_s390.c @@ -9,10 +9,14 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <asm/cpacf.h> -#include <crypto/sha2.h> #include <crypto/internal/hash.h> +#include <crypto/hmac.h> +#include <crypto/sha2.h> #include <linux/cpufeature.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> /* * KMAC param block layout for sha2 function codes: @@ -71,32 +75,31 @@ union s390_kmac_gr0 { struct s390_kmac_sha2_ctx { u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE]; union s390_kmac_gr0 gr0; - u8 buf[MAX_BLOCK_SIZE]; - unsigned int buflen; + u64 buflen[2]; }; /* * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize */ -static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen, - unsigned int blocksize) +static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo, + u64 buflen_hi, unsigned int blocksize) { u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize); switch (blocksize) { case SHA256_BLOCK_SIZE: - *(u64 *)imbl = (u64)buflen * BITS_PER_BYTE; + *(u64 *)imbl = buflen_lo * BITS_PER_BYTE; break; case SHA512_BLOCK_SIZE: - *(u128 *)imbl = (u128)buflen * BITS_PER_BYTE; + *(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3; break; default: break; } } -static int hash_key(const u8 *in, unsigned int inlen, - u8 *digest, unsigned int digestsize) +static int hash_data(const u8 *in, unsigned int inlen, + u8 *digest, unsigned int digestsize, bool final) { unsigned long func; union { @@ -123,19 +126,23 @@ static int hash_key(const u8 *in, unsigned int inlen, switch (digestsize) { case SHA224_DIGEST_SIZE: - func = CPACF_KLMD_SHA_256; + func = final ? CPACF_KLMD_SHA_256 : CPACF_KIMD_SHA_256; PARAM_INIT(256, 224, inlen * 8); + if (!final) + digestsize = SHA256_DIGEST_SIZE; break; case SHA256_DIGEST_SIZE: - func = CPACF_KLMD_SHA_256; + func = final ? 
CPACF_KLMD_SHA_256 : CPACF_KIMD_SHA_256; PARAM_INIT(256, 256, inlen * 8); break; case SHA384_DIGEST_SIZE: - func = CPACF_KLMD_SHA_512; + func = final ? CPACF_KLMD_SHA_512 : CPACF_KIMD_SHA_512; PARAM_INIT(512, 384, inlen * 8); + if (!final) + digestsize = SHA512_DIGEST_SIZE; break; case SHA512_DIGEST_SIZE: - func = CPACF_KLMD_SHA_512; + func = final ? CPACF_KLMD_SHA_512 : CPACF_KIMD_SHA_512; PARAM_INIT(512, 512, inlen * 8); break; default: @@ -151,6 +158,12 @@ static int hash_key(const u8 *in, unsigned int inlen, return 0; } +static int hash_key(const u8 *in, unsigned int inlen, + u8 *digest, unsigned int digestsize) +{ + return hash_data(in, inlen, digest, digestsize, true); +} + static int s390_hmac_sha2_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -176,7 +189,8 @@ static int s390_hmac_sha2_init(struct shash_desc *desc) memcpy(ctx->param + SHA2_KEY_OFFSET(bs), tfm_ctx->key, bs); - ctx->buflen = 0; + ctx->buflen[0] = 0; + ctx->buflen[1] = 0; ctx->gr0.reg = 0; switch (crypto_shash_digestsize(desc->tfm)) { case SHA224_DIGEST_SIZE: @@ -203,48 +217,31 @@ static int s390_hmac_sha2_update(struct shash_desc *desc, { struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); unsigned int bs = crypto_shash_blocksize(desc->tfm); - unsigned int offset, n; - - /* check current buffer */ - offset = ctx->buflen % bs; - ctx->buflen += len; - if (offset + len < bs) - goto store; - - /* process one stored block */ - if (offset) { - n = bs - offset; - memcpy(ctx->buf + offset, data, n); - ctx->gr0.iimp = 1; - _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs); - data += n; - len -= n; - offset = 0; - } - /* process as many blocks as possible */ - if (len >= bs) { - n = (len / bs) * bs; - ctx->gr0.iimp = 1; - _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n); - data += n; - len -= n; - } -store: - /* store incomplete block in buffer */ - if (len) - memcpy(ctx->buf + offset, data, len); + unsigned int n = round_down(len, bs); - return 0; + ctx->buflen[0] += n; + if (ctx->buflen[0] < n) + ctx->buflen[1]++; + + /* process as many blocks as possible */ + ctx->gr0.iimp = 1; + _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n); + return len - n; } -static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out) +static int s390_hmac_sha2_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); unsigned int bs = crypto_shash_blocksize(desc->tfm); + ctx->buflen[0] += len; + if (ctx->buflen[0] < len) + ctx->buflen[1]++; + ctx->gr0.iimp = 0; - kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs); - _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs); + kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs); + _cpacf_kmac(&ctx->gr0.reg, ctx->param, src, len); memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm)); return 0; @@ -262,7 +259,7 @@ static int s390_hmac_sha2_digest(struct shash_desc *desc, return rc; ctx->gr0.iimp = 0; - kmac_sha2_set_imbl(ctx->param, len, + kmac_sha2_set_imbl(ctx->param, len, 0, crypto_shash_blocksize(desc->tfm)); _cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len); memcpy(out, ctx->param, ds); @@ -270,22 +267,89 @@ static int s390_hmac_sha2_digest(struct shash_desc *desc, return 0; } -#define S390_HMAC_SHA2_ALG(x) { \ +static int s390_hmac_export_zero(struct shash_desc *desc, void *out) +{ + struct crypto_shash *tfm = desc->tfm; + u8 ipad[SHA512_BLOCK_SIZE]; + struct s390_hmac_ctx *ctx; + unsigned int bs; + int err, i; + + ctx = crypto_shash_ctx(tfm); + bs = 
crypto_shash_blocksize(tfm); + for (i = 0; i < bs; i++) + ipad[i] = ctx->key[i] ^ HMAC_IPAD_VALUE; + + err = hash_data(ipad, bs, out, crypto_shash_digestsize(tfm), false); + memzero_explicit(ipad, sizeof(ipad)); + return err; +} + +static int s390_hmac_export(struct shash_desc *desc, void *out) +{ + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); + unsigned int bs = crypto_shash_blocksize(desc->tfm); + unsigned int ds = bs / 2; + union { + u8 *u8; + u64 *u64; + } p = { .u8 = out }; + int err = 0; + + if (!ctx->gr0.ikp) + err = s390_hmac_export_zero(desc, out); + else + memcpy(p.u8, ctx->param, ds); + p.u8 += ds; + put_unaligned(ctx->buflen[0], p.u64++); + if (ds == SHA512_DIGEST_SIZE) + put_unaligned(ctx->buflen[1], p.u64); + return err; +} + +static int s390_hmac_import(struct shash_desc *desc, const void *in) +{ + struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc); + unsigned int bs = crypto_shash_blocksize(desc->tfm); + unsigned int ds = bs / 2; + union { + const u8 *u8; + const u64 *u64; + } p = { .u8 = in }; + int err; + + err = s390_hmac_sha2_init(desc); + memcpy(ctx->param, p.u8, ds); + p.u8 += ds; + ctx->buflen[0] = get_unaligned(p.u64++); + if (ds == SHA512_DIGEST_SIZE) + ctx->buflen[1] = get_unaligned(p.u64); + if (ctx->buflen[0] | ctx->buflen[1]) + ctx->gr0.ikp = 1; + return err; +} + +#define S390_HMAC_SHA2_ALG(x, ss) { \ .fc = CPACF_KMAC_HMAC_SHA_##x, \ .alg = { \ .init = s390_hmac_sha2_init, \ .update = s390_hmac_sha2_update, \ - .final = s390_hmac_sha2_final, \ + .finup = s390_hmac_sha2_finup, \ .digest = s390_hmac_sha2_digest, \ .setkey = s390_hmac_sha2_setkey, \ + .export = s390_hmac_export, \ + .import = s390_hmac_import, \ .descsize = sizeof(struct s390_kmac_sha2_ctx), \ .halg = { \ + .statesize = ss, \ .digestsize = SHA##x##_DIGEST_SIZE, \ .base = { \ .cra_name = "hmac(sha" #x ")", \ .cra_driver_name = "hmac_s390_sha" #x, \ .cra_blocksize = SHA##x##_BLOCK_SIZE, \ .cra_priority = 400, \ + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | \ + CRYPTO_AHASH_ALG_FINUP_MAX, \ .cra_ctxsize = sizeof(struct s390_hmac_ctx), \ .cra_module = THIS_MODULE, \ }, \ @@ -298,10 +362,10 @@ static struct s390_hmac_alg { unsigned int fc; struct shash_alg alg; } s390_hmac_algs[] = { - S390_HMAC_SHA2_ALG(224), - S390_HMAC_SHA2_ALG(256), - S390_HMAC_SHA2_ALG(384), - S390_HMAC_SHA2_ALG(512), + S390_HMAC_SHA2_ALG(224, sizeof(struct crypto_sha256_state)), + S390_HMAC_SHA2_ALG(256, sizeof(struct crypto_sha256_state)), + S390_HMAC_SHA2_ALG(384, SHA512_STATE_SIZE), + S390_HMAC_SHA2_ALG(512, SHA512_STATE_SIZE), }; static __always_inline void _s390_hmac_algs_unregister(void) diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 511093713a6f..8a340c16acb4 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -5,7 +5,7 @@ * s390 implementation of the AES Cipher Algorithm with protected keys. * * s390 Version: - * Copyright IBM Corp. 2017, 2023 + * Copyright IBM Corp. 
2017, 2025 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Harald Freudenberger <freude@de.ibm.com> */ @@ -13,16 +13,18 @@ #define KMSG_COMPONENT "paes_s390" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include <crypto/aes.h> -#include <crypto/algapi.h> -#include <linux/bug.h> -#include <linux/err.h> -#include <linux/module.h> +#include <linux/atomic.h> #include <linux/cpufeature.h> +#include <linux/delay.h> +#include <linux/err.h> #include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> -#include <linux/delay.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/engine.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> #include <asm/cpacf.h> @@ -44,23 +46,61 @@ static DEFINE_MUTEX(ctrblk_lock); static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; +static struct crypto_engine *paes_crypto_engine; +#define MAX_QLEN 10 + +/* + * protected key specific stuff + */ + struct paes_protkey { u32 type; u32 len; u8 protkey[PXTS_256_PROTKEY_SIZE]; }; -struct key_blob { - /* - * Small keys will be stored in the keybuf. Larger keys are - * stored in extra allocated memory. In both cases does - * key point to the memory where the key is stored. - * The code distinguishes by checking keylen against - * sizeof(keybuf). See the two following helper functions. - */ - u8 *key; - u8 keybuf[128]; +#define PK_STATE_NO_KEY 0 +#define PK_STATE_CONVERT_IN_PROGRESS 1 +#define PK_STATE_VALID 2 + +struct s390_paes_ctx { + /* source key material used to derive a protected key from */ + u8 keybuf[PAES_MAX_KEYSIZE]; + unsigned int keylen; + + /* cpacf function code to use with this protected key type */ + long fc; + + /* nr of requests enqueued via crypto engine which use this tfm ctx */ + atomic_t via_engine_ctr; + + /* spinlock to atomic read/update all the following fields */ + spinlock_t pk_lock; + + /* see PK_STATE* defines above, < 0 holds convert failure rc */ + int pk_state; + /* if state is valid, pk holds the protected key */ + struct paes_protkey pk; +}; + +struct s390_pxts_ctx { + /* source key material used to derive a protected key from */ + u8 keybuf[2 * PAES_MAX_KEYSIZE]; unsigned int keylen; + + /* cpacf function code to use with this protected key type */ + long fc; + + /* nr of requests enqueued via crypto engine which use this tfm ctx */ + atomic_t via_engine_ctr; + + /* spinlock to atomic read/update all the following fields */ + spinlock_t pk_lock; + + /* see PK_STATE* defines above, < 0 holds convert failure rc */ + int pk_state; + /* if state is valid, pk[] hold(s) the protected key(s) */ + struct paes_protkey pk[2]; }; /* @@ -89,214 +129,370 @@ static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest) return sizeof(*token) + cklen; } -static inline int _key_to_kb(struct key_blob *kb, - const u8 *key, - unsigned int keylen) +/* + * paes_ctx_setkey() - Set key value into context, maybe construct + * a clear key token digestible by pkey from a clear key value. 
+ */ +static inline int paes_ctx_setkey(struct s390_paes_ctx *ctx, + const u8 *key, unsigned int keylen) { + if (keylen > sizeof(ctx->keybuf)) + return -EINVAL; + switch (keylen) { case 16: case 24: case 32: /* clear key value, prepare pkey clear key token in keybuf */ - memset(kb->keybuf, 0, sizeof(kb->keybuf)); - kb->keylen = make_clrkey_token(key, keylen, kb->keybuf); - kb->key = kb->keybuf; + memset(ctx->keybuf, 0, sizeof(ctx->keybuf)); + ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf); break; default: /* other key material, let pkey handle this */ - if (keylen <= sizeof(kb->keybuf)) - kb->key = kb->keybuf; - else { - kb->key = kmalloc(keylen, GFP_KERNEL); - if (!kb->key) - return -ENOMEM; - } - memcpy(kb->key, key, keylen); - kb->keylen = keylen; + memcpy(ctx->keybuf, key, keylen); + ctx->keylen = keylen; break; } return 0; } -static inline int _xts_key_to_kb(struct key_blob *kb, - const u8 *key, - unsigned int keylen) +/* + * pxts_ctx_setkey() - Set key value into context, maybe construct + * a clear key token digestible by pkey from a clear key value. + */ +static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx, + const u8 *key, unsigned int keylen) { size_t cklen = keylen / 2; - memset(kb->keybuf, 0, sizeof(kb->keybuf)); + if (keylen > sizeof(ctx->keybuf)) + return -EINVAL; switch (keylen) { case 32: case 64: /* clear key value, prepare pkey clear key tokens in keybuf */ - kb->key = kb->keybuf; - kb->keylen = make_clrkey_token(key, cklen, kb->key); - kb->keylen += make_clrkey_token(key + cklen, cklen, - kb->key + kb->keylen); + memset(ctx->keybuf, 0, sizeof(ctx->keybuf)); + ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf); + ctx->keylen += make_clrkey_token(key + cklen, cklen, + ctx->keybuf + ctx->keylen); break; default: /* other key material, let pkey handle this */ - if (keylen <= sizeof(kb->keybuf)) { - kb->key = kb->keybuf; - } else { - kb->key = kmalloc(keylen, GFP_KERNEL); - if (!kb->key) - return -ENOMEM; - } - memcpy(kb->key, key, keylen); - kb->keylen = keylen; + memcpy(ctx->keybuf, key, keylen); + ctx->keylen = keylen; break; } return 0; } -static inline void _free_kb_keybuf(struct key_blob *kb) +/* + * Convert the raw key material into a protected key via PKEY api. + * This function may sleep - don't call in non-sleeping context. + */ +static inline int convert_key(const u8 *key, unsigned int keylen, + struct paes_protkey *pk) { - if (kb->key && kb->key != kb->keybuf - && kb->keylen > sizeof(kb->keybuf)) { - kfree_sensitive(kb->key); - kb->key = NULL; + int rc, i; + + pk->len = sizeof(pk->protkey); + + /* + * In case of a busy card retry with increasing delay + * of 200, 400, 800 and 1600 ms - in total 3 s. + */ + for (rc = -EIO, i = 0; rc && i < 5; i++) { + if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) { + rc = -EINTR; + goto out; + } + rc = pkey_key2protkey(key, keylen, + pk->protkey, &pk->len, &pk->type, + PKEY_XFLAG_NOMEMALLOC); } - memzero_explicit(kb->keybuf, sizeof(kb->keybuf)); + +out: + pr_debug("rc=%d\n", rc); + return rc; } -struct s390_paes_ctx { - struct key_blob kb; +/* + * (Re-)Convert the raw key material from the ctx into a protected key + * via convert_key() function. Update the pk_state, pk_type, pk_len + * and the protected key in the tfm context. + * Please note this function may be invoked concurrently with the very + * same tfm context. The pk_lock spinlock in the context ensures an + * atomic update of the pk and the pk state but does not guarantee any + * order of update. 
So a fresh converted valid protected key may get + * updated with an 'old' expired key value. As the cpacf instructions + * detect this, refuse to operate with an invalid key and the calling + * code triggers a (re-)conversion this does no harm. This may lead to + * unnecessary additional conversion but never to invalid data on en- + * or decrypt operations. + */ +static int paes_convert_key(struct s390_paes_ctx *ctx) +{ struct paes_protkey pk; - spinlock_t pk_lock; - unsigned long fc; -}; + int rc; -struct s390_pxts_ctx { - struct key_blob kb; - struct paes_protkey pk[2]; - spinlock_t pk_lock; - unsigned long fc; -}; + spin_lock_bh(&ctx->pk_lock); + ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; + spin_unlock_bh(&ctx->pk_lock); -static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen, - struct paes_protkey *pk) -{ - int i, rc = -EIO; + rc = convert_key(ctx->keybuf, ctx->keylen, &pk); - /* try three times in case of busy card */ - for (i = 0; rc && i < 3; i++) { - if (rc == -EBUSY && in_task()) { - if (msleep_interruptible(1000)) - return -EINTR; - } - rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len, - &pk->type); + /* update context */ + spin_lock_bh(&ctx->pk_lock); + if (rc) { + ctx->pk_state = rc; + } else { + ctx->pk_state = PK_STATE_VALID; + ctx->pk = pk; } + spin_unlock_bh(&ctx->pk_lock); + memzero_explicit(&pk, sizeof(pk)); + pr_debug("rc=%d\n", rc); return rc; } -static inline int __paes_convert_key(struct s390_paes_ctx *ctx) +/* + * (Re-)Convert the raw xts key material from the ctx into a + * protected key via convert_key() function. Update the pk_state, + * pk_type, pk_len and the protected key in the tfm context. + * See also comments on function paes_convert_key. + */ +static int pxts_convert_key(struct s390_pxts_ctx *ctx) { - struct paes_protkey pk; + struct paes_protkey pk0, pk1; + size_t split_keylen; int rc; - pk.len = sizeof(pk.protkey); - rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk); + spin_lock_bh(&ctx->pk_lock); + ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; + spin_unlock_bh(&ctx->pk_lock); + + rc = convert_key(ctx->keybuf, ctx->keylen, &pk0); if (rc) - return rc; + goto out; + + switch (pk0.type) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_256: + /* second keytoken required */ + if (ctx->keylen % 2) { + rc = -EINVAL; + goto out; + } + split_keylen = ctx->keylen / 2; + rc = convert_key(ctx->keybuf + split_keylen, + split_keylen, &pk1); + if (rc) + goto out; + if (pk0.type != pk1.type) { + rc = -EINVAL; + goto out; + } + break; + case PKEY_KEYTYPE_AES_XTS_128: + case PKEY_KEYTYPE_AES_XTS_256: + /* single key */ + pk1.type = 0; + break; + default: + /* unsupported protected keytype */ + rc = -EINVAL; + goto out; + } +out: + /* update context */ spin_lock_bh(&ctx->pk_lock); - memcpy(&ctx->pk, &pk, sizeof(pk)); + if (rc) { + ctx->pk_state = rc; + } else { + ctx->pk_state = PK_STATE_VALID; + ctx->pk[0] = pk0; + ctx->pk[1] = pk1; + } spin_unlock_bh(&ctx->pk_lock); - return 0; + memzero_explicit(&pk0, sizeof(pk0)); + memzero_explicit(&pk1, sizeof(pk1)); + pr_debug("rc=%d\n", rc); + return rc; } -static int ecb_paes_init(struct crypto_skcipher *tfm) -{ - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); +/* + * PAES ECB implementation + */ - ctx->kb.key = NULL; - spin_lock_init(&ctx->pk_lock); +struct ecb_param { + u8 key[PAES_256_PROTKEY_SIZE]; +} __packed; - return 0; -} +struct s390_pecb_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + struct ecb_param param; +}; -static void 
ecb_paes_exit(struct crypto_skcipher *tfm) +static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) { struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - - _free_kb_keybuf(&ctx->kb); -} - -static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx) -{ - unsigned long fc; + long fc; int rc; - rc = __paes_convert_key(ctx); + /* set raw key into context */ + rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) - return rc; + goto out; - /* Pick the correct function code based on the protected key type */ - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 : - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 : - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0; + /* convert key into protected key */ + rc = paes_convert_key(ctx); + if (rc) + goto out; - /* Check if the function code is available */ + /* Pick the correct function code based on the protected key type */ + switch (ctx->pk.type) { + case PKEY_KEYTYPE_AES_128: + fc = CPACF_KM_PAES_128; + break; + case PKEY_KEYTYPE_AES_192: + fc = CPACF_KM_PAES_192; + break; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KM_PAES_256; + break; + default: + fc = 0; + break; + } ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; - return ctx->fc ? 0 : -EINVAL; + rc = fc ? 0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) +static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx, + struct s390_pecb_req_ctx *req_ctx, + bool maysleep) { - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - int rc; - - _free_kb_keybuf(&ctx->kb); - rc = _key_to_kb(&ctx->kb, in_key, key_len); + struct ecb_param *param = &req_ctx->param; + struct skcipher_walk *walk = &req_ctx->walk; + unsigned int nbytes, n, k; + int pk_state, rc = 0; + + if (!req_ctx->param_init_done) { + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + req_ctx->param_init_done = true; + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + } if (rc) - return rc; + goto out; - return __ecb_paes_set_key(ctx); + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. 
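+ * If cpacf_km() processes fewer bytes than requested (k < n), the
+ * protected key has become unusable (e.g. the wrapping key changed)
+ * and needs to be re-derived. With maysleep == false this is
+ * reported as -EKEYEXPIRED so that the caller can retry the request
+ * via the crypto engine in a sleepable context.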
+ */ + while ((nbytes = walk->nbytes) != 0) { + /* only use complete blocks */ + n = nbytes & ~(AES_BLOCK_SIZE - 1); + k = cpacf_km(ctx->fc | req_ctx->modifier, param, + walk->dst.virt.addr, walk->src.virt.addr, n); + if (k) + rc = skcipher_walk_done(walk, nbytes - k); + if (k < n) { + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) + goto out; + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + spin_unlock_bh(&ctx->pk_lock); + } + } + +out: + pr_debug("rc=%d\n", rc); + return rc; } static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier) { + struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - } param; - struct skcipher_walk walk; - unsigned int nbytes, n, k; + struct skcipher_walk *walk = &req_ctx->walk; int rc; - rc = skcipher_walk_virt(&walk, req, false); + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. + */ + + rc = skcipher_walk_virt(walk, req, false); if (rc) - return rc; + goto out; - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); + req_ctx->modifier = modifier; + req_ctx->param_init_done = false; - while ((nbytes = walk.nbytes) != 0) { - /* only use complete blocks */ - n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_km(ctx->fc | modifier, ¶m, - walk.dst.virt.addr, walk.src.virt.addr, n); - if (k) - rc = skcipher_walk_done(&walk, nbytes - k); - if (k < n) { - if (__paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - } + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = ecb_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. 
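+ * via_engine_ctr is dropped again either right here (if the transfer
+ * to the engine fails) or in the engine callback after the request
+ * has been finalized, so the tfm returns to the synchronous fast
+ * path once its engine queue has drained.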
+ */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); return rc; } @@ -310,112 +506,256 @@ static int ecb_paes_decrypt(struct skcipher_request *req) return ecb_paes_crypt(req, CPACF_DECRYPT); } -static struct skcipher_alg ecb_paes_alg = { - .base.cra_name = "ecb(paes)", - .base.cra_driver_name = "ecb-paes-s390", - .base.cra_priority = 401, /* combo: aes + ecb + 1 */ - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list), - .init = ecb_paes_init, - .exit = ecb_paes_exit, - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .setkey = ecb_paes_set_key, - .encrypt = ecb_paes_encrypt, - .decrypt = ecb_paes_decrypt, -}; - -static int cbc_paes_init(struct crypto_skcipher *tfm) +static int ecb_paes_init(struct crypto_skcipher *tfm) { struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->kb.key = NULL; + memset(ctx, 0, sizeof(*ctx)); spin_lock_init(&ctx->pk_lock); + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pecb_req_ctx)); + return 0; } -static void cbc_paes_exit(struct crypto_skcipher *tfm) +static void ecb_paes_exit(struct crypto_skcipher *tfm) { struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - _free_kb_keybuf(&ctx->kb); + memzero_explicit(ctx, sizeof(*ctx)); } -static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx) +static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq) { - unsigned long fc; + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; int rc; - rc = __paes_convert_key(ctx); - if (rc) - return rc; + /* walk has already been prepared */ + + rc = ecb_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. + */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); + } - /* Pick the correct function code based on the protected key type */ - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 : - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 : - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0; + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); + return rc; +} - /* Check if the function code is available */ - ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? 
fc : 0; +static struct skcipher_engine_alg ecb_paes_alg = { + .base = { + .base.cra_name = "ecb(paes)", + .base.cra_driver_name = "ecb-paes-s390", + .base.cra_priority = 401, /* combo: aes + ecb + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.base.cra_list), + .init = ecb_paes_init, + .exit = ecb_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .setkey = ecb_paes_setkey, + .encrypt = ecb_paes_encrypt, + .decrypt = ecb_paes_decrypt, + }, + .op = { + .do_one_request = ecb_paes_do_one_request, + }, +}; - return ctx->fc ? 0 : -EINVAL; -} +/* + * PAES CBC implementation + */ + +struct cbc_param { + u8 iv[AES_BLOCK_SIZE]; + u8 key[PAES_256_PROTKEY_SIZE]; +} __packed; + +struct s390_pcbc_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + struct cbc_param param; +}; -static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) +static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) { struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + long fc; int rc; - _free_kb_keybuf(&ctx->kb); - rc = _key_to_kb(&ctx->kb, in_key, key_len); + /* set raw key into context */ + rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) - return rc; + goto out; - return __cbc_paes_set_key(ctx); + /* convert raw key into protected key */ + rc = paes_convert_key(ctx); + if (rc) + goto out; + + /* Pick the correct function code based on the protected key type */ + switch (ctx->pk.type) { + case PKEY_KEYTYPE_AES_128: + fc = CPACF_KMC_PAES_128; + break; + case PKEY_KEYTYPE_AES_192: + fc = CPACF_KMC_PAES_192; + break; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KMC_PAES_256; + break; + default: + fc = 0; + break; + } + ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0; + + rc = fc ? 0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) +static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx, + struct s390_pcbc_req_ctx *req_ctx, + bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - struct { - u8 iv[AES_BLOCK_SIZE]; - u8 key[PAES_256_PROTKEY_SIZE]; - } param; - struct skcipher_walk walk; + struct cbc_param *param = &req_ctx->param; + struct skcipher_walk *walk = &req_ctx->walk; unsigned int nbytes, n, k; - int rc; - - rc = skcipher_walk_virt(&walk, req, false); + int pk_state, rc = 0; + + if (!req_ctx->param_init_done) { + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + req_ctx->param_init_done = true; + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + } if (rc) - return rc; + goto out; - memcpy(param.iv, walk.iv, AES_BLOCK_SIZE); - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); + memcpy(param->iv, walk->iv, AES_BLOCK_SIZE); - while ((nbytes = walk.nbytes) != 0) { + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. 
So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_kmc(ctx->fc | modifier, ¶m, - walk.dst.virt.addr, walk.src.virt.addr, n); + k = cpacf_kmc(ctx->fc | req_ctx->modifier, param, + walk->dst.virt.addr, walk->src.virt.addr, n); if (k) { - memcpy(walk.iv, param.iv, AES_BLOCK_SIZE); - rc = skcipher_walk_done(&walk, nbytes - k); + memcpy(walk->iv, param->iv, AES_BLOCK_SIZE); + rc = skcipher_walk_done(walk, nbytes - k); } if (k < n) { - if (__paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) + goto out; spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); spin_unlock_bh(&ctx->pk_lock); } } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) +{ + struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; + + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. + */ + + rc = skcipher_walk_virt(walk, req, false); + if (rc) + goto out; + + req_ctx->modifier = modifier; + req_ctx->param_init_done = false; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = cbc_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. 
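+ * The chaining value is read from and written back to walk->iv by
+ * cbc_paes_do_crypt(), so a request that is completed asynchronously
+ * on the engine still resumes and finishes with the correct IV.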
+ */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); return rc; } @@ -429,496 +769,882 @@ static int cbc_paes_decrypt(struct skcipher_request *req) return cbc_paes_crypt(req, CPACF_DECRYPT); } -static struct skcipher_alg cbc_paes_alg = { - .base.cra_name = "cbc(paes)", - .base.cra_driver_name = "cbc-paes-s390", - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list), - .init = cbc_paes_init, - .exit = cbc_paes_exit, - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = cbc_paes_set_key, - .encrypt = cbc_paes_encrypt, - .decrypt = cbc_paes_decrypt, -}; - -static int xts_paes_init(struct crypto_skcipher *tfm) +static int cbc_paes_init(struct crypto_skcipher *tfm) { - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->kb.key = NULL; + memset(ctx, 0, sizeof(*ctx)); spin_lock_init(&ctx->pk_lock); + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pcbc_req_ctx)); + return 0; } -static void xts_paes_exit(struct crypto_skcipher *tfm) +static void cbc_paes_exit(struct crypto_skcipher *tfm) { - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - _free_kb_keybuf(&ctx->kb); + memzero_explicit(ctx, sizeof(*ctx)); } -static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx) +static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq) { - struct paes_protkey pk0, pk1; - size_t split_keylen; + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; int rc; - pk0.len = sizeof(pk0.protkey); - pk1.len = sizeof(pk1.protkey); - - rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0); - if (rc) - return rc; + /* walk has already been prepared */ + + rc = cbc_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. 
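+ * This relies on the engine having been allocated with retry support
+ * (see paes_s390_init()): the request is re-queued by the engine
+ * instead of being completed with an error.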
+ */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); + } - switch (pk0.type) { - case PKEY_KEYTYPE_AES_128: - case PKEY_KEYTYPE_AES_256: - /* second keytoken required */ - if (ctx->kb.keylen % 2) - return -EINVAL; - split_keylen = ctx->kb.keylen / 2; + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); + return rc; +} - rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen, - split_keylen, &pk1); - if (rc) - return rc; +static struct skcipher_engine_alg cbc_paes_alg = { + .base = { + .base.cra_name = "cbc(paes)", + .base.cra_driver_name = "cbc-paes-s390", + .base.cra_priority = 402, /* cbc-paes-s390 + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.base.cra_list), + .init = cbc_paes_init, + .exit = cbc_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_paes_setkey, + .encrypt = cbc_paes_encrypt, + .decrypt = cbc_paes_decrypt, + }, + .op = { + .do_one_request = cbc_paes_do_one_request, + }, +}; - if (pk0.type != pk1.type) - return -EINVAL; - break; - case PKEY_KEYTYPE_AES_XTS_128: - case PKEY_KEYTYPE_AES_XTS_256: - /* single key */ - pk1.type = 0; - break; - default: - /* unsupported protected keytype */ - return -EINVAL; - } +/* + * PAES CTR implementation + */ - spin_lock_bh(&ctx->pk_lock); - ctx->pk[0] = pk0; - ctx->pk[1] = pk1; - spin_unlock_bh(&ctx->pk_lock); +struct ctr_param { + u8 key[PAES_256_PROTKEY_SIZE]; +} __packed; - return 0; -} +struct s390_pctr_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + struct ctr_param param; +}; -static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx) +static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) { - unsigned long fc; + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + long fc; int rc; - rc = __xts_paes_convert_key(ctx); + /* set raw key into context */ + rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) - return rc; + goto out; + + /* convert raw key into protected key */ + rc = paes_convert_key(ctx); + if (rc) + goto out; /* Pick the correct function code based on the protected key type */ - switch (ctx->pk[0].type) { + switch (ctx->pk.type) { case PKEY_KEYTYPE_AES_128: - fc = CPACF_KM_PXTS_128; - break; - case PKEY_KEYTYPE_AES_256: - fc = CPACF_KM_PXTS_256; + fc = CPACF_KMCTR_PAES_128; break; - case PKEY_KEYTYPE_AES_XTS_128: - fc = CPACF_KM_PXTS_128_FULL; + case PKEY_KEYTYPE_AES_192: + fc = CPACF_KMCTR_PAES_192; break; - case PKEY_KEYTYPE_AES_XTS_256: - fc = CPACF_KM_PXTS_256_FULL; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KMCTR_PAES_256; break; default: fc = 0; break; } + ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0; - /* Check if the function code is available */ - ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + rc = fc ? 0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) +{ + unsigned int i, n; + + /* only use complete blocks, max. PAGE_SIZE */ + memcpy(ctrptr, iv, AES_BLOCK_SIZE); + n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); + for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { + memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE); + crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE); + ctrptr += AES_BLOCK_SIZE; + } + return n; +} + +static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx, + struct s390_pctr_req_ctx *req_ctx, + bool maysleep) +{ + struct ctr_param *param = &req_ctx->param; + struct skcipher_walk *walk = &req_ctx->walk; + u8 buf[AES_BLOCK_SIZE], *ctrptr; + unsigned int nbytes, n, k; + int pk_state, locked, rc = 0; + + if (!req_ctx->param_init_done) { + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + req_ctx->param_init_done = true; + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + } + if (rc) + goto out; + + locked = mutex_trylock(&ctrblk_lock); + + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + n = AES_BLOCK_SIZE; + if (nbytes >= 2 * AES_BLOCK_SIZE && locked) + n = __ctrblk_init(ctrblk, walk->iv, nbytes); + ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv; + k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr, + walk->src.virt.addr, n, ctrptr); + if (k) { + if (ctrptr == ctrblk) + memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + crypto_inc(walk->iv, AES_BLOCK_SIZE); + rc = skcipher_walk_done(walk, nbytes - k); + } + if (k < n) { + if (!maysleep) { + if (locked) + mutex_unlock(&ctrblk_lock); + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) { + if (locked) + mutex_unlock(&ctrblk_lock); + goto out; + } + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + spin_unlock_bh(&ctx->pk_lock); + } + } + if (locked) + mutex_unlock(&ctrblk_lock); + + /* final block may be < AES_BLOCK_SIZE, copy only nbytes */ + if (nbytes) { + memset(buf, 0, AES_BLOCK_SIZE); + memcpy(buf, walk->src.virt.addr, nbytes); + while (1) { + if (cpacf_kmctr(ctx->fc, param, buf, + buf, AES_BLOCK_SIZE, + walk->iv) == AES_BLOCK_SIZE) + break; + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) + goto out; + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + spin_unlock_bh(&ctx->pk_lock); + } + memcpy(walk->dst.virt.addr, buf, nbytes); + crypto_inc(walk->iv, AES_BLOCK_SIZE); + rc = skcipher_walk_done(walk, 0); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ctr_paes_crypt(struct skcipher_request *req) +{ + struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; + + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. 
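+ * Note that no modifier is stored for CTR: with KMCTR, encryption
+ * and decryption are the same operation, so this function serves as
+ * both the .encrypt and the .decrypt callback.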
+ */ + + rc = skcipher_walk_virt(walk, req, false); + if (rc) + goto out; + + req_ctx->param_init_done = false; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = ctr_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ctr_paes_init(struct crypto_skcipher *tfm) +{ + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + + memset(ctx, 0, sizeof(*ctx)); + spin_lock_init(&ctx->pk_lock); + + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pctr_req_ctx)); + + return 0; +} + +static void ctr_paes_exit(struct crypto_skcipher *tfm) +{ + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + + memzero_explicit(ctx, sizeof(*ctx)); +} + +static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; - return ctx->fc ? 0 : -EINVAL; + /* walk has already been prepared */ + + rc = ctr_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. 
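+ * cond_resched() is safe here because the crypto engine invokes this
+ * callback from its kthread worker, i.e. in sleepable process context.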
+ */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); + } + + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); + return rc; } -static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int in_keylen) +static struct skcipher_engine_alg ctr_paes_alg = { + .base = { + .base.cra_name = "ctr(paes)", + .base.cra_driver_name = "ctr-paes-s390", + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.base.cra_list), + .init = ctr_paes_init, + .exit = ctr_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ctr_paes_setkey, + .encrypt = ctr_paes_crypt, + .decrypt = ctr_paes_crypt, + .chunksize = AES_BLOCK_SIZE, + }, + .op = { + .do_one_request = ctr_paes_do_one_request, + }, +}; + +/* + * PAES XTS implementation + */ + +struct xts_full_km_param { + u8 key[64]; + u8 tweak[16]; + u8 nap[16]; + u8 wkvp[32]; +} __packed; + +struct xts_km_param { + u8 key[PAES_256_PROTKEY_SIZE]; + u8 init[16]; +} __packed; + +struct xts_pcc_param { + u8 key[PAES_256_PROTKEY_SIZE]; + u8 tweak[16]; + u8 block[16]; + u8 bit[16]; + u8 xts[16]; +} __packed; + +struct s390_pxts_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + union { + struct xts_full_km_param full_km_param; + struct xts_km_param km_param; + } param; +}; + +static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int in_keylen) { struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); u8 ckey[2 * AES_MAX_KEY_SIZE]; unsigned int ckey_len; + long fc; int rc; if ((in_keylen == 32 || in_keylen == 64) && xts_verify_key(tfm, in_key, in_keylen)) return -EINVAL; - _free_kb_keybuf(&ctx->kb); - rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen); + /* set raw key into context */ + rc = pxts_ctx_setkey(ctx, in_key, in_keylen); if (rc) - return rc; + goto out; - rc = __xts_paes_set_key(ctx); + /* convert raw key(s) into protected key(s) */ + rc = pxts_convert_key(ctx); if (rc) - return rc; + goto out; /* - * It is not possible on a single protected key (e.g. full AES-XTS) to - * check, if k1 and k2 are the same. - */ - if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 || - ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256) - return 0; - /* * xts_verify_key verifies the key length is not odd and makes * sure that the two keys are not the same. This can be done - * on the two protected keys as well + * on the two protected keys as well - but not for full xts keys. */ - ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? - AES_KEYSIZE_128 : AES_KEYSIZE_256; - memcpy(ckey, ctx->pk[0].protkey, ckey_len); - memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); - return xts_verify_key(tfm, ckey, 2*ckey_len); + if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 || + ctx->pk[0].type == PKEY_KEYTYPE_AES_256) { + ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
+ AES_KEYSIZE_128 : AES_KEYSIZE_256; + memcpy(ckey, ctx->pk[0].protkey, ckey_len); + memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); + rc = xts_verify_key(tfm, ckey, 2 * ckey_len); + memzero_explicit(ckey, sizeof(ckey)); + if (rc) + goto out; + } + + /* Pick the correct function code based on the protected key type */ + switch (ctx->pk[0].type) { + case PKEY_KEYTYPE_AES_128: + fc = CPACF_KM_PXTS_128; + break; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KM_PXTS_256; + break; + case PKEY_KEYTYPE_AES_XTS_128: + fc = CPACF_KM_PXTS_128_FULL; + break; + case PKEY_KEYTYPE_AES_XTS_256: + fc = CPACF_KM_PXTS_256_FULL; + break; + default: + fc = 0; + break; + } + ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + + rc = fc ? 0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int paes_xts_crypt_full(struct skcipher_request *req, - unsigned long modifier) +static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx, + struct s390_pxts_req_ctx *req_ctx, + bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct xts_full_km_param *param = &req_ctx->param.full_km_param; + struct skcipher_walk *walk = &req_ctx->walk; unsigned int keylen, offset, nbytes, n, k; - struct { - u8 key[64]; - u8 tweak[16]; - u8 nap[16]; - u8 wkvp[32]; - } fxts_param = { - .nap = {0}, - }; - struct skcipher_walk walk; - int rc; + int rc = 0; - rc = skcipher_walk_virt(&walk, req, false); - if (rc) - return rc; + /* + * The calling function xts_paes_do_crypt() ensures the + * protected key state is always PK_STATE_VALID when this + * function is invoked. + */ keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64; offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0; - spin_lock_bh(&ctx->pk_lock); - memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen); - memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen, - sizeof(fxts_param.wkvp)); - spin_unlock_bh(&ctx->pk_lock); - memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak)); - fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */ + if (!req_ctx->param_init_done) { + memset(param, 0, sizeof(*param)); + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); + memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp)); + spin_unlock_bh(&ctx->pk_lock); + memcpy(param->tweak, walk->iv, sizeof(param->tweak)); + param->nap[0] = 0x01; /* initial alpha power (1, little-endian) */ + req_ctx->param_init_done = true; + } - while ((nbytes = walk.nbytes) != 0) { + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. 
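+ * When the key is re-converted below, both the key part and the
+ * wrapping key verification pattern (wkvp) in the parameter block
+ * are refreshed from the new protected key blob.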
+ */ + while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset, - walk.dst.virt.addr, walk.src.virt.addr, n); + k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset, + walk->dst.virt.addr, walk->src.virt.addr, n); if (k) - rc = skcipher_walk_done(&walk, nbytes - k); + rc = skcipher_walk_done(walk, nbytes - k); if (k < n) { - if (__xts_paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = pxts_convert_key(ctx); + if (rc) + goto out; spin_lock_bh(&ctx->pk_lock); - memcpy(fxts_param.key + offset, ctx->pk[0].protkey, - keylen); - memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen, - sizeof(fxts_param.wkvp)); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); + memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp)); spin_unlock_bh(&ctx->pk_lock); } } +out: + pr_debug("rc=%d\n", rc); return rc; } -static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier) +static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx, + struct xts_km_param *param, + struct skcipher_walk *walk, + unsigned int keylen, + unsigned int offset, bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct xts_pcc_param pcc_param; + unsigned long cc = 1; + int rc = 0; + + while (cc) { + memset(&pcc_param, 0, sizeof(pcc_param)); + memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); + spin_lock_bh(&ctx->pk_lock); + memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); + spin_unlock_bh(&ctx->pk_lock); + cc = cpacf_pcc(ctx->fc, pcc_param.key + offset); + if (cc) { + if (!maysleep) { + rc = -EKEYEXPIRED; + break; + } + rc = pxts_convert_key(ctx); + if (rc) + break; + continue; + } + memcpy(param->init, pcc_param.xts, 16); + } + + memzero_explicit(pcc_param.key, sizeof(pcc_param.key)); + return rc; +} + +static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx, + struct s390_pxts_req_ctx *req_ctx, + bool maysleep) +{ + struct xts_km_param *param = &req_ctx->param.km_param; + struct skcipher_walk *walk = &req_ctx->walk; unsigned int keylen, offset, nbytes, n, k; - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - u8 tweak[16]; - u8 block[16]; - u8 bit[16]; - u8 xts[16]; - } pcc_param; - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - u8 init[16]; - } xts_param; - struct skcipher_walk walk; - int rc; + int rc = 0; - rc = skcipher_walk_virt(&walk, req, false); - if (rc) - return rc; + /* + * The calling function xts_paes_do_crypt() ensures the + * protected key state is always PK_STATE_VALID when this + * function is invoked. + */ keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64; offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
16 : 0; - memset(&pcc_param, 0, sizeof(pcc_param)); - memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak)); - spin_lock_bh(&ctx->pk_lock); - memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); - memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen); - spin_unlock_bh(&ctx->pk_lock); - cpacf_pcc(ctx->fc, pcc_param.key + offset); - memcpy(xts_param.init, pcc_param.xts, 16); + if (!req_ctx->param_init_done) { + rc = __xts_2keys_prep_param(ctx, param, walk, + keylen, offset, maysleep); + if (rc) + goto out; + req_ctx->param_init_done = true; + } - while ((nbytes = walk.nbytes) != 0) { + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_km(ctx->fc | modifier, xts_param.key + offset, - walk.dst.virt.addr, walk.src.virt.addr, n); + k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset, + walk->dst.virt.addr, walk->src.virt.addr, n); if (k) - rc = skcipher_walk_done(&walk, nbytes - k); + rc = skcipher_walk_done(walk, nbytes - k); if (k < n) { - if (__xts_paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = pxts_convert_key(ctx); + if (rc) + goto out; spin_lock_bh(&ctx->pk_lock); - memcpy(xts_param.key + offset, - ctx->pk[0].protkey, keylen); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); spin_unlock_bh(&ctx->pk_lock); } } +out: + pr_debug("rc=%d\n", rc); return rc; } -static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier) +static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx, + struct s390_pxts_req_ctx *req_ctx, + bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + int pk_state, rc = 0; + + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + if (rc) + goto out; + /* Call the 'real' crypt function based on the xts prot key type. 
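+ * Double key setups (PKEY_KEYTYPE_AES_128/256) go through the
+ * two-key KM-PXTS functions, full xts keys (PKEY_KEYTYPE_AES_XTS_*)
+ * through the single-key KM-PXTS-FULL functions chosen at setkey time.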
*/ switch (ctx->fc) { case CPACF_KM_PXTS_128: case CPACF_KM_PXTS_256: - return paes_xts_crypt(req, modifier); + rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep); + break; case CPACF_KM_PXTS_128_FULL: case CPACF_KM_PXTS_256_FULL: - return paes_xts_crypt_full(req, modifier); + rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep); + break; default: - return -EINVAL; + rc = -EINVAL; } -} -static int xts_paes_encrypt(struct skcipher_request *req) -{ - return xts_paes_crypt(req, 0); +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int xts_paes_decrypt(struct skcipher_request *req) +static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier) { - return xts_paes_crypt(req, CPACF_DECRYPT); -} + struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; -static struct skcipher_alg xts_paes_alg = { - .base.cra_name = "xts(paes)", - .base.cra_driver_name = "xts-paes-s390", - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_pxts_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list), - .init = xts_paes_init, - .exit = xts_paes_exit, - .min_keysize = 2 * PAES_MIN_KEYSIZE, - .max_keysize = 2 * PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = xts_paes_set_key, - .encrypt = xts_paes_encrypt, - .decrypt = xts_paes_decrypt, -}; + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. + */ -static int ctr_paes_init(struct crypto_skcipher *tfm) -{ - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + rc = skcipher_walk_virt(walk, req, false); + if (rc) + goto out; - ctx->kb.key = NULL; - spin_lock_init(&ctx->pk_lock); + req_ctx->modifier = modifier; + req_ctx->param_init_done = false; - return 0; -} + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = xts_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } -static void ctr_paes_exit(struct crypto_skcipher *tfm) -{ - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); - _free_kb_keybuf(&ctx->kb); +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); + return rc; } -static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx) +static int xts_paes_encrypt(struct skcipher_request *req) { - unsigned long fc; - int rc; - - rc = __paes_convert_key(ctx); - if (rc) - return rc; - - /* Pick the correct function code based on the protected key type */ - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 : - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? 
CPACF_KMCTR_PAES_192 : - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? - CPACF_KMCTR_PAES_256 : 0; - - /* Check if the function code is available */ - ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0; + return xts_paes_crypt(req, 0); +} - return ctx->fc ? 0 : -EINVAL; +static int xts_paes_decrypt(struct skcipher_request *req) +{ + return xts_paes_crypt(req, CPACF_DECRYPT); } -static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) +static int xts_paes_init(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - int rc; + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); - _free_kb_keybuf(&ctx->kb); - rc = _key_to_kb(&ctx->kb, in_key, key_len); - if (rc) - return rc; + memset(ctx, 0, sizeof(*ctx)); + spin_lock_init(&ctx->pk_lock); - return __ctr_paes_set_key(ctx); + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pxts_req_ctx)); + + return 0; } -static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) +static void xts_paes_exit(struct crypto_skcipher *tfm) { - unsigned int i, n; + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); - /* only use complete blocks, max. PAGE_SIZE */ - memcpy(ctrptr, iv, AES_BLOCK_SIZE); - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); - for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { - memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE); - crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE); - ctrptr += AES_BLOCK_SIZE; - } - return n; + memzero_explicit(ctx, sizeof(*ctx)); } -static int ctr_paes_crypt(struct skcipher_request *req) +static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq) { + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - u8 buf[AES_BLOCK_SIZE], *ctrptr; - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - } param; - struct skcipher_walk walk; - unsigned int nbytes, n, k; - int rc, locked; - - rc = skcipher_walk_virt(&walk, req, false); - if (rc) - return rc; - - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - - locked = mutex_trylock(&ctrblk_lock); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { - n = AES_BLOCK_SIZE; - if (nbytes >= 2*AES_BLOCK_SIZE && locked) - n = __ctrblk_init(ctrblk, walk.iv, nbytes); - ctrptr = (n > AES_BLOCK_SIZE) ? 
ctrblk : walk.iv; - k = cpacf_kmctr(ctx->fc, ¶m, walk.dst.virt.addr, - walk.src.virt.addr, n, ctrptr); - if (k) { - if (ctrptr == ctrblk) - memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE, - AES_BLOCK_SIZE); - crypto_inc(walk.iv, AES_BLOCK_SIZE); - rc = skcipher_walk_done(&walk, nbytes - k); - } - if (k < n) { - if (__paes_convert_key(ctx)) { - if (locked) - mutex_unlock(&ctrblk_lock); - return skcipher_walk_done(&walk, -EIO); - } - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - } - } - if (locked) - mutex_unlock(&ctrblk_lock); - /* - * final block may be < AES_BLOCK_SIZE, copy only nbytes - */ - if (nbytes) { - memset(buf, 0, AES_BLOCK_SIZE); - memcpy(buf, walk.src.virt.addr, nbytes); - while (1) { - if (cpacf_kmctr(ctx->fc, ¶m, buf, - buf, AES_BLOCK_SIZE, - walk.iv) == AES_BLOCK_SIZE) - break; - if (__paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - } - memcpy(walk.dst.virt.addr, buf, nbytes); - crypto_inc(walk.iv, AES_BLOCK_SIZE); - rc = skcipher_walk_done(&walk, nbytes); + /* walk has already been prepared */ + + rc = xts_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. + */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); } + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); return rc; } -static struct skcipher_alg ctr_paes_alg = { - .base.cra_name = "ctr(paes)", - .base.cra_driver_name = "ctr-paes-s390", - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list), - .init = ctr_paes_init, - .exit = ctr_paes_exit, - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = ctr_paes_set_key, - .encrypt = ctr_paes_crypt, - .decrypt = ctr_paes_crypt, - .chunksize = AES_BLOCK_SIZE, +static struct skcipher_engine_alg xts_paes_alg = { + .base = { + .base.cra_name = "xts(paes)", + .base.cra_driver_name = "xts-paes-s390", + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_pxts_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.base.cra_list), + .init = xts_paes_init, + .exit = xts_paes_exit, + .min_keysize = 2 * PAES_MIN_KEYSIZE, + .max_keysize = 2 * PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = xts_paes_setkey, + .encrypt = xts_paes_encrypt, + .decrypt = xts_paes_decrypt, + }, + .op = { + .do_one_request = xts_paes_do_one_request, + }, }; -static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg) +/* + * alg register, unregister, module init, exit + */ + +static struct miscdevice paes_dev = { + .name = "paes", + .minor = MISC_DYNAMIC_MINOR, +}; + +static 
inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg) { - if (!list_empty(&alg->base.cra_list)) - crypto_unregister_skcipher(alg); + if (!list_empty(&alg->base.base.cra_list)) + crypto_engine_unregister_skcipher(alg); } static void paes_s390_fini(void) { + if (paes_crypto_engine) { + crypto_engine_stop(paes_crypto_engine); + crypto_engine_exit(paes_crypto_engine); + } __crypto_unregister_skcipher(&ctr_paes_alg); __crypto_unregister_skcipher(&xts_paes_alg); __crypto_unregister_skcipher(&cbc_paes_alg); __crypto_unregister_skcipher(&ecb_paes_alg); if (ctrblk) - free_page((unsigned long) ctrblk); + free_page((unsigned long)ctrblk); + misc_deregister(&paes_dev); } static int __init paes_s390_init(void) { int rc; + /* register a simple paes pseudo misc device */ + rc = misc_register(&paes_dev); + if (rc) + return rc; + + /* with this pseudo devie alloc and start a crypto engine */ + paes_crypto_engine = + crypto_engine_alloc_init_and_set(paes_dev.this_device, + true, NULL, false, MAX_QLEN); + if (!paes_crypto_engine) { + rc = -ENOMEM; + goto out_err; + } + rc = crypto_engine_start(paes_crypto_engine); + if (rc) { + crypto_engine_exit(paes_crypto_engine); + paes_crypto_engine = NULL; + goto out_err; + } + /* Query available functions for KM, KMC and KMCTR */ cpacf_query(CPACF_KM, &km_functions); cpacf_query(CPACF_KMC, &kmc_functions); @@ -927,40 +1653,45 @@ static int __init paes_s390_init(void) if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) || cpacf_test_func(&km_functions, CPACF_KM_PAES_192) || cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) { - rc = crypto_register_skcipher(&ecb_paes_alg); + rc = crypto_engine_register_skcipher(&ecb_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", ecb_paes_alg.base.base.cra_driver_name); } if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) { - rc = crypto_register_skcipher(&cbc_paes_alg); + rc = crypto_engine_register_skcipher(&cbc_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", cbc_paes_alg.base.base.cra_driver_name); } if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) || cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) { - rc = crypto_register_skcipher(&xts_paes_alg); + rc = crypto_engine_register_skcipher(&xts_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", xts_paes_alg.base.base.cra_driver_name); } if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) { - ctrblk = (u8 *) __get_free_page(GFP_KERNEL); + ctrblk = (u8 *)__get_free_page(GFP_KERNEL); if (!ctrblk) { rc = -ENOMEM; goto out_err; } - rc = crypto_register_skcipher(&ctr_paes_alg); + rc = crypto_engine_register_skcipher(&ctr_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", ctr_paes_alg.base.base.cra_driver_name); } return 0; + out_err: paes_s390_fini(); return rc; diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h index 2bb22db54c31..d757ccbce2b4 100644 --- a/arch/s390/crypto/sha.h +++ b/arch/s390/crypto/sha.h @@ -10,27 +10,33 @@ #ifndef _CRYPTO_ARCH_S390_SHA_H #define _CRYPTO_ARCH_S390_SHA_H -#include <linux/crypto.h> -#include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/sha3.h> +#include <linux/types.h> /* must be big enough for the largest SHA variant */ -#define SHA3_STATE_SIZE 200 #define CPACF_MAX_PARMBLOCK_SIZE SHA3_STATE_SIZE #define 
SHA_MAX_BLOCK_SIZE SHA3_224_BLOCK_SIZE +#define S390_SHA_CTX_SIZE sizeof(struct s390_sha_ctx) struct s390_sha_ctx { u64 count; /* message length in bytes */ - u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)]; - u8 buf[SHA_MAX_BLOCK_SIZE]; + union { + u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)]; + struct { + u64 state[SHA512_DIGEST_SIZE / sizeof(u64)]; + u64 count_hi; + } sha512; + }; int func; /* KIMD function to use */ - int first_message_part; + bool first_message_part; }; struct shash_desc; -int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len); -int s390_sha_final(struct shash_desc *desc, u8 *out); +int s390_sha_update_blocks(struct shash_desc *desc, const u8 *data, + unsigned int len); +int s390_sha_finup(struct shash_desc *desc, const u8 *src, unsigned int len, + u8 *out); #endif diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index bc3a22704e09..d229cbd2ba22 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -18,12 +18,12 @@ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> */ +#include <asm/cpacf.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/cpufeature.h> #include <crypto/sha1.h> -#include <asm/cpacf.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> #include "sha.h" @@ -49,7 +49,6 @@ static int s390_sha1_export(struct shash_desc *desc, void *out) octx->count = sctx->count; memcpy(octx->state, sctx->state, sizeof(octx->state)); - memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer)); return 0; } @@ -60,7 +59,6 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in) sctx->count = ictx->count; memcpy(sctx->state, ictx->state, sizeof(ictx->state)); - memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); sctx->func = CPACF_KIMD_SHA_1; return 0; } @@ -68,16 +66,18 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in) static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = s390_sha1_init, - .update = s390_sha_update, - .final = s390_sha_final, + .update = s390_sha_update_blocks, + .finup = s390_sha_finup, .export = s390_sha1_export, .import = s390_sha1_import, - .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha1_state), + .descsize = S390_SHA_CTX_SIZE, + .statesize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-s390", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c deleted file mode 100644 index 6f1ccdf93d3e..000000000000 --- a/arch/s390/crypto/sha256_s390.c +++ /dev/null @@ -1,143 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Cryptographic API. - * - * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm. - * - * s390 Version: - * Copyright IBM Corp. 
2005, 2011 - * Author(s): Jan Glauber (jang@de.ibm.com) - */ -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/cpufeature.h> -#include <crypto/sha2.h> -#include <asm/cpacf.h> - -#include "sha.h" - -static int s390_sha256_init(struct shash_desc *desc) -{ - struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - - sctx->state[0] = SHA256_H0; - sctx->state[1] = SHA256_H1; - sctx->state[2] = SHA256_H2; - sctx->state[3] = SHA256_H3; - sctx->state[4] = SHA256_H4; - sctx->state[5] = SHA256_H5; - sctx->state[6] = SHA256_H6; - sctx->state[7] = SHA256_H7; - sctx->count = 0; - sctx->func = CPACF_KIMD_SHA_256; - - return 0; -} - -static int sha256_export(struct shash_desc *desc, void *out) -{ - struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - struct sha256_state *octx = out; - - octx->count = sctx->count; - memcpy(octx->state, sctx->state, sizeof(octx->state)); - memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); - return 0; -} - -static int sha256_import(struct shash_desc *desc, const void *in) -{ - struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - const struct sha256_state *ictx = in; - - sctx->count = ictx->count; - memcpy(sctx->state, ictx->state, sizeof(ictx->state)); - memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); - sctx->func = CPACF_KIMD_SHA_256; - return 0; -} - -static struct shash_alg sha256_alg = { - .digestsize = SHA256_DIGEST_SIZE, - .init = s390_sha256_init, - .update = s390_sha_update, - .final = s390_sha_final, - .export = sha256_export, - .import = sha256_import, - .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-s390", - .cra_priority = 300, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int s390_sha224_init(struct shash_desc *desc) -{ - struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - - sctx->state[0] = SHA224_H0; - sctx->state[1] = SHA224_H1; - sctx->state[2] = SHA224_H2; - sctx->state[3] = SHA224_H3; - sctx->state[4] = SHA224_H4; - sctx->state[5] = SHA224_H5; - sctx->state[6] = SHA224_H6; - sctx->state[7] = SHA224_H7; - sctx->count = 0; - sctx->func = CPACF_KIMD_SHA_256; - - return 0; -} - -static struct shash_alg sha224_alg = { - .digestsize = SHA224_DIGEST_SIZE, - .init = s390_sha224_init, - .update = s390_sha_update, - .final = s390_sha_final, - .export = sha256_export, - .import = sha256_import, - .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-s390", - .cra_priority = 300, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init sha256_s390_init(void) -{ - int ret; - - if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256)) - return -ENODEV; - ret = crypto_register_shash(&sha256_alg); - if (ret < 0) - goto out; - ret = crypto_register_shash(&sha224_alg); - if (ret < 0) - crypto_unregister_shash(&sha256_alg); -out: - return ret; -} - -static void __exit sha256_s390_fini(void) -{ - crypto_unregister_shash(&sha224_alg); - crypto_unregister_shash(&sha256_alg); -} - -module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha256_s390_init); -module_exit(sha256_s390_fini); - -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm"); diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c index a84ef692f572..4a7731ac6bcd 100644 --- 
a/arch/s390/crypto/sha3_256_s390.c +++ b/arch/s390/crypto/sha3_256_s390.c @@ -8,12 +8,14 @@ * Copyright IBM Corp. 2019 * Author(s): Joerg Schmidbauer (jschmidb@de.ibm.com) */ +#include <asm/cpacf.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/cpufeature.h> #include <crypto/sha3.h> -#include <asm/cpacf.h> +#include <linux/cpufeature.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> #include "sha.h" @@ -21,11 +23,11 @@ static int sha3_256_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - if (!test_facility(86)) /* msa 12 */ + sctx->first_message_part = test_facility(86); + if (!sctx->first_message_part) memset(sctx->state, 0, sizeof(sctx->state)); sctx->count = 0; sctx->func = CPACF_KIMD_SHA3_256; - sctx->first_message_part = 1; return 0; } @@ -35,11 +37,11 @@ static int sha3_256_export(struct shash_desc *desc, void *out) struct s390_sha_ctx *sctx = shash_desc_ctx(desc); struct sha3_state *octx = out; - octx->rsiz = sctx->count; + if (sctx->first_message_part) { + memset(sctx->state, 0, sizeof(sctx->state)); + sctx->first_message_part = 0; + } memcpy(octx->st, sctx->state, sizeof(octx->st)); - memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); - octx->partial = sctx->first_message_part; - return 0; } @@ -48,10 +50,9 @@ static int sha3_256_import(struct shash_desc *desc, const void *in) struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha3_state *ictx = in; - sctx->count = ictx->rsiz; + sctx->count = 0; memcpy(sctx->state, ictx->st, sizeof(ictx->st)); - memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); - sctx->first_message_part = ictx->partial; + sctx->first_message_part = 0; sctx->func = CPACF_KIMD_SHA3_256; return 0; @@ -60,30 +61,26 @@ static int sha3_256_import(struct shash_desc *desc, const void *in) static int sha3_224_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - const struct sha3_state *ictx = in; - sctx->count = ictx->rsiz; - memcpy(sctx->state, ictx->st, sizeof(ictx->st)); - memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); - sctx->first_message_part = ictx->partial; + sha3_256_import(desc, in); sctx->func = CPACF_KIMD_SHA3_224; - return 0; } static struct shash_alg sha3_256_alg = { .digestsize = SHA3_256_DIGEST_SIZE, /* = 32 */ .init = sha3_256_init, - .update = s390_sha_update, - .final = s390_sha_final, + .update = s390_sha_update_blocks, + .finup = s390_sha_finup, .export = sha3_256_export, .import = sha3_256_import, - .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha3_state), + .descsize = S390_SHA_CTX_SIZE, + .statesize = SHA3_STATE_SIZE, .base = { .cra_name = "sha3-256", .cra_driver_name = "sha3-256-s390", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA3_256_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -93,28 +90,25 @@ static int sha3_224_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - if (!test_facility(86)) /* msa 12 */ - memset(sctx->state, 0, sizeof(sctx->state)); - sctx->count = 0; + sha3_256_init(desc); sctx->func = CPACF_KIMD_SHA3_224; - sctx->first_message_part = 1; - return 0; } static struct shash_alg sha3_224_alg = { .digestsize = SHA3_224_DIGEST_SIZE, .init = sha3_224_init, - .update = s390_sha_update, - .final = s390_sha_final, + .update = s390_sha_update_blocks, + .finup = s390_sha_finup, .export = sha3_256_export, /* same as for 256 */ .import = 
sha3_224_import, /* function code different! */ - .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha3_state), + .descsize = S390_SHA_CTX_SIZE, + .statesize = SHA3_STATE_SIZE, .base = { .cra_name = "sha3-224", .cra_driver_name = "sha3-224-s390", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA3_224_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c index 07528fc98ff7..018f02fff444 100644 --- a/arch/s390/crypto/sha3_512_s390.c +++ b/arch/s390/crypto/sha3_512_s390.c @@ -7,12 +7,14 @@ * Copyright IBM Corp. 2019 * Author(s): Joerg Schmidbauer (jschmidb@de.ibm.com) */ +#include <asm/cpacf.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/cpufeature.h> #include <crypto/sha3.h> -#include <asm/cpacf.h> +#include <linux/cpufeature.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> #include "sha.h" @@ -20,11 +22,11 @@ static int sha3_512_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - if (!test_facility(86)) /* msa 12 */ + sctx->first_message_part = test_facility(86); + if (!sctx->first_message_part) memset(sctx->state, 0, sizeof(sctx->state)); sctx->count = 0; sctx->func = CPACF_KIMD_SHA3_512; - sctx->first_message_part = 1; return 0; } @@ -34,13 +36,12 @@ static int sha3_512_export(struct shash_desc *desc, void *out) struct s390_sha_ctx *sctx = shash_desc_ctx(desc); struct sha3_state *octx = out; - octx->rsiz = sctx->count; - octx->rsizw = sctx->count >> 32; + if (sctx->first_message_part) { + memset(sctx->state, 0, sizeof(sctx->state)); + sctx->first_message_part = 0; + } memcpy(octx->st, sctx->state, sizeof(octx->st)); - memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); - octx->partial = sctx->first_message_part; - return 0; } @@ -49,13 +50,9 @@ static int sha3_512_import(struct shash_desc *desc, const void *in) struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha3_state *ictx = in; - if (unlikely(ictx->rsizw)) - return -ERANGE; - sctx->count = ictx->rsiz; - + sctx->count = 0; memcpy(sctx->state, ictx->st, sizeof(ictx->st)); - memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); - sctx->first_message_part = ictx->partial; + sctx->first_message_part = 0; sctx->func = CPACF_KIMD_SHA3_512; return 0; @@ -64,33 +61,26 @@ static int sha3_512_import(struct shash_desc *desc, const void *in) static int sha3_384_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - const struct sha3_state *ictx = in; - if (unlikely(ictx->rsizw)) - return -ERANGE; - sctx->count = ictx->rsiz; - - memcpy(sctx->state, ictx->st, sizeof(ictx->st)); - memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); - sctx->first_message_part = ictx->partial; + sha3_512_import(desc, in); sctx->func = CPACF_KIMD_SHA3_384; - return 0; } static struct shash_alg sha3_512_alg = { .digestsize = SHA3_512_DIGEST_SIZE, .init = sha3_512_init, - .update = s390_sha_update, - .final = s390_sha_final, + .update = s390_sha_update_blocks, + .finup = s390_sha_finup, .export = sha3_512_export, .import = sha3_512_import, - .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha3_state), + .descsize = S390_SHA_CTX_SIZE, + .statesize = SHA3_STATE_SIZE, .base = { .cra_name = "sha3-512", .cra_driver_name = "sha3-512-s390", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = 
SHA3_512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -102,28 +92,25 @@ static int sha3_384_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); - if (!test_facility(86)) /* msa 12 */ - memset(sctx->state, 0, sizeof(sctx->state)); - sctx->count = 0; + sha3_512_init(desc); sctx->func = CPACF_KIMD_SHA3_384; - sctx->first_message_part = 1; - return 0; } static struct shash_alg sha3_384_alg = { .digestsize = SHA3_384_DIGEST_SIZE, .init = sha3_384_init, - .update = s390_sha_update, - .final = s390_sha_final, + .update = s390_sha_update_blocks, + .finup = s390_sha_finup, .export = sha3_512_export, /* same as for 512 */ .import = sha3_384_import, /* function code different! */ - .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha3_state), + .descsize = S390_SHA_CTX_SIZE, + .statesize = SHA3_STATE_SIZE, .base = { .cra_name = "sha3-384", .cra_driver_name = "sha3-384-s390", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA3_384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_sha_ctx), .cra_module = THIS_MODULE, diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 04f11c407763..33711a29618c 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -7,14 +7,13 @@ * Copyright IBM Corp. 2007 * Author(s): Jan Glauber (jang@de.ibm.com) */ +#include <asm/cpacf.h> #include <crypto/internal/hash.h> #include <crypto/sha2.h> +#include <linux/cpufeature.h> #include <linux/errno.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/cpufeature.h> -#include <asm/cpacf.h> #include "sha.h" @@ -22,15 +21,16 @@ static int sha512_init(struct shash_desc *desc) { struct s390_sha_ctx *ctx = shash_desc_ctx(desc); - *(__u64 *)&ctx->state[0] = SHA512_H0; - *(__u64 *)&ctx->state[2] = SHA512_H1; - *(__u64 *)&ctx->state[4] = SHA512_H2; - *(__u64 *)&ctx->state[6] = SHA512_H3; - *(__u64 *)&ctx->state[8] = SHA512_H4; - *(__u64 *)&ctx->state[10] = SHA512_H5; - *(__u64 *)&ctx->state[12] = SHA512_H6; - *(__u64 *)&ctx->state[14] = SHA512_H7; + ctx->sha512.state[0] = SHA512_H0; + ctx->sha512.state[1] = SHA512_H1; + ctx->sha512.state[2] = SHA512_H2; + ctx->sha512.state[3] = SHA512_H3; + ctx->sha512.state[4] = SHA512_H4; + ctx->sha512.state[5] = SHA512_H5; + ctx->sha512.state[6] = SHA512_H6; + ctx->sha512.state[7] = SHA512_H7; ctx->count = 0; + ctx->sha512.count_hi = 0; ctx->func = CPACF_KIMD_SHA_512; return 0; @@ -42,9 +42,8 @@ static int sha512_export(struct shash_desc *desc, void *out) struct sha512_state *octx = out; octx->count[0] = sctx->count; - octx->count[1] = 0; + octx->count[1] = sctx->sha512.count_hi; memcpy(octx->state, sctx->state, sizeof(octx->state)); - memcpy(octx->buf, sctx->buf, sizeof(octx->buf)); return 0; } @@ -53,12 +52,10 @@ static int sha512_import(struct shash_desc *desc, const void *in) struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha512_state *ictx = in; - if (unlikely(ictx->count[1])) - return -ERANGE; sctx->count = ictx->count[0]; + sctx->sha512.count_hi = ictx->count[1]; memcpy(sctx->state, ictx->state, sizeof(ictx->state)); - memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); sctx->func = CPACF_KIMD_SHA_512; return 0; } @@ -66,16 +63,18 @@ static int sha512_import(struct shash_desc *desc, const void *in) static struct shash_alg sha512_alg = { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_init, - .update = s390_sha_update, - .final = s390_sha_final, + .update = s390_sha_update_blocks, + .finup = 
s390_sha_finup, .export = sha512_export, .import = sha512_import, .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha512_state), + .statesize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name= "sha512-s390", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -87,15 +86,16 @@ static int sha384_init(struct shash_desc *desc) { struct s390_sha_ctx *ctx = shash_desc_ctx(desc); - *(__u64 *)&ctx->state[0] = SHA384_H0; - *(__u64 *)&ctx->state[2] = SHA384_H1; - *(__u64 *)&ctx->state[4] = SHA384_H2; - *(__u64 *)&ctx->state[6] = SHA384_H3; - *(__u64 *)&ctx->state[8] = SHA384_H4; - *(__u64 *)&ctx->state[10] = SHA384_H5; - *(__u64 *)&ctx->state[12] = SHA384_H6; - *(__u64 *)&ctx->state[14] = SHA384_H7; + ctx->sha512.state[0] = SHA384_H0; + ctx->sha512.state[1] = SHA384_H1; + ctx->sha512.state[2] = SHA384_H2; + ctx->sha512.state[3] = SHA384_H3; + ctx->sha512.state[4] = SHA384_H4; + ctx->sha512.state[5] = SHA384_H5; + ctx->sha512.state[6] = SHA384_H6; + ctx->sha512.state[7] = SHA384_H7; ctx->count = 0; + ctx->sha512.count_hi = 0; ctx->func = CPACF_KIMD_SHA_512; return 0; @@ -104,17 +104,19 @@ static int sha384_init(struct shash_desc *desc) static struct shash_alg sha384_alg = { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_init, - .update = s390_sha_update, - .final = s390_sha_final, + .update = s390_sha_update_blocks, + .finup = s390_sha_finup, .export = sha512_export, .import = sha512_import, .descsize = sizeof(struct s390_sha_ctx), - .statesize = sizeof(struct sha512_state), + .statesize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name= "sha384-s390", .cra_priority = 300, .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_ctxsize = sizeof(struct s390_sha_ctx), .cra_module = THIS_MODULE, } diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index 961d7d522af1..b5e2c365ea05 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c @@ -13,50 +13,33 @@ #include <asm/cpacf.h> #include "sha.h" -int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) +int s390_sha_update_blocks(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct s390_sha_ctx *ctx = shash_desc_ctx(desc); unsigned int bsize = crypto_shash_blocksize(desc->tfm); - unsigned int index, n; + struct s390_sha_ctx *ctx = shash_desc_ctx(desc); + unsigned int n; int fc; - /* how much is already in the buffer? */ - index = ctx->count % bsize; - ctx->count += len; - - if ((index + len) < bsize) - goto store; - fc = ctx->func; if (ctx->first_message_part) - fc |= test_facility(86) ? 
CPACF_KIMD_NIP : 0; - - /* process one stored block */ - if (index) { - memcpy(ctx->buf + index, data, bsize - index); - cpacf_kimd(fc, ctx->state, ctx->buf, bsize); - ctx->first_message_part = 0; - fc &= ~CPACF_KIMD_NIP; - data += bsize - index; - len -= bsize - index; - index = 0; - } + fc |= CPACF_KIMD_NIP; /* process as many blocks as possible */ - if (len >= bsize) { - n = (len / bsize) * bsize; - cpacf_kimd(fc, ctx->state, data, n); - ctx->first_message_part = 0; - data += n; - len -= n; + n = (len / bsize) * bsize; + ctx->count += n; + switch (ctx->func) { + case CPACF_KLMD_SHA_512: + case CPACF_KLMD_SHA3_384: + if (ctx->count < n) + ctx->sha512.count_hi++; + break; } -store: - if (len) - memcpy(ctx->buf + index , data, len); - - return 0; + cpacf_kimd(fc, ctx->state, data, n); + ctx->first_message_part = 0; + return len - n; } -EXPORT_SYMBOL_GPL(s390_sha_update); +EXPORT_SYMBOL_GPL(s390_sha_update_blocks); static int s390_crypto_shash_parmsize(int func) { @@ -77,15 +60,15 @@ static int s390_crypto_shash_parmsize(int func) } } -int s390_sha_final(struct shash_desc *desc, u8 *out) +int s390_sha_finup(struct shash_desc *desc, const u8 *src, unsigned int len, + u8 *out) { struct s390_sha_ctx *ctx = shash_desc_ctx(desc); - unsigned int bsize = crypto_shash_blocksize(desc->tfm); - u64 bits; - unsigned int n; int mbl_offset, fc; + u64 bits; + + ctx->count += len; - n = ctx->count % bsize; bits = ctx->count * 8; mbl_offset = s390_crypto_shash_parmsize(ctx->func); if (mbl_offset < 0) @@ -95,17 +78,16 @@ int s390_sha_final(struct shash_desc *desc, u8 *out) /* set total msg bit length (mbl) in CPACF parmblock */ switch (ctx->func) { - case CPACF_KLMD_SHA_1: - case CPACF_KLMD_SHA_256: - memcpy(ctx->state + mbl_offset, &bits, sizeof(bits)); - break; case CPACF_KLMD_SHA_512: - /* - * the SHA512 parmblock has a 128-bit mbl field, clear - * high-order u64 field, copy bits to low-order u64 field - */ - memset(ctx->state + mbl_offset, 0x00, sizeof(bits)); + /* The SHA512 parmblock has a 128-bit mbl field. */ + if (ctx->count < len) + ctx->sha512.count_hi++; + ctx->sha512.count_hi <<= 3; + ctx->sha512.count_hi |= ctx->count >> 61; mbl_offset += sizeof(u64) / sizeof(u32); + fallthrough; + case CPACF_KLMD_SHA_1: + case CPACF_KLMD_SHA_256: memcpy(ctx->state + mbl_offset, &bits, sizeof(bits)); break; case CPACF_KLMD_SHA3_224: @@ -121,16 +103,14 @@ int s390_sha_final(struct shash_desc *desc, u8 *out) fc |= test_facility(86) ? 
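Aside on the SHA-512 length field handled in s390_sha_finup() above: the 128-bit "mbl" value is simply the 128-bit byte count (count_hi:count) shifted left by three, with the high word taking the carried-out top bits and the low word being count * 8. A minimal stand-alone C sketch of that arithmetic (bytes_to_bits128 is a made-up name; this is a model, not the driver code):

        #include <assert.h>
        #include <stdint.h>

        /* 128-bit byte count (hi:lo) -> 128-bit bit count, as in the finup path */
        static void bytes_to_bits128(uint64_t cnt_hi, uint64_t cnt_lo,
                                     uint64_t *bits_hi, uint64_t *bits_lo)
        {
                *bits_hi = (cnt_hi << 3) | (cnt_lo >> 61); /* carry top 3 bits up */
                *bits_lo = cnt_lo << 3;                    /* same as cnt_lo * 8 */
        }

        int main(void)
        {
                uint64_t hi, lo;

                bytes_to_bits128(0, 1ULL << 62, &hi, &lo); /* 2^62 bytes ... */
                assert(hi == 2 && lo == 0);                /* ... is 2^65 bits */
                return 0;
        }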
CPACF_KLMD_DUFOP : 0; if (ctx->first_message_part) fc |= CPACF_KLMD_NIP; - cpacf_klmd(fc, ctx->state, ctx->buf, n); + cpacf_klmd(fc, ctx->state, src, len); /* copy digest to out */ memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm)); - /* wipe context */ - memset(ctx, 0, sizeof *ctx); return 0; } -EXPORT_SYMBOL_GPL(s390_sha_final); +EXPORT_SYMBOL_GPL(s390_sha_finup); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("s390 SHA cipher common functions"); diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 04ea1c03a5ff..96409573c75d 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -342,7 +342,7 @@ static struct dentry *hypfs_create_file(struct dentry *parent, const char *name, struct inode *inode; inode_lock(d_inode(parent)); - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) { dentry = ERR_PTR(-ENOMEM); goto fail; diff --git a/arch/s390/include/asm/asce.h b/arch/s390/include/asm/asce.h new file mode 100644 index 000000000000..f6dfaaba735a --- /dev/null +++ b/arch/s390/include/asm/asce.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_ASCE_H +#define _ASM_S390_ASCE_H + +#include <linux/thread_info.h> +#include <linux/irqflags.h> +#include <asm/lowcore.h> +#include <asm/ctlreg.h> + +static inline bool enable_sacf_uaccess(void) +{ + unsigned long flags; + + if (test_thread_flag(TIF_ASCE_PRIMARY)) + return true; + local_irq_save(flags); + local_ctl_load(1, &get_lowcore()->kernel_asce); + set_thread_flag(TIF_ASCE_PRIMARY); + local_irq_restore(flags); + return false; +} + +static inline void disable_sacf_uaccess(bool previous) +{ + unsigned long flags; + + if (previous) + return; + local_irq_save(flags); + local_ctl_load(1, &get_lowcore()->user_asce); + clear_thread_flag(TIF_ASCE_PRIMARY); + local_irq_restore(flags); +} + +#endif /* _ASM_S390_ASCE_H */ diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index 59ab1192e2d5..54cb97603ec0 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -649,18 +649,30 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len, * instruction * @func: the function code passed to PCC; see CPACF_KM_xxx defines * @param: address of parameter block; see POP for details on each func + * + * Returns the condition code, this is + * 0 - cc code 0 (normal completion) + * 1 - cc code 1 (protected key wkvp mismatch or src operand out of range) + * 2 - cc code 2 (something invalid, scalar multiply infinity, ...) + * Condition code 3 (partial completion) is handled within the asm code + * and never returned. 
*/ -static inline void cpacf_pcc(unsigned long func, void *param) +static inline int cpacf_pcc(unsigned long func, void *param) { + int cc; + asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,0,0\n" /* PCC opcode */ " brc 1,0b\n" /* handle partial completion */ - : + CC_IPM(cc) + : CC_OUT(cc, cc) : [fc] "d" (func), [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_PCC) - : "cc", "memory", "0", "1"); + : CC_CLOBBER_LIST("memory", "0", "1")); + + return CC_TRANSFORM(cc); } /** diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h index e08169bd63a5..6c6a99660e78 100644 --- a/arch/s390/include/asm/cpufeature.h +++ b/arch/s390/include/asm/cpufeature.h @@ -15,6 +15,7 @@ enum { S390_CPU_FEATURE_MSA, S390_CPU_FEATURE_VXRS, S390_CPU_FEATURE_UV, + S390_CPU_FEATURE_D288, MAX_CPU_FEATURES }; diff --git a/arch/s390/include/asm/diag288.h b/arch/s390/include/asm/diag288.h new file mode 100644 index 000000000000..5e1b43cea9d6 --- /dev/null +++ b/arch/s390/include/asm/diag288.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_DIAG288_H +#define _ASM_S390_DIAG288_H + +#include <asm/asm-extable.h> +#include <asm/types.h> + +#define MIN_INTERVAL 15 /* Minimal time supported by diag288 */ +#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ + +#define WDT_DEFAULT_TIMEOUT 30 + +/* Function codes - init, change, cancel */ +#define WDT_FUNC_INIT 0 +#define WDT_FUNC_CHANGE 1 +#define WDT_FUNC_CANCEL 2 +#define WDT_FUNC_CONCEAL 0x80000000 + +/* Action codes for LPAR watchdog */ +#define LPARWDT_RESTART 0 + +static inline int __diag288(unsigned int func, unsigned int timeout, + unsigned long action, unsigned int len) +{ + union register_pair r1 = { .even = func, .odd = timeout, }; + union register_pair r3 = { .even = action, .odd = len, }; + int rc = -EINVAL; + + asm volatile( + " diag %[r1],%[r3],0x288\n" + "0: lhi %[rc],0\n" + "1:" + EX_TABLE(0b, 1b) + : [rc] "+d" (rc) + : [r1] "d" (r1.pair), [r3] "d" (r3.pair) + : "cc", "memory"); + return rc; +} + +#endif /* _ASM_S390_DIAG288_H */ diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index f5781794356b..942f21c39697 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -13,9 +13,11 @@ static uaccess_kmsan_or_inline int \ __futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \ { \ + bool sacf_flag; \ int rc, new; \ \ instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \ + sacf_flag = enable_sacf_uaccess(); \ asm_inline volatile( \ " sacf 256\n" \ "0: l %[old],%[uaddr]\n" \ @@ -32,6 +34,7 @@ __futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \ [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \ : [oparg] "d" (oparg) \ : "cc"); \ + disable_sacf_uaccess(sacf_flag); \ if (!rc) \ instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \ return rc; \ @@ -75,9 +78,11 @@ int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) static uaccess_kmsan_or_inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + bool sacf_flag; int rc; instrument_copy_from_user_before(uval, uaddr, sizeof(*uval)); + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " sacf 256\n" "0: cs %[old],%[new],%[uaddr]\n" @@ -88,6 +93,7 @@ int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr) : [new] "d" (newval) : "cc", "memory"); + disable_sacf_uaccess(sacf_flag); 
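The enable_sacf_uaccess()/disable_sacf_uaccess() pair introduced in asm/asce.h above (and used by the futex helpers) follows a save-and-conditionally-restore pattern: enable reports whether the primary-ASCE state was already active, and the matching disable only restores the user ASCE when this call was the one that switched. A user-space model of just that idiom, with the thread flag and control-register loads reduced to a boolean (model_enable/model_disable are made-up names):

        #include <assert.h>
        #include <stdbool.h>

        static _Thread_local bool asce_primary;   /* models TIF_ASCE_PRIMARY */

        static bool model_enable(void)
        {
                if (asce_primary)
                        return true;              /* already primary: nothing to undo */
                asce_primary = true;              /* models loading the kernel ASCE into CR1 */
                return false;
        }

        static void model_disable(bool previous)
        {
                if (previous)
                        return;                   /* an outer section still needs it */
                asce_primary = false;             /* models restoring the user ASCE */
        }

        int main(void)
        {
                bool outer = model_enable();
                bool inner = model_enable();      /* nested use, e.g. uaccess inside uaccess */

                model_disable(inner);
                assert(asce_primary);             /* outer section still covered */
                model_disable(outer);
                assert(!asce_primary);
                return 0;
        }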
*uval = oldval; instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0); return rc; diff --git a/arch/s390/include/asm/machine.h b/arch/s390/include/asm/machine.h index 54478caa5237..8abe5afdbfc4 100644 --- a/arch/s390/include/asm/machine.h +++ b/arch/s390/include/asm/machine.h @@ -18,6 +18,7 @@ #define MFEATURE_VM 7 #define MFEATURE_KVM 8 #define MFEATURE_LPAR 9 +#define MFEATURE_DIAG288 10 #ifndef __ASSEMBLY__ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 88f84beebb9e..d9b8501bc93d 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -13,6 +13,7 @@ #include <linux/mm_types.h> #include <asm/tlbflush.h> #include <asm/ctlreg.h> +#include <asm/asce.h> #include <asm-generic/mm_hooks.h> #define init_new_context init_new_context @@ -77,7 +78,8 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct * else get_lowcore()->user_asce.val = next->context.asce; cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); - /* Clear previous user-ASCE from CR7 */ + /* Clear previous user-ASCE from CR1 and CR7 */ + local_ctl_load(1, &s390_invalid_asce); local_ctl_load(7, &s390_invalid_asce); if (prev != next) cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); @@ -99,6 +101,7 @@ static inline void finish_arch_post_lock_switch(void) { struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; + unsigned long flags; if (mm) { preempt_disable(); @@ -108,15 +111,25 @@ static inline void finish_arch_post_lock_switch(void) __tlb_flush_mm_lazy(mm); preempt_enable(); } + local_irq_save(flags); + if (test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &get_lowcore()->kernel_asce); + else + local_ctl_load(1, &get_lowcore()->user_asce); local_ctl_load(7, &get_lowcore()->user_asce); + local_irq_restore(flags); } #define activate_mm activate_mm static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - switch_mm(prev, next, current); + switch_mm_irqs_off(prev, next, current); cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + if (test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &get_lowcore()->kernel_asce); + else + local_ctl_load(1, &get_lowcore()->user_asce); local_ctl_load(7, &get_lowcore()->user_asce); } diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h index 5dca1a46a9f6..b7b59faf16f4 100644 --- a/arch/s390/include/asm/pkey.h +++ b/arch/s390/include/asm/pkey.h @@ -20,9 +20,22 @@ * @param key pointer to a buffer containing the key blob * @param keylen size of the key blob in bytes * @param protkey pointer to buffer receiving the protected key + * @param xflags additional execution flags (see PKEY_XFLAG_* definitions below) + * As of now the only supported flag is PKEY_XFLAG_NOMEMALLOC. * @return 0 on success, negative errno value on failure */ int pkey_key2protkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); + +/* + * If this flag is given in the xflags parameter, the pkey implementation + * is not allowed to allocate memory but instead should fall back to use + * preallocated memory or simple fail with -ENOMEM. + * This flag is for protected key derive within a cipher or similar + * which must not allocate memory which would cause io operations - see + * also the CRYPTO_ALG_ALLOCATES_MEMORY flag in crypto.h. 
+ */ +#define PKEY_XFLAG_NOMEMALLOC 0x0001 #endif /* _KAPI_PKEY_H */ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index c66f3fc6daaf..62c0ab4a4b9d 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -9,6 +9,7 @@ #include <linux/bits.h> #include <uapi/asm/ptrace.h> +#include <asm/thread_info.h> #include <asm/tpi.h> #define PIF_SYSCALL 0 /* inside a system call */ @@ -126,7 +127,6 @@ struct pt_regs { struct tpi_info tpi_info; }; unsigned long flags; - unsigned long cr1; unsigned long last_break; }; @@ -229,8 +229,44 @@ static inline void instruction_pointer_set(struct pt_regs *regs, int regs_query_register_offset(const char *name); const char *regs_query_register_name(unsigned int offset); -unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); -unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); + +static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->gprs[15]; +} + +static __always_inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (offset >= NUM_GPRS) + return 0; + return regs->gprs[offset]; +} + +static __always_inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + unsigned long ksp = kernel_stack_pointer(regs); + + return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs:pt_regs which contains kernel stack pointer. + * @n:stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specifined by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +static __always_inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long addr; + + addr = kernel_stack_pointer(regs) + n * sizeof(long); + if (!regs_within_kernel_stack(regs, addr)) + return 0; + return READ_ONCE_NOCHECK(addr); +} /** * regs_get_kernel_argument() - get Nth function argument in kernel @@ -251,11 +287,6 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, return regs_get_kernel_stack_nth(regs, argoffset + n); } -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ - return regs->gprs[15]; -} - static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) { regs->gprs[2] = rc; diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index 2ab868cbae6c..f8f68f4ef255 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -26,11 +26,9 @@ void *memmove(void *dest, const void *src, size_t n); #define __HAVE_ARCH_MEMSCAN /* inline & arch function */ #define __HAVE_ARCH_STRCAT /* inline & arch function */ #define __HAVE_ARCH_STRCMP /* arch function */ -#define __HAVE_ARCH_STRCPY /* inline & arch function */ #define __HAVE_ARCH_STRLCAT /* arch function */ #define __HAVE_ARCH_STRLEN /* inline & arch function */ #define __HAVE_ARCH_STRNCAT /* arch function */ -#define __HAVE_ARCH_STRNCPY /* arch function */ #define __HAVE_ARCH_STRNLEN /* inline & arch function */ #define __HAVE_ARCH_STRSTR /* arch function */ #define __HAVE_ARCH_MEMSET16 /* arch function */ @@ -42,7 +40,6 @@ int memcmp(const void *s1, const void *s2, size_t n); int strcmp(const char *s1, const char *s2); size_t strlcat(char *dest, const char *src, size_t n); char *strncat(char *dest, const char *src, size_t n); -char *strncpy(char 
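The regs_within_kernel_stack() test moved into ptrace.h above relies on THREAD_SIZE being a power of two: masking off the low bits maps any address to the base of the stack it lies on, so two addresses share a stack iff the masked values match. A stand-alone sketch of the same check (same_stack and the 16 KiB MODEL_THREAD_SIZE are illustrative assumptions, not the kernel values):

        #include <assert.h>
        #include <stdint.h>

        #define MODEL_THREAD_SIZE (16UL * 1024)   /* assumed power-of-two stack size */

        static int same_stack(uintptr_t ksp, uintptr_t addr)
        {
                return (addr & ~(MODEL_THREAD_SIZE - 1)) ==
                       (ksp  & ~(MODEL_THREAD_SIZE - 1));
        }

        int main(void)
        {
                uintptr_t ksp = 0x4000 + 0x100;           /* inside the stack at 0x4000 */

                assert(same_stack(ksp, 0x4000 + 0x3ff8)); /* same 16 KiB block */
                assert(!same_stack(ksp, 0x8000));         /* next block: rejected */
                return 0;
        }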
*dest, const char *src, size_t n); char *strstr(const char *s1, const char *s2); #endif /* !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN) */ @@ -155,22 +152,6 @@ static inline char *strcat(char *dst, const char *src) } #endif -#ifdef __HAVE_ARCH_STRCPY -static inline char *strcpy(char *dst, const char *src) -{ - char *ret = dst; - - asm volatile( - " lghi 0,0\n" - "0: mvst %[dst],%[src]\n" - " jo 0b" - : [dst] "+&a" (dst), [src] "+&a" (src) - : - : "cc", "memory", "0"); - return ret; -} -#endif - #if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s) { @@ -208,7 +189,6 @@ static inline size_t strnlen(const char * s, size_t n) void *memchr(const void * s, int c, size_t n); void *memscan(void *s, int c, size_t n); char *strcat(char *dst, const char *src); -char *strcpy(char *dst, const char *src); size_t strlen(const char *s); size_t strnlen(const char * s, size_t n); #endif /* !IN_ARCH_STRING_C */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 91f569cae1ce..391eb04d26d8 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -9,6 +9,7 @@ #define _ASM_THREAD_INFO_H #include <linux/bits.h> +#include <vdso/page.h> /* * General size of kernel stacks @@ -24,8 +25,6 @@ #define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) #ifndef __ASSEMBLY__ -#include <asm/lowcore.h> -#include <asm/page.h> /* * low level task data that entry.S needs immediate access to @@ -64,6 +63,7 @@ void arch_setup_new_exec(void); #define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */ #define TIF_UPROBE 4 /* breakpointed or single-stepping */ #define TIF_PATCH_PENDING 5 /* pending live patching update */ +#define TIF_ASCE_PRIMARY 6 /* primary asce is kernel asce */ #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */ #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ @@ -85,6 +85,7 @@ void arch_setup_new_exec(void); #define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY) #define _TIF_UPROBE BIT(TIF_UPROBE) #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) +#define _TIF_ASCE_PRIMARY BIT(TIF_ASCE_PRIMARY) #define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 8629d70ec38b..a43fc88c0050 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -19,6 +19,7 @@ #include <asm/extable.h> #include <asm/facility.h> #include <asm-generic/access_ok.h> +#include <asm/asce.h> #include <linux/instrumented.h> void debug_user_asce(int exit); @@ -478,6 +479,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, __uint128_t old, __uint128_t new, unsigned long key, int size) { + bool sacf_flag; int rc = 0; switch (size) { @@ -490,6 +492,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, _old = ((unsigned int)old & 0xff) << shift; _new = ((unsigned int)new & 0xff) << shift; mask = ~(0xff << shift); + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -524,6 +527,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [default_key] "J" (PAGE_DEFAULT_KEY), [max_loops] "J" 
(CMPXCHG_USER_KEY_MAX_LOOPS) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned char *)uval = prev >> shift; if (!count) rc = -EAGAIN; @@ -538,6 +542,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, _old = ((unsigned int)old & 0xffff) << shift; _new = ((unsigned int)new & 0xffff) << shift; mask = ~(0xffff << shift); + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -572,6 +577,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [default_key] "J" (PAGE_DEFAULT_KEY), [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned short *)uval = prev >> shift; if (!count) rc = -EAGAIN; @@ -580,6 +586,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, case 4: { unsigned int prev = old; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -595,12 +602,14 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [key] "a" (key << 4), [default_key] "J" (PAGE_DEFAULT_KEY) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned int *)uval = prev; return rc; } case 8: { unsigned long prev = old; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -616,12 +625,14 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [key] "a" (key << 4), [default_key] "J" (PAGE_DEFAULT_KEY) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned long *)uval = prev; return rc; } case 16: { __uint128_t prev = old; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -637,6 +648,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [key] "a" (key << 4), [default_key] "J" (PAGE_DEFAULT_KEY) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(__uint128_t *)uval = prev; return rc; } diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index 46fb0ef6f984..b008402ec9aa 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -616,8 +616,9 @@ static inline int uv_remove_shared(unsigned long addr) return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS); } -int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], - struct uv_secret_list_item_hdr *secret); +int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list *list, + struct uv_secret_list_item_hdr *secret); int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size); extern int prot_virt_host; diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 841e05f7fa7e..95ecad9c7d7d 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -50,7 +50,6 @@ int main(void) OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); OFFSET(__PT_INT_CODE, pt_regs, int_code); OFFSET(__PT_FLAGS, pt_regs, flags); - OFFSET(__PT_CR1, pt_regs, cr1); OFFSET(__PT_LAST_BREAK, pt_regs, last_break); DEFINE(__PT_SIZE, sizeof(struct pt_regs)); BLANK(); diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c index 03f3a1e52430..c217a5e64094 100644 --- a/arch/s390/kernel/cert_store.c +++ b/arch/s390/kernel/cert_store.c @@ -138,7 +138,7 @@ static void cert_store_key_describe(const struct key *key, struct seq_file *m) * First 64 bytes of the key description is key name in EBCDIC CP 500. * Convert it to ASCII for displaying in /proc/keys. 
*/ - strscpy(ascii, key->description, sizeof(ascii)); + strscpy(ascii, key->description); EBCASC_500(ascii, VC_NAME_LEN_BYTES); seq_puts(m, ascii); diff --git a/arch/s390/kernel/cpufeature.c b/arch/s390/kernel/cpufeature.c index 1b2ae42a0c15..76210f001028 100644 --- a/arch/s390/kernel/cpufeature.c +++ b/arch/s390/kernel/cpufeature.c @@ -5,11 +5,13 @@ #include <linux/cpufeature.h> #include <linux/bug.h> +#include <asm/machine.h> #include <asm/elf.h> enum { TYPE_HWCAP, TYPE_FACILITY, + TYPE_MACHINE, }; struct s390_cpu_feature { @@ -21,6 +23,7 @@ static struct s390_cpu_feature s390_cpu_features[MAX_CPU_FEATURES] = { [S390_CPU_FEATURE_MSA] = {.type = TYPE_HWCAP, .num = HWCAP_NR_MSA}, [S390_CPU_FEATURE_VXRS] = {.type = TYPE_HWCAP, .num = HWCAP_NR_VXRS}, [S390_CPU_FEATURE_UV] = {.type = TYPE_FACILITY, .num = 158}, + [S390_CPU_FEATURE_D288] = {.type = TYPE_MACHINE, .num = MFEATURE_DIAG288}, }; /* @@ -38,6 +41,8 @@ int cpu_have_feature(unsigned int num) return !!(elf_hwcap & BIT(feature->num)); case TYPE_FACILITY: return test_facility(feature->num); + case TYPE_MACHINE: + return test_machine_feature(feature->num); default: WARN_ON_ONCE(1); return 0; diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 4a981266b483..adb164223f8c 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -354,7 +354,7 @@ static void *nt_prpsinfo(void *ptr) memset(&prpsinfo, 0, sizeof(prpsinfo)); prpsinfo.pr_sname = 'R'; - strcpy(prpsinfo.pr_fname, "vmlinux"); + strscpy(prpsinfo.pr_fname, "vmlinux"); return nt_init(ptr, PRPSINFO, prpsinfo); } diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index ce038e9205f7..2a41be2f7925 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -251,7 +251,7 @@ static debug_info_t *debug_info_alloc(const char *name, int pages_per_area, rc->level = level; rc->buf_size = buf_size; rc->entry_size = sizeof(debug_entry_t) + buf_size; - strscpy(rc->name, name, sizeof(rc->name)); + strscpy(rc->name, name); memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); memset(rc->debugfs_entries, 0, DEBUG_MAX_VIEWS * sizeof(struct dentry *)); refcount_set(&(rc->ref_count), 0); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 9980c17ba22d..0f00f4b06d51 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -116,7 +116,7 @@ _LPP_OFFSET = __LC_LPP .macro SIEEXIT sie_control,lowcore lg %r9,\sie_control # get control block pointer ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_KERNEL_ASCE(\lowcore) # load primary asce + lctlg %c1,%c1,__LC_USER_ASCE(\lowcore) # load primary asce lg %r9,__LC_CURRENT(\lowcore) mvi __TI_sie(%r9),0 larl %r9,sie_exit # skip forward to sie_exit @@ -208,7 +208,7 @@ SYM_FUNC_START(__sie64a) lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE GET_LC %r14 - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r14) # load primary asce + lctlg %c1,%c1,__LC_USER_ASCE(%r14) # load primary asce lg %r14,__LC_CURRENT(%r14) mvi __TI_sie(%r14),0 SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL) @@ -240,7 +240,6 @@ SYM_CODE_START(system_call) lghi %r14,0 .Lsysc_per: STBEAR __LC_LAST_BREAK(%r13) - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) lg %r15,__LC_KERNEL_STACK(%r13) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15) @@ -261,7 +260,6 @@ SYM_CODE_START(system_call) lgr %r3,%r14 brasl %r14,__do_syscall STACKLEAK_ERASE - lctlg %c1,%c1,__LC_USER_ASCE(%r13) mvc 
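Several hunks here (cert_store.c, crash_dump.c, debug.c, and later ipl.c and processor.c) switch to the two-argument strscpy() form, which derives the bound from the destination array and always NUL-terminates, returning the copied length or -E2BIG on truncation. A user-space model of those semantics, assuming the destination really is an array (the kernel macro enforces that at build time; model_strscpy/model_strscpy2 are made-up names):

        #include <assert.h>
        #include <errno.h>
        #include <stddef.h>
        #include <string.h>

        /* bounded copy, always NUL-terminated; -E2BIG when src did not fit */
        static long model_strscpy(char *dst, const char *src, size_t count)
        {
                size_t i;

                if (!count)
                        return -E2BIG;
                for (i = 0; i + 1 < count && src[i]; i++)
                        dst[i] = src[i];
                dst[i] = '\0';
                return src[i] ? -E2BIG : (long)i;
        }

        /* two-argument form: the bound comes from the destination array itself */
        #define model_strscpy2(dst, src) model_strscpy((dst), (src), sizeof(dst))

        int main(void)
        {
                char name[8];

                assert(model_strscpy2(name, "vmlinux") == 7);
                assert(model_strscpy2(name, "vmlinux-too-long") == -E2BIG);
                assert(strcmp(name, "vmlinux") == 0);   /* truncated but terminated */
                return 0;
        }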
__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) BPON LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) @@ -278,7 +276,6 @@ SYM_CODE_START(ret_from_fork) brasl %r14,__ret_from_fork STACKLEAK_ERASE GET_LC %r13 - lctlg %c1,%c1,__LC_USER_ASCE(%r13) mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) BPON LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) @@ -299,10 +296,7 @@ SYM_CODE_START(pgm_check_handler) lmg %r8,%r9,__LC_PGM_OLD_PSW(%r13) xgr %r10,%r10 tmhh %r8,0x0001 # coming from user space? - jno .Lpgm_skip_asce - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) - j 3f # -> fault in user space -.Lpgm_skip_asce: + jo 3f # -> fault in user space #if IS_ENABLED(CONFIG_KVM) lg %r11,__LC_CURRENT(%r13) tm __TI_sie(%r11),0xff @@ -340,7 +334,6 @@ SYM_CODE_START(pgm_check_handler) tmhh %r8,0x0001 # returning to user space? jno .Lpgm_exit_kernel STACKLEAK_ERASE - lctlg %c1,%c1,__LC_USER_ASCE(%r13) BPON stpt __LC_EXIT_TIMER(%r13) .Lpgm_exit_kernel: @@ -384,8 +377,7 @@ SYM_CODE_START(\name) #endif 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f -1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) - lg %r15,__LC_KERNEL_STACK(%r13) +1: lg %r15,__LC_KERNEL_STACK(%r13) 2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) @@ -408,7 +400,6 @@ SYM_CODE_START(\name) tmhh %r8,0x0001 # returning to user ? jno 2f STACKLEAK_ERASE - lctlg %c1,%c1,__LC_USER_ASCE(%r13) BPON stpt __LC_EXIT_TIMER(%r13) 2: LBEAR __PT_LAST_BREAK(%r11) @@ -476,8 +467,6 @@ SYM_CODE_START(mcck_int_handler) .Lmcck_user: lg %r15,__LC_MCCK_STACK(%r13) la %r11,STACK_FRAME_OVERHEAD(%r15) - stctg %c1,%c1,__PT_CR1(%r11) - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lay %r14,__LC_GPREGS_SAVE_AREA(%r13) mvc __PT_R0(128,%r11),0(%r14) @@ -495,7 +484,6 @@ SYM_CODE_START(mcck_int_handler) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,s390_do_machine_check - lctlg %c1,%c1,__PT_CR1(%r11) lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ? 
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 3b9d9ccfad63..ff15f91affde 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -270,7 +270,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ { \ if (len >= sizeof(_value)) \ return -E2BIG; \ - len = strscpy(_value, buf, sizeof(_value)); \ + len = strscpy(_value, buf); \ if ((ssize_t)len < 0) \ return len; \ strim(_value); \ @@ -2249,26 +2249,28 @@ static int __init s390_ipl_init(void) __initcall(s390_ipl_init); -static void __init strncpy_skip_quote(char *dst, char *src, int n) +static void __init strscpy_skip_quote(char *dst, char *src, int n) { int sx, dx; - dx = 0; - for (sx = 0; src[sx] != 0; sx++) { + if (!n) + return; + for (sx = 0, dx = 0; src[sx]; sx++) { if (src[sx] == '"') continue; - dst[dx++] = src[sx]; - if (dx >= n) + dst[dx] = src[sx]; + if (dx + 1 == n) break; + dx++; } + dst[dx] = '\0'; } static int __init vmcmd_on_reboot_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE); - vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_reboot, str, sizeof(vmcmd_on_reboot)); on_reboot_trigger.action = &vmcmd_action; return 1; } @@ -2278,8 +2280,7 @@ static int __init vmcmd_on_panic_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE); - vmcmd_on_panic[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_panic, str, sizeof(vmcmd_on_panic)); on_panic_trigger.action = &vmcmd_action; return 1; } @@ -2289,8 +2290,7 @@ static int __init vmcmd_on_halt_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE); - vmcmd_on_halt[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_halt, str, sizeof(vmcmd_on_halt)); on_halt_trigger.action = &vmcmd_action; return 1; } @@ -2300,8 +2300,7 @@ static int __init vmcmd_on_poff_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE); - vmcmd_on_poff[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_poff, str, sizeof(vmcmd_on_poff)); on_poff_trigger.action = &vmcmd_action; return 1; } diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index e657fad7e376..6a262e198e35 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -980,8 +980,6 @@ static int cfdiag_push_sample(struct perf_event *event, } overflow = perf_event_overflow(event, &data, ®s); - if (overflow) - event->pmu->stop(event, 0); perf_event_update_userpage(event); return overflow; diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 690a293eb10d..7ace1f9e4ccf 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -290,8 +290,8 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_NO_SPECIAL, 0x00f4); CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5); CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7); CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc); -CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); -CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); +CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x0108); +CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x0109); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index ad22799d8a7d..91469401f2c9 100644 
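The strscpy_skip_quote() rework above replaces the old copy-then-terminate pair with a helper that strips double quotes, copies at most n-1 characters, and terminates unconditionally. A stand-alone copy of that logic with a couple of checks (skip_quote_copy and the sample strings are purely illustrative):

        #include <assert.h>
        #include <string.h>

        static void skip_quote_copy(char *dst, const char *src, int n)
        {
                int sx, dx;

                if (!n)
                        return;
                for (sx = 0, dx = 0; src[sx]; sx++) {
                        if (src[sx] == '"')
                                continue;
                        dst[dx] = src[sx];
                        if (dx + 1 == n)
                                break;
                        dx++;
                }
                dst[dx] = '\0';
        }

        int main(void)
        {
                char cmd[8];

                skip_quote_copy(cmd, "\"MSG OP\"", sizeof(cmd));
                assert(strcmp(cmd, "MSG OP") == 0);     /* quotes stripped */

                skip_quote_copy(cmd, "0123456789", sizeof(cmd));
                assert(strcmp(cmd, "0123456") == 0);    /* bounded to n-1 chars, terminated */
                return 0;
        }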
--- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1072,10 +1072,7 @@ static int perf_push_sample(struct perf_event *event, overflow = 0; if (perf_event_exclude(event, ®s, sde_regs)) goto out; - if (perf_event_overflow(event, &data, ®s)) { - overflow = 1; - event->pmu->stop(event, 0); - } + overflow = perf_event_overflow(event, &data, ®s); perf_event_update_userpage(event); out: return overflow; diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 80b1f7a29f11..11f70c1e2797 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -268,35 +268,35 @@ static int __init setup_elf_platform(void) add_device_randomness(&cpu_id, sizeof(cpu_id)); switch (cpu_id.machine) { default: /* Use "z10" as default. */ - strcpy(elf_platform, "z10"); + strscpy(elf_platform, "z10"); break; case 0x2817: case 0x2818: - strcpy(elf_platform, "z196"); + strscpy(elf_platform, "z196"); break; case 0x2827: case 0x2828: - strcpy(elf_platform, "zEC12"); + strscpy(elf_platform, "zEC12"); break; case 0x2964: case 0x2965: - strcpy(elf_platform, "z13"); + strscpy(elf_platform, "z13"); break; case 0x3906: case 0x3907: - strcpy(elf_platform, "z14"); + strscpy(elf_platform, "z14"); break; case 0x8561: case 0x8562: - strcpy(elf_platform, "z15"); + strscpy(elf_platform, "z15"); break; case 0x3931: case 0x3932: - strcpy(elf_platform, "z16"); + strscpy(elf_platform, "z16"); break; case 0x9175: case 0x9176: - strcpy(elf_platform, "z17"); + strscpy(elf_platform, "z17"); break; } return 0; diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 34b8d9e745df..e1240f6b29fa 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1524,13 +1524,6 @@ static const char *gpr_names[NUM_GPRS] = { "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", }; -unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) -{ - if (offset >= NUM_GPRS) - return 0; - return regs->gprs[offset]; -} - int regs_query_register_offset(const char *name) { unsigned long offset; @@ -1550,29 +1543,3 @@ const char *regs_query_register_name(unsigned int offset) return NULL; return gpr_names[offset]; } - -static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) -{ - unsigned long ksp = kernel_stack_pointer(regs); - - return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); -} - -/** - * regs_get_kernel_stack_nth() - get Nth entry of the stack - * @regs:pt_regs which contains kernel stack pointer. - * @n:stack entry number. - * - * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which - * is specifined by @regs. If the @n th entry is NOT in the kernel stack, - * this returns 0. 
- */ -unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) -{ - unsigned long addr; - - addr = kernel_stack_pointer(regs) + n * sizeof(long); - if (!regs_within_kernel_stack(regs, addr)) - return 0; - return READ_ONCE_NOCHECK(addr); -} diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 63f41dfaba85..81f12bb77f62 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -263,7 +263,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) abs_lc = get_abs_lowcore(); memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area)); put_abs_lowcore(abs_lc); - lc->cregs_save_area[1] = lc->kernel_asce; + lc->cregs_save_area[1] = lc->user_asce; lc->cregs_save_area[7] = lc->user_asce; save_access_regs((unsigned int *) lc->access_regs_save_area); arch_spin_lock_setup(cpu); diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 9a5d5be8acf4..4ab0b6b4866e 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -782,7 +782,12 @@ out_kobj: device_initcall(uv_sysfs_init); /* - * Find the secret with the secret_id in the provided list. + * Locate a secret in the list by its id. + * @secret_id: search pattern. + * @list: ephemeral buffer space + * @secret: output data, containing the secret's metadata. + * + * Search for a secret with the given secret_id in the Ultravisor secret store. * * Context: might sleep. */ @@ -803,12 +808,15 @@ static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN], /* * Do the actual search for `uv_get_secret_metadata`. + * @secret_id: search pattern. + * @list: ephemeral buffer space + * @secret: output data, containing the secret's metadata. * * Context: might sleep. */ -static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN], - struct uv_secret_list *list, - struct uv_secret_list_item_hdr *secret) +int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list *list, + struct uv_secret_list_item_hdr *secret) { u16 start_idx = 0; u16 list_rc; @@ -830,36 +838,7 @@ static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN], return -ENOENT; } - -/** - * uv_get_secret_metadata() - get secret metadata for a given secret id. - * @secret_id: search pattern. - * @secret: output data, containing the secret's metadata. - * - * Search for a secret with the given secret_id in the Ultravisor secret store. - * - * Context: might sleep. - * - * Return: - * * %0: - Found entry; secret->idx and secret->type are valid. - * * %ENOENT - No entry found. - * * %ENODEV: - Not supported: UV not available or command not available. - * * %EIO: - Other unexpected UV error. - */ -int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], - struct uv_secret_list_item_hdr *secret) -{ - struct uv_secret_list *buf; - int rc; - - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - return -ENOMEM; - rc = find_secret(secret_id, buf, secret); - kfree(buf); - return rc; -} -EXPORT_SYMBOL_GPL(uv_get_secret_metadata); +EXPORT_SYMBOL_GPL(uv_find_secret); /** * uv_retrieve_secret() - get the secret value for the secret index. diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 14bbfe50033c..cd35cdbfa871 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -3,6 +3,7 @@ # Makefile for s390-specific library files.. 
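With the uv_get_secret_metadata() wrapper removed above, a caller that only needs the metadata now supplies its own scratch uv_secret_list and calls the exported uv_find_secret() directly. A hedged kernel-context sketch that mirrors the body of the removed wrapper (example_lookup is a made-up name; includes and error handling kept minimal):

        /* kernel context assumed: <asm/uv.h>, <linux/slab.h> */
        static int example_lookup(const u8 secret_id[UV_SECRET_ID_LEN],
                                  struct uv_secret_list_item_hdr *hdr)
        {
                struct uv_secret_list *buf;
                int rc;

                buf = kzalloc(sizeof(*buf), GFP_KERNEL);   /* heap scratch, as the old wrapper did */
                if (!buf)
                        return -ENOMEM;
                rc = uv_find_secret(secret_id, buf, hdr);  /* 0, -ENOENT, -ENODEV or -EIO */
                kfree(buf);
                return rc;
        }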
# +obj-y += crypto/ lib-y += delay.o string.o uaccess.o find.o spinlock.o tishift.o lib-y += csum-partial.o obj-y += mem.o xor.o @@ -26,4 +27,4 @@ lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o obj-$(CONFIG_CRC32_ARCH) += crc32-s390.o -crc32-s390-y := crc32-glue.o crc32le-vx.o crc32be-vx.o +crc32-s390-y := crc32.o crc32le-vx.o crc32be-vx.o diff --git a/arch/s390/lib/crc32-glue.c b/arch/s390/lib/crc32.c index 124214a27340..3c4b344417c1 100644 --- a/arch/s390/lib/crc32-glue.c +++ b/arch/s390/lib/crc32.c @@ -18,8 +18,6 @@ #define VX_ALIGNMENT 16L #define VX_ALIGN_MASK (VX_ALIGNMENT - 1) -static DEFINE_STATIC_KEY_FALSE(have_vxrs); - /* * DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension * @@ -34,8 +32,7 @@ static DEFINE_STATIC_KEY_FALSE(have_vxrs); unsigned long prealign, aligned, remaining; \ DECLARE_KERNEL_FPU_ONSTACK16(vxstate); \ \ - if (datalen < VX_MIN_LEN + VX_ALIGN_MASK || \ - !static_branch_likely(&have_vxrs)) \ + if (datalen < VX_MIN_LEN + VX_ALIGN_MASK || !cpu_has_vx()) \ return ___crc32_sw(crc, data, datalen); \ \ if ((unsigned long)data & VX_ALIGN_MASK) { \ @@ -64,25 +61,13 @@ DEFINE_CRC32_VX(crc32_le_arch, crc32_le_vgfm_16, crc32_le_base) DEFINE_CRC32_VX(crc32_be_arch, crc32_be_vgfm_16, crc32_be_base) DEFINE_CRC32_VX(crc32c_arch, crc32c_le_vgfm_16, crc32c_base) -static int __init crc32_s390_init(void) -{ - if (cpu_have_feature(S390_CPU_FEATURE_VXRS)) - static_branch_enable(&have_vxrs); - return 0; -} -arch_initcall(crc32_s390_init); - -static void __exit crc32_s390_exit(void) -{ -} -module_exit(crc32_s390_exit); - u32 crc32_optimizations(void) { - if (static_key_enabled(&have_vxrs)) + if (cpu_has_vx()) { return CRC32_LE_OPTIMIZATION | CRC32_BE_OPTIMIZATION | CRC32C_OPTIMIZATION; + } return 0; } EXPORT_SYMBOL(crc32_optimizations); diff --git a/arch/s390/lib/crypto/Kconfig b/arch/s390/lib/crypto/Kconfig new file mode 100644 index 000000000000..e3f855ef4393 --- /dev/null +++ b/arch/s390/lib/crypto/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_CHACHA_S390 + tristate + default CRYPTO_LIB_CHACHA + select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_SHA256_S390 + tristate + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_LIB_SHA256_GENERIC diff --git a/arch/s390/lib/crypto/Makefile b/arch/s390/lib/crypto/Makefile new file mode 100644 index 000000000000..920197967f46 --- /dev/null +++ b/arch/s390/lib/crypto/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o +chacha_s390-y := chacha-glue.o chacha-s390.o + +obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256.o diff --git a/arch/s390/lib/crypto/chacha-glue.c b/arch/s390/lib/crypto/chacha-glue.c new file mode 100644 index 000000000000..f95ba3483bbc --- /dev/null +++ b/arch/s390/lib/crypto/chacha-glue.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ChaCha stream cipher (s390 optimized) + * + * Copyright IBM Corp. 
2021 + */ + +#define KMSG_COMPONENT "chacha_s390" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <crypto/chacha.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sizes.h> +#include <asm/fpu.h> +#include "chacha-s390.h" + +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) +{ + /* TODO: implement hchacha_block_arch() in assembly */ + hchacha_block_generic(state, out, nrounds); +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + /* s390 chacha20 implementation has 20 rounds hard-coded, + * it cannot handle a block of data or less, but otherwise + * it can handle data of arbitrary size + */ + if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !cpu_has_vx()) { + chacha_crypt_generic(state, dst, src, bytes, nrounds); + } else { + DECLARE_KERNEL_FPU_ONSTACK32(vxstate); + + kernel_fpu_begin(&vxstate, KERNEL_VXR); + chacha20_vx(dst, src, bytes, &state->x[4], &state->x[12]); + kernel_fpu_end(&vxstate, KERNEL_VXR); + + state->x[12] += round_up(bytes, CHACHA_BLOCK_SIZE) / + CHACHA_BLOCK_SIZE; + } +} +EXPORT_SYMBOL(chacha_crypt_arch); + +bool chacha_is_arch_optimized(void) +{ + return cpu_has_vx(); +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +MODULE_DESCRIPTION("ChaCha stream cipher (s390 optimized)"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/s390/crypto/chacha-s390.S b/arch/s390/lib/crypto/chacha-s390.S index 63f3102678c0..63f3102678c0 100644 --- a/arch/s390/crypto/chacha-s390.S +++ b/arch/s390/lib/crypto/chacha-s390.S diff --git a/arch/s390/crypto/chacha-s390.h b/arch/s390/lib/crypto/chacha-s390.h index 733744ce30f5..733744ce30f5 100644 --- a/arch/s390/crypto/chacha-s390.h +++ b/arch/s390/lib/crypto/chacha-s390.h diff --git a/arch/s390/lib/crypto/sha256.c b/arch/s390/lib/crypto/sha256.c new file mode 100644 index 000000000000..7dfe120fafab --- /dev/null +++ b/arch/s390/lib/crypto/sha256.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256 optimized using the CP Assist for Cryptographic Functions (CPACF) + * + * Copyright 2025 Google LLC + */ +#include <asm/cpacf.h> +#include <crypto/internal/sha2.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_sha256); + +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + if (static_branch_likely(&have_cpacf_sha256)) + cpacf_kimd(CPACF_KIMD_SHA_256, state, data, + nblocks * SHA256_BLOCK_SIZE); + else + sha256_blocks_generic(state, data, nblocks); +} +EXPORT_SYMBOL_GPL(sha256_blocks_arch); + +bool sha256_is_arch_optimized(void) +{ + return static_key_enabled(&have_cpacf_sha256); +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +static int __init sha256_s390_mod_init(void) +{ + if (cpu_have_feature(S390_CPU_FEATURE_MSA) && + cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256)) + static_branch_enable(&have_cpacf_sha256); + return 0; +} +subsys_initcall(sha256_s390_mod_init); + +static void __exit sha256_s390_mod_exit(void) +{ +} +module_exit(sha256_s390_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 using the CP Assist for Cryptographic Functions (CPACF)"); diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index 373fa1f01937..099de76e8b1a 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -78,50 +78,6 @@ EXPORT_SYMBOL(strnlen); 
#endif /** - * strcpy - Copy a %NUL terminated string - * @dest: Where to copy the string to - * @src: Where to copy the string from - * - * returns a pointer to @dest - */ -#ifdef __HAVE_ARCH_STRCPY -char *strcpy(char *dest, const char *src) -{ - char *ret = dest; - - asm volatile( - " lghi 0,0\n" - "0: mvst %[dest],%[src]\n" - " jo 0b\n" - : [dest] "+&a" (dest), [src] "+&a" (src) - : - : "cc", "memory", "0"); - return ret; -} -EXPORT_SYMBOL(strcpy); -#endif - -/** - * strncpy - Copy a length-limited, %NUL-terminated string - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @n: The maximum number of bytes to copy - * - * The result is not %NUL-terminated if the source exceeds - * @n bytes. - */ -#ifdef __HAVE_ARCH_STRNCPY -char *strncpy(char *dest, const char *src, size_t n) -{ - size_t len = __strnend(src, n) - src; - memset(dest + len, 0, n - len); - memcpy(dest, src, len); - return dest; -} -EXPORT_SYMBOL(strncpy); -#endif - -/** * strcat - Append one %NUL-terminated string to another * @dest: The string to be appended to * @src: The string to append to it @@ -181,9 +137,6 @@ EXPORT_SYMBOL(strlcat); * @n: The maximum numbers of bytes to copy * * returns a pointer to @dest - * - * Note that in contrast to strncpy, strncat ensures the result is - * terminated. */ #ifdef __HAVE_ARCH_STRNCAT char *strncat(char *dest, const char *src, size_t n) diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index cec20db88479..fa7d98fa1320 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -17,17 +17,18 @@ #ifdef CONFIG_DEBUG_ENTRY void debug_user_asce(int exit) { + struct lowcore *lc = get_lowcore(); struct ctlreg cr1, cr7; local_ctl_store(1, &cr1); local_ctl_store(7, &cr7); - if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val) + if (cr1.val == lc->user_asce.val && cr7.val == lc->user_asce.val) return; panic("incorrect ASCE on kernel %s\n" "cr1: %016lx cr7: %016lx\n" "kernel: %016lx user: %016lx\n", exit ? "exit" : "entry", cr1.val, cr7.val, - get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val); + lc->kernel_asce.val, lc->user_asce.val); } #endif /*CONFIG_DEBUG_ENTRY */ diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index a6b8b8ea9086..f7da53e212f5 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -530,6 +530,14 @@ segment_modify_shared (char *name, int do_nonshared) return rc; } +static void __dcss_diag_purge_on_cpu_0(void *data) +{ + struct dcss_segment *seg = (struct dcss_segment *)data; + unsigned long dummy; + + dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); +} + /* * Decrease the use count of a DCSS segment and remove * it from the address space if nobody is using it @@ -538,7 +546,6 @@ segment_modify_shared (char *name, int do_nonshared) void segment_unload(char *name) { - unsigned long dummy; struct dcss_segment *seg; if (!machine_is_vm()) @@ -556,7 +563,14 @@ segment_unload(char *name) kfree(seg->res); vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); list_del(&seg->list); - dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); + /* + * Workaround for z/VM issue, where calling the DCSS unload diag on + * a non-IPL CPU would cause bogus sclp maximum memory detection on + * next IPL. + * IPL CPU 0 cannot be set offline, so the dcss_diag() call can + * directly be scheduled to that CPU. 
+ */ + smp_call_function_single(0, __dcss_diag_purge_on_cpu_0, seg, 1); kfree(seg); out_unlock: mutex_unlock(&dcss_lock); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index e3a6f8ae156c..d177bea0bd73 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -38,11 +38,15 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; + struct ctlreg asce; /* change all active ASCEs to avoid the creation of new TLBs */ if (current->active_mm == mm) { - get_lowcore()->user_asce.val = mm->context.asce; - local_ctl_load(7, &get_lowcore()->user_asce); + asce.val = mm->context.asce; + get_lowcore()->user_asce = asce; + local_ctl_load(7, &asce); + if (!test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &asce); } __tlb_flush_local(); } @@ -52,6 +56,8 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) unsigned long *pgd = NULL, *p4d = NULL, *__pgd; unsigned long asce_limit = mm->context.asce_limit; + mmap_assert_write_locked(mm); + /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ VM_BUG_ON(asce_limit < _REGION2_SIZE); @@ -75,13 +81,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) spin_lock_bh(&mm->page_table_lock); - /* - * This routine gets called with mmap_lock lock held and there is - * no reason to optimize for the case of otherwise. However, if - * that would ever change, the below check will let us know. - */ - VM_BUG_ON(asce_limit != mm->context.asce_limit); - if (p4d) { __pgd = (unsigned long *) mm->pgd; p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 5bbdc4190b8b..cd6676c2d602 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -45,6 +45,7 @@ /* list of all detected zpci devices */ static LIST_HEAD(zpci_list); static DEFINE_SPINLOCK(zpci_list_lock); +static DEFINE_MUTEX(zpci_add_remove_lock); static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE); static DEFINE_SPINLOCK(zpci_domain_lock); @@ -70,6 +71,15 @@ EXPORT_SYMBOL_GPL(zpci_aipb); struct airq_iv *zpci_aif_sbv; EXPORT_SYMBOL_GPL(zpci_aif_sbv); +void zpci_zdev_put(struct zpci_dev *zdev) +{ + if (!zdev) + return; + mutex_lock(&zpci_add_remove_lock); + kref_put_lock(&zdev->kref, zpci_release_device, &zpci_list_lock); + mutex_unlock(&zpci_add_remove_lock); +} + struct zpci_dev *get_zdev_by_fid(u32 fid) { struct zpci_dev *tmp, *zdev = NULL; @@ -837,6 +847,7 @@ int zpci_add_device(struct zpci_dev *zdev) { int rc; + mutex_lock(&zpci_add_remove_lock); zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state); rc = zpci_init_iommu(zdev); if (rc) @@ -850,12 +861,14 @@ int zpci_add_device(struct zpci_dev *zdev) spin_lock(&zpci_list_lock); list_add_tail(&zdev->entry, &zpci_list); spin_unlock(&zpci_list_lock); + mutex_unlock(&zpci_add_remove_lock); return 0; error_destroy_iommu: zpci_destroy_iommu(zdev); error: zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc); + mutex_unlock(&zpci_add_remove_lock); return rc; } @@ -925,21 +938,20 @@ int zpci_deconfigure_device(struct zpci_dev *zdev) * @zdev: the zpci_dev that was reserved * * Handle the case that a given zPCI function was reserved by another system. - * After a call to this function the zpci_dev can not be found via - * get_zdev_by_fid() anymore but may still be accessible via existing - * references though it will not be functional anymore. 
*/ void zpci_device_reserved(struct zpci_dev *zdev) { - /* - * Remove device from zpci_list as it is going away. This also - * makes sure we ignore subsequent zPCI events for this device. - */ - spin_lock(&zpci_list_lock); - list_del(&zdev->entry); - spin_unlock(&zpci_list_lock); + lockdep_assert_held(&zdev->state_lock); + /* We may declare the device reserved multiple times */ + if (zdev->state == ZPCI_FN_STATE_RESERVED) + return; zdev->state = ZPCI_FN_STATE_RESERVED; zpci_dbg(3, "rsv fid:%x\n", zdev->fid); + /* + * The underlying device is gone. Allow the zdev to be freed + * as soon as all other references are gone by accounting for + * the removal as a dropped reference. + */ zpci_zdev_put(zdev); } @@ -947,13 +959,14 @@ void zpci_release_device(struct kref *kref) { struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref); + lockdep_assert_held(&zpci_add_remove_lock); WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED); - - if (zdev->zbus->bus) - zpci_bus_remove_device(zdev, false); - - if (zdev_enabled(zdev)) - zpci_disable_device(zdev); + /* + * We already hold zpci_list_lock thanks to kref_put_lock(). + * This makes sure no new reference can be taken from the list. + */ + list_del(&zdev->entry); + spin_unlock(&zpci_list_lock); if (zdev->has_hp_slot) zpci_exit_slot(zdev); diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index e86a9419d233..ae3d7a9159bd 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -21,11 +21,8 @@ int zpci_bus_scan_device(struct zpci_dev *zdev); void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error); void zpci_release_device(struct kref *kref); -static inline void zpci_zdev_put(struct zpci_dev *zdev) -{ - if (zdev) - kref_put(&zdev->kref, zpci_release_device); -} + +void zpci_zdev_put(struct zpci_dev *zdev); static inline void zpci_zdev_get(struct zpci_dev *zdev) { diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 7bd7721c1239..2fbee3887d13 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -335,6 +335,22 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) zdev->state = ZPCI_FN_STATE_STANDBY; } +static void zpci_event_reappear(struct zpci_dev *zdev) +{ + lockdep_assert_held(&zdev->state_lock); + /* + * The zdev is in the reserved state. This means that it was presumed to + * go away but there are still undropped references. Now, the platform + * announced its availability again. Bring back the lingering zdev + * to standby. This is safe because we hold a temporary reference + * now so that it won't go away. Account for the re-appearance of the + * underlying device by incrementing the reference count. 
+ */ + zdev->state = ZPCI_FN_STATE_STANDBY; + zpci_zdev_get(zdev); + zpci_dbg(1, "rea fid:%x, fh:%x\n", zdev->fid, zdev->fh); +} + static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); @@ -358,8 +374,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; } } else { + if (zdev->state == ZPCI_FN_STATE_RESERVED) + zpci_event_reappear(zdev); /* the configuration request may be stale */ - if (zdev->state != ZPCI_FN_STATE_STANDBY) + else if (zdev->state != ZPCI_FN_STATE_STANDBY) break; zdev->state = ZPCI_FN_STATE_CONFIGURED; } @@ -375,6 +393,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) break; } } else { + if (zdev->state == ZPCI_FN_STATE_RESERVED) + zpci_event_reappear(zdev); zpci_update_fh(zdev, ccdf->fh); } break; diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 5fcc1a3b04bd..51e7a28af899 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -32,8 +32,10 @@ static inline int __pcistb_mio_inuser( u64 len, u8 *status) { int cc, exception; + bool sacf_flag; exception = 1; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile ( " sacf 256\n" "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n" @@ -44,6 +46,7 @@ static inline int __pcistb_mio_inuser( : CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception) : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src)) : CC_CLOBBER_LIST("memory")); + disable_sacf_uaccess(sacf_flag); *status = len >> 24 & 0xff; return exception ? -ENXIO : CC_TRANSFORM(cc); } @@ -54,6 +57,7 @@ static inline int __pcistg_mio_inuser( { union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen}; int cc, exception; + bool sacf_flag; u64 val = 0; u64 cnt = ulen; u8 tmp; @@ -64,6 +68,7 @@ static inline int __pcistg_mio_inuser( * address space. pcistg then uses the user mappings. */ exception = 1; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile ( " sacf 256\n" "0: llgc %[tmp],0(%[src])\n" @@ -81,6 +86,7 @@ static inline int __pcistg_mio_inuser( CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair) : : CC_CLOBBER_LIST("memory")); + disable_sacf_uaccess(sacf_flag); *status = ioaddr_len.odd >> 24 & 0xff; cc = exception ? -ENXIO : CC_TRANSFORM(cc); @@ -204,6 +210,7 @@ static inline int __pcilg_mio_inuser( u64 ulen, u8 *status) { union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen}; + bool sacf_flag; u64 cnt = ulen; int shift = ulen * 8; int cc, exception; @@ -215,6 +222,7 @@ static inline int __pcilg_mio_inuser( * user address @dst */ exception = 1; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile ( " sacf 256\n" "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n" @@ -236,10 +244,10 @@ static inline int __pcilg_mio_inuser( : [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception), CC_OUT(cc, cc), [val] "=d" (val), [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp), - [shift] "+d" (shift) + [shift] "+a" (shift) : : CC_CLOBBER_LIST("memory")); - + disable_sacf_uaccess(sacf_flag); cc = exception ? -ENXIO : CC_TRANSFORM(cc); /* did we write everything to the user space buffer? 
*/ if (!cc && cnt != 0) diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c index f9f3b14f70d5..730c01b225bd 100644 --- a/arch/sh/boards/mach-se/7343/irq.c +++ b/arch/sh/boards/mach-se/7343/irq.c @@ -47,8 +47,9 @@ static void __init se7343_domain_init(void) { int i; - se7343_irq_domain = irq_domain_add_linear(NULL, SE7343_FPGA_IRQ_NR, - &irq_domain_simple_ops, NULL); + se7343_irq_domain = irq_domain_create_linear(NULL, SE7343_FPGA_IRQ_NR, + &irq_domain_simple_ops, + NULL); if (unlikely(!se7343_irq_domain)) { printk("Failed to get IRQ domain\n"); return; @@ -70,7 +71,7 @@ static void __init se7343_gc_init(void) struct irq_chip_type *ct; unsigned int irq_base; - irq_base = irq_linear_revmap(se7343_irq_domain, 0); + irq_base = irq_find_mapping(se7343_irq_domain, 0); gc = irq_alloc_generic_chip(DRV_NAME, 1, irq_base, se7343_irq_regs, handle_level_irq); diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c index efa96edd47dc..49aa3a2b1b8f 100644 --- a/arch/sh/boards/mach-se/7722/irq.c +++ b/arch/sh/boards/mach-se/7722/irq.c @@ -46,7 +46,7 @@ static void __init se7722_domain_init(void) { int i; - se7722_irq_domain = irq_domain_add_linear(NULL, SE7722_FPGA_IRQ_NR, + se7722_irq_domain = irq_domain_create_linear(NULL, SE7722_FPGA_IRQ_NR, &irq_domain_simple_ops, NULL); if (unlikely(!se7722_irq_domain)) { printk("Failed to get IRQ domain\n"); @@ -69,7 +69,7 @@ static void __init se7722_gc_init(void) struct irq_chip_type *ct; unsigned int irq_base; - irq_base = irq_linear_revmap(se7722_irq_domain, 0); + irq_base = irq_find_mapping(se7722_irq_domain, 0); gc = irq_alloc_generic_chip(DRV_NAME, 1, irq_base, se7722_irq_regs, handle_level_irq); diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c index f82d3a6a844a..c13d51b29702 100644 --- a/arch/sh/boards/mach-x3proto/gpio.c +++ b/arch/sh/boards/mach-x3proto/gpio.c @@ -108,7 +108,7 @@ int __init x3proto_gpio_setup(void) if (unlikely(ret)) goto err_gpio; - x3proto_irq_domain = irq_domain_add_linear(NULL, NR_BASEBOARD_GPIOS, + x3proto_irq_domain = irq_domain_create_linear(NULL, NR_BASEBOARD_GPIOS, &x3proto_gpio_irq_ops, NULL); if (unlikely(!x3proto_irq_domain)) goto err_irq; diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index fc2010c241fb..31dbd8888aaa 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig @@ -87,6 +87,5 @@ CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_DEBUG_FS=y -CONFIG_CRYPTO_MANAGER=y # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index f1ba0fefe1f9..7a7c4dec2925 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -205,7 +205,7 @@ CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENTS=y CONFIG_KEYS=y CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m diff --git a/arch/sparc/crypto/Kconfig b/arch/sparc/crypto/Kconfig index e858597de89d..a6ba319c42dc 100644 --- a/arch/sparc/crypto/Kconfig +++ b/arch/sparc/crypto/Kconfig @@ -36,16 +36,6 @@ config CRYPTO_SHA1_SPARC64 Architecture: sparc64 -config CRYPTO_SHA256_SPARC64 - tristate "Hash functions: SHA-224 and SHA-256" - depends on SPARC64 - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: sparc64 using crypto instructions, when available - 
config CRYPTO_SHA512_SPARC64 tristate "Hash functions: SHA-384 and SHA-512" depends on SPARC64 diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile index a2d7fca40cb4..701c39edb0d7 100644 --- a/arch/sparc/crypto/Makefile +++ b/arch/sparc/crypto/Makefile @@ -4,7 +4,6 @@ # obj-$(CONFIG_CRYPTO_SHA1_SPARC64) += sha1-sparc64.o -obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o obj-$(CONFIG_CRYPTO_SHA512_SPARC64) += sha512-sparc64.o obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o @@ -13,7 +12,6 @@ obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o sha1-sparc64-y := sha1_asm.o sha1_glue.o -sha256-sparc64-y := sha256_asm.o sha256_glue.o sha512-sparc64-y := sha512_asm.o sha512_glue.o md5-sparc64-y := md5_asm.o md5_glue.o diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S index 155cefb98520..f291174a72a1 100644 --- a/arch/sparc/crypto/aes_asm.S +++ b/arch/sparc/crypto/aes_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - #define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \ AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \ AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \ diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index 683150830356..359f22643b05 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -27,11 +27,10 @@ #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> +#include <asm/opcodes.h> #include <asm/pstate.h> #include <asm/elf.h> -#include "opcodes.h" - struct aes_ops { void (*encrypt)(const u64 *key, const u32 *input, u32 *output); void (*decrypt)(const u64 *key, const u32 *input, u32 *output); diff --git a/arch/sparc/crypto/camellia_asm.S b/arch/sparc/crypto/camellia_asm.S index dcdc9193fcd7..8471b346ef54 100644 --- a/arch/sparc/crypto/camellia_asm.S +++ b/arch/sparc/crypto/camellia_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - #define CAMELLIA_6ROUNDS(KEY_BASE, I0, I1) \ CAMELLIA_F(KEY_BASE + 0, I1, I0, I1) \ CAMELLIA_F(KEY_BASE + 2, I0, I1, I0) \ diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index aaa9714378e6..e7a1e1c42b99 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -15,11 +15,10 @@ #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> +#include <asm/opcodes.h> #include <asm/pstate.h> #include <asm/elf.h> -#include "opcodes.h" - #define CAMELLIA_MIN_KEY_SIZE 16 #define CAMELLIA_MAX_KEY_SIZE 32 #define CAMELLIA_BLOCK_SIZE 16 diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S index 7157468a679d..d534446cbef9 100644 --- a/arch/sparc/crypto/des_asm.S +++ b/arch/sparc/crypto/des_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - .align 32 ENTRY(des_sparc64_key_expand) /* %o0=input_key, %o1=output_key */ diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index a499102bf706..e50ec4cd57cd 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -16,11 +16,10 @@ #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> +#include <asm/opcodes.h> #include <asm/pstate.h> #include <asm/elf.h> -#include "opcodes.h" - struct des_sparc64_ctx { u64 
encrypt_expkey[DES_EXPKEY_WORDS / 2]; u64 decrypt_expkey[DES_EXPKEY_WORDS / 2]; diff --git a/arch/sparc/crypto/md5_asm.S b/arch/sparc/crypto/md5_asm.S index 7a6637455f37..60b544e4d205 100644 --- a/arch/sparc/crypto/md5_asm.S +++ b/arch/sparc/crypto/md5_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - ENTRY(md5_sparc64_transform) /* %o0 = digest, %o1 = data, %o2 = rounds */ VISEntryHalf diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c index 511db98d590a..b3615f0cdf62 100644 --- a/arch/sparc/crypto/md5_glue.c +++ b/arch/sparc/crypto/md5_glue.c @@ -14,121 +14,104 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/md5.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" +struct sparc_md5_state { + __le32 hash[MD5_HASH_WORDS]; + u64 byte_count; +}; -asmlinkage void md5_sparc64_transform(u32 *digest, const char *data, +asmlinkage void md5_sparc64_transform(__le32 *digest, const char *data, unsigned int rounds); static int md5_sparc64_init(struct shash_desc *desc) { - struct md5_state *mctx = shash_desc_ctx(desc); + struct sparc_md5_state *mctx = shash_desc_ctx(desc); - mctx->hash[0] = MD5_H0; - mctx->hash[1] = MD5_H1; - mctx->hash[2] = MD5_H2; - mctx->hash[3] = MD5_H3; - le32_to_cpu_array(mctx->hash, 4); + mctx->hash[0] = cpu_to_le32(MD5_H0); + mctx->hash[1] = cpu_to_le32(MD5_H1); + mctx->hash[2] = cpu_to_le32(MD5_H2); + mctx->hash[3] = cpu_to_le32(MD5_H3); mctx->byte_count = 0; return 0; } -static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) -{ - unsigned int done = 0; - - sctx->byte_count += len; - if (partial) { - done = MD5_HMAC_BLOCK_SIZE - partial; - memcpy((u8 *)sctx->block + partial, data, done); - md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1); - } - if (len - done >= MD5_HMAC_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE; - - md5_sparc64_transform(sctx->hash, data + done, rounds); - done += rounds * MD5_HMAC_BLOCK_SIZE; - } - - memcpy(sctx->block, data + done, len - done); -} - static int md5_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct md5_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < MD5_HMAC_BLOCK_SIZE) { - sctx->byte_count += len; - memcpy((u8 *)sctx->block + partial, data, len); - } else - __md5_sparc64_update(sctx, data, len, partial); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); - return 0; + sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE); + md5_sparc64_transform(sctx->hash, data, len / MD5_HMAC_BLOCK_SIZE); + return len - round_down(len, MD5_HMAC_BLOCK_SIZE); } /* Add padding and return the message digest. 
*/ -static int md5_sparc64_final(struct shash_desc *desc, u8 *out) +static int md5_sparc64_finup(struct shash_desc *desc, const u8 *src, + unsigned int offset, u8 *out) { - struct md5_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - u32 *dst = (u32 *)out; - __le64 bits; - static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, }; - - bits = cpu_to_le64(sctx->byte_count << 3); - - /* Pad out to 56 mod 64 and append length */ - index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; - padlen = (index < 56) ? (56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index); - - /* We need to fill a whole block for __md5_sparc64_update() */ - if (padlen <= 56) { - sctx->byte_count += padlen; - memcpy((u8 *)sctx->block + index, padding, padlen); - } else { - __md5_sparc64_update(sctx, padding, padlen, index); - } - __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); + __le64 block[MD5_BLOCK_WORDS] = {}; + u8 *p = memcpy(block, src, offset); + __le32 *dst = (__le32 *)out; + __le64 *pbits; + int i; + + src = p; + p += offset; + *p++ = 0x80; + sctx->byte_count += offset; + pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 1 : 2)) - 1]; + *pbits = cpu_to_le64(sctx->byte_count << 3); + md5_sparc64_transform(sctx->hash, src, (pbits - block + 1) / 8); + memzero_explicit(block, sizeof(block)); /* Store state in digest */ for (i = 0; i < MD5_HASH_WORDS; i++) dst[i] = sctx->hash[i]; - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - return 0; } static int md5_sparc64_export(struct shash_desc *desc, void *out) { - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); + union { + u8 *u8; + u32 *u32; + u64 *u64; + } p = { .u8 = out }; + int i; + for (i = 0; i < MD5_HASH_WORDS; i++) + put_unaligned(le32_to_cpu(sctx->hash[i]), p.u32++); + put_unaligned(sctx->byte_count, p.u64); return 0; } static int md5_sparc64_import(struct shash_desc *desc, const void *in) { - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u32 *u32; + const u64 *u64; + } p = { .u8 = in }; + int i; + for (i = 0; i < MD5_HASH_WORDS; i++) + sctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++)); + sctx->byte_count = get_unaligned(p.u64); return 0; } @@ -136,15 +119,16 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_sparc64_init, .update = md5_sparc64_update, - .final = md5_sparc64_final, + .finup = md5_sparc64_finup, .export = md5_sparc64_export, .import = md5_sparc64_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .descsize = sizeof(struct sparc_md5_state), + .statesize = sizeof(struct sparc_md5_state), .base = { .cra_name = "md5", .cra_driver_name= "md5-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/sparc/crypto/sha1_asm.S b/arch/sparc/crypto/sha1_asm.S index 7d8bf354f0e7..00b46bac1b08 100644 --- a/arch/sparc/crypto/sha1_asm.S +++ b/arch/sparc/crypto/sha1_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - ENTRY(sha1_sparc64_transform) /* %o0 = digest, %o1 = data, %o2 = rounds */ VISEntryHalf diff --git a/arch/sparc/crypto/sha1_glue.c 
b/arch/sparc/crypto/sha1_glue.c index 06b7becfcb21..ef19d5023b1b 100644 --- a/arch/sparc/crypto/sha1_glue.c +++ b/arch/sparc/crypto/sha1_glue.c @@ -11,124 +11,44 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> +#include <linux/kernel.h> +#include <linux/module.h> -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" - -asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data, - unsigned int rounds); - -static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) -{ - unsigned int done = 0; - - sctx->count += len; - if (partial) { - done = SHA1_BLOCK_SIZE - partial; - memcpy(sctx->buffer + partial, data, done); - sha1_sparc64_transform(sctx->state, sctx->buffer, 1); - } - if (len - done >= SHA1_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; - - sha1_sparc64_transform(sctx->state, data + done, rounds); - done += rounds * SHA1_BLOCK_SIZE; - } - - memcpy(sctx->buffer, data + done, len - done); -} +asmlinkage void sha1_sparc64_transform(struct sha1_state *digest, + const u8 *data, int rounds); static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < SHA1_BLOCK_SIZE) { - sctx->count += len; - memcpy(sctx->buffer + partial, data, len); - } else - __sha1_sparc64_update(sctx, data, len, partial); - - return 0; + return sha1_base_do_update_blocks(desc, data, len, + sha1_sparc64_transform); } /* Add padding and return the message digest. */ -static int sha1_sparc64_final(struct shash_desc *desc, u8 *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - __be32 *dst = (__be32 *)out; - __be64 bits; - static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; - - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64 and append length */ - index = sctx->count % SHA1_BLOCK_SIZE; - padlen = (index < 56) ? 
(56 - index) : ((SHA1_BLOCK_SIZE+56) - index); - - /* We need to fill a whole block for __sha1_sparc64_update() */ - if (padlen <= 56) { - sctx->count += padlen; - memcpy(sctx->buffer + index, padding, padlen); - } else { - __sha1_sparc64_update(sctx, padding, padlen, index); - } - __sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); - - /* Store state in digest */ - for (i = 0; i < 5; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int sha1_sparc64_export(struct shash_desc *desc, void *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha1_sparc64_import(struct shash_desc *desc, const void *in) +static int sha1_sparc64_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; + sha1_base_do_finup(desc, src, len, sha1_sparc64_transform); + return sha1_base_finish(desc, out); } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_sparc64_update, - .final = sha1_sparc64_final, - .export = sha1_sparc64_export, - .import = sha1_sparc64_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = sha1_sparc64_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c deleted file mode 100644 index 285561a1cde5..000000000000 --- a/arch/sparc/crypto/sha256_glue.c +++ /dev/null @@ -1,210 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Glue code for SHA256 hashing optimized for sparc64 crypto opcodes. 
- * - * This is based largely upon crypto/sha256_generic.c - * - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> - -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" - -asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data, - unsigned int rounds); - -static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) -{ - unsigned int done = 0; - - sctx->count += len; - if (partial) { - done = SHA256_BLOCK_SIZE - partial; - memcpy(sctx->buf + partial, data, done); - sha256_sparc64_transform(sctx->state, sctx->buf, 1); - } - if (len - done >= SHA256_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE; - - sha256_sparc64_transform(sctx->state, data + done, rounds); - done += rounds * SHA256_BLOCK_SIZE; - } - - memcpy(sctx->buf, data + done, len - done); -} - -static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < SHA256_BLOCK_SIZE) { - sctx->count += len; - memcpy(sctx->buf + partial, data, len); - } else - __sha256_sparc64_update(sctx, data, len, partial); - - return 0; -} - -static int sha256_sparc64_final(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - __be32 *dst = (__be32 *)out; - __be64 bits; - static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, }; - - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64 and append length */ - index = sctx->count % SHA256_BLOCK_SIZE; - padlen = (index < 56) ? 
(56 - index) : ((SHA256_BLOCK_SIZE+56) - index); - - /* We need to fill a whole block for __sha256_sparc64_update() */ - if (padlen <= 56) { - sctx->count += padlen; - memcpy(sctx->buf + index, padding, padlen); - } else { - __sha256_sparc64_update(sctx, padding, padlen, index); - } - __sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[SHA256_DIGEST_SIZE]; - - sha256_sparc64_final(desc, D); - - memcpy(hash, D, SHA224_DIGEST_SIZE); - memzero_explicit(D, SHA256_DIGEST_SIZE); - - return 0; -} - -static int sha256_sparc64_export(struct shash_desc *desc, void *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int sha256_sparc64_import(struct shash_desc *desc, const void *in) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; -} - -static struct shash_alg sha256_alg = { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = sha256_sparc64_update, - .final = sha256_sparc64_final, - .export = sha256_sparc64_export, - .import = sha256_sparc64_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg sha224_alg = { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = sha256_sparc64_update, - .final = sha224_sparc64_final, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static bool __init sparc64_has_sha256_opcode(void) -{ - unsigned long cfr; - - if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) - return false; - - __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); - if (!(cfr & CFR_SHA256)) - return false; - - return true; -} - -static int __init sha256_sparc64_mod_init(void) -{ - if (sparc64_has_sha256_opcode()) { - int ret = crypto_register_shash(&sha224_alg); - if (ret < 0) - return ret; - - ret = crypto_register_shash(&sha256_alg); - if (ret < 0) { - crypto_unregister_shash(&sha224_alg); - return ret; - } - - pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n"); - return 0; - } - pr_info("sparc64 sha256 opcode not available.\n"); - return -ENODEV; -} - -static void __exit sha256_sparc64_mod_fini(void) -{ - crypto_unregister_shash(&sha224_alg); - crypto_unregister_shash(&sha256_alg); -} - -module_init(sha256_sparc64_mod_init); -module_exit(sha256_sparc64_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated"); - -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha256"); - -#include "crop_devid.c" diff --git a/arch/sparc/crypto/sha512_asm.S b/arch/sparc/crypto/sha512_asm.S index b2f6e6728802..9932b4fe1b59 100644 --- a/arch/sparc/crypto/sha512_asm.S +++ b/arch/sparc/crypto/sha512_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> 
-#include "opcodes.h" - ENTRY(sha512_sparc64_transform) /* %o0 = digest, %o1 = data, %o2 = rounds */ VISEntry diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c index d66efa4ec59a..47b9277b6877 100644 --- a/arch/sparc/crypto/sha512_glue.c +++ b/arch/sparc/crypto/sha512_glue.c @@ -10,115 +10,42 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> - -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" +#include <linux/kernel.h> +#include <linux/module.h> asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data, unsigned int rounds); -static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) +static void sha512_block(struct sha512_state *sctx, const u8 *src, int blocks) { - unsigned int done = 0; - - if ((sctx->count[0] += len) < len) - sctx->count[1]++; - if (partial) { - done = SHA512_BLOCK_SIZE - partial; - memcpy(sctx->buf + partial, data, done); - sha512_sparc64_transform(sctx->state, sctx->buf, 1); - } - if (len - done >= SHA512_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE; - - sha512_sparc64_transform(sctx->state, data + done, rounds); - done += rounds * SHA512_BLOCK_SIZE; - } - - memcpy(sctx->buf, data + done, len - done); + sha512_sparc64_transform(sctx->state, src, blocks); } static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha512_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < SHA512_BLOCK_SIZE) { - if ((sctx->count[0] += len) < len) - sctx->count[1]++; - memcpy(sctx->buf + partial, data, len); - } else - __sha512_sparc64_update(sctx, data, len, partial); - - return 0; + return sha512_base_do_update_blocks(desc, data, len, sha512_block); } -static int sha512_sparc64_final(struct shash_desc *desc, u8 *out) +static int sha512_sparc64_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha512_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - __be64 *dst = (__be64 *)out; - __be64 bits[2]; - static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, }; - - /* Save number of bits */ - bits[1] = cpu_to_be64(sctx->count[0] << 3); - bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); - - /* Pad out to 112 mod 128 and append length */ - index = sctx->count[0] % SHA512_BLOCK_SIZE; - padlen = (index < 112) ? 
(112 - index) : ((SHA512_BLOCK_SIZE+112) - index); - - /* We need to fill a whole block for __sha512_sparc64_update() */ - if (padlen <= 112) { - if ((sctx->count[0] += padlen) < padlen) - sctx->count[1]++; - memcpy(sctx->buf + index, padding, padlen); - } else { - __sha512_sparc64_update(sctx, padding, padlen, index); - } - __sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be64(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[64]; - - sha512_sparc64_final(desc, D); - - memcpy(hash, D, 48); - memzero_explicit(D, 64); - - return 0; + sha512_base_do_finup(desc, src, len, sha512_block); + return sha512_base_finish(desc, out); } static struct shash_alg sha512 = { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_sparc64_update, - .final = sha512_sparc64_final, - .descsize = sizeof(struct sha512_state), + .finup = sha512_sparc64_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name= "sha512-sparc64", @@ -132,8 +59,8 @@ static struct shash_alg sha384 = { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_sparc64_update, - .final = sha384_sparc64_final, - .descsize = sizeof(struct sha512_state), + .finup = sha512_sparc64_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name= "sha384-sparc64", diff --git a/arch/sparc/crypto/opcodes.h b/arch/sparc/include/asm/opcodes.h index 417b6a10a337..ebfda6eb49b2 100644 --- a/arch/sparc/crypto/opcodes.h +++ b/arch/sparc/include/asm/opcodes.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _OPCODES_H -#define _OPCODES_H +#ifndef _SPARC_ASM_OPCODES_H +#define _SPARC_ASM_OPCODES_H #define SPARC_CR_OPCODE_PRIORITY 300 @@ -97,4 +97,4 @@ #define MOVXTOD_G7_F62 \ .word 0xbfb02307; -#endif /* _OPCODES_H */ +#endif /* _SPARC_ASM_OPCODES_H */ diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index f02a283a8e8f..cae4d33002a5 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1668,8 +1668,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, if (!sparc_perf_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, &data, regs)) - sparc_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } finish_clock = sched_clock(); diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 5724d0f356eb..5cf9781d68b4 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -4,6 +4,7 @@ asflags-y := -ansi -DST_DIV0=0x02 +obj-y += crypto/ lib-$(CONFIG_SPARC32) += ashrdi3.o lib-$(CONFIG_SPARC32) += memcpy.o memset.o lib-y += strlen.o @@ -54,4 +55,4 @@ obj-$(CONFIG_SPARC64) += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o obj-$(CONFIG_SPARC64) += PeeCeeI.o obj-$(CONFIG_CRC32_ARCH) += crc32-sparc.o -crc32-sparc-y := crc32_glue.o crc32c_asm.o +crc32-sparc-y := crc32.o crc32c_asm.o diff --git a/arch/sparc/lib/crc32_glue.c b/arch/sparc/lib/crc32.c index a70752c729cf..40d4720a42a1 100644 --- a/arch/sparc/lib/crc32_glue.c +++ b/arch/sparc/lib/crc32.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Glue code for CRC32C optimized for sparc64 crypto opcodes. 
+/* CRC32c (Castagnoli), sparc64 crc32c opcode accelerated * * This is based largely upon arch/x86/crypto/crc32c-intel.c * @@ -17,7 +17,7 @@ #include <asm/pstate.h> #include <asm/elf.h> -static DEFINE_STATIC_KEY_FALSE(have_crc32c_opcode); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32c_opcode); u32 crc32_le_arch(u32 crc, const u8 *data, size_t len) { @@ -74,7 +74,7 @@ static int __init crc32_sparc_init(void) pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n"); return 0; } -arch_initcall(crc32_sparc_init); +subsys_initcall(crc32_sparc_init); static void __exit crc32_sparc_exit(void) { diff --git a/arch/sparc/lib/crc32c_asm.S b/arch/sparc/lib/crc32c_asm.S index ee454fa6aed6..4db873850f44 100644 --- a/arch/sparc/lib/crc32c_asm.S +++ b/arch/sparc/lib/crc32c_asm.S @@ -1,10 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> #include <asm/asi.h> -#include "../crypto/opcodes.h" - ENTRY(crc32c_sparc64) /* %o0=crc32p, %o1=data_ptr, %o2=len */ VISEntryHalf diff --git a/arch/sparc/lib/crypto/Kconfig b/arch/sparc/lib/crypto/Kconfig new file mode 100644 index 000000000000..e5c3e4d3dba6 --- /dev/null +++ b/arch/sparc/lib/crypto/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_SHA256_SPARC64 + tristate + depends on SPARC64 + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_LIB_SHA256_GENERIC diff --git a/arch/sparc/lib/crypto/Makefile b/arch/sparc/lib/crypto/Makefile new file mode 100644 index 000000000000..75ee244ad6f7 --- /dev/null +++ b/arch/sparc/lib/crypto/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o +sha256-sparc64-y := sha256.o sha256_asm.o diff --git a/arch/sparc/lib/crypto/sha256.c b/arch/sparc/lib/crypto/sha256.c new file mode 100644 index 000000000000..8bdec2db08b3 --- /dev/null +++ b/arch/sparc/lib/crypto/sha256.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SHA-256 accelerated using the sparc64 sha256 opcodes + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_opcodes); + +asmlinkage void sha256_sparc64_transform(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); + +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + if (static_branch_likely(&have_sha256_opcodes)) + sha256_sparc64_transform(state, data, nblocks); + else + sha256_blocks_generic(state, data, nblocks); +} +EXPORT_SYMBOL_GPL(sha256_blocks_arch); + +bool sha256_is_arch_optimized(void) +{ + return static_key_enabled(&have_sha256_opcodes); +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +static int __init sha256_sparc64_mod_init(void) +{ + unsigned long cfr; + + if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) + return 0; + + __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); + if (!(cfr & CFR_SHA256)) + return 0; + + static_branch_enable(&have_sha256_opcodes); + pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 
implementation\n"); + return 0; +} +subsys_initcall(sha256_sparc64_mod_init); + +static void __exit sha256_sparc64_mod_exit(void) +{ +} +module_exit(sha256_sparc64_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 accelerated using the sparc64 sha256 opcodes"); diff --git a/arch/sparc/crypto/sha256_asm.S b/arch/sparc/lib/crypto/sha256_asm.S index 0b39ec7d7ca2..ddcdd3daf31e 100644 --- a/arch/sparc/crypto/sha256_asm.S +++ b/arch/sparc/lib/crypto/sha256_asm.S @@ -1,11 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - ENTRY(sha256_sparc64_transform) - /* %o0 = digest, %o1 = data, %o2 = rounds */ + /* %o0 = state, %o1 = data, %o2 = nblocks */ VISEntryHalf ld [%o0 + 0x00], %f0 ld [%o0 + 0x04], %f1 diff --git a/arch/um/include/asm/fpu/api.h b/arch/um/include/asm/fpu/api.h index 71bfd9ef3938..3abf67c83c40 100644 --- a/arch/um/include/asm/fpu/api.h +++ b/arch/um/include/asm/fpu/api.h @@ -2,6 +2,8 @@ #ifndef _ASM_UM_FPU_API_H #define _ASM_UM_FPU_API_H +#include <linux/types.h> + /* Copyright (c) 2020 Cambridge Greys Ltd * Copyright (c) 2020 Red Hat Inc. * A set of "dummy" defines to allow the direct inclusion diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index d4b3b6742ec8..2f5ee045bc7a 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -477,7 +477,7 @@ void *text_poke_copy(void *addr, const void *opcode, size_t len) return text_poke(addr, opcode, len); } -void text_poke_sync(void) +void smp_text_poke_sync_each_cpu(void) { } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e21cca404943..121f9f03bd5c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -153,6 +153,7 @@ config X86 select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64 select ARCH_WANTS_THP_SWAP if X86_64 select ARCH_HAS_PARANOID_L1D_FLUSH + select ARCH_WANT_IRQS_OFF_ACTIVATE_MM select BUILDTIME_TABLE_SORT select CLKEVT_I8253 select CLOCKSOURCE_WATCHDOG @@ -426,8 +427,7 @@ config DYNAMIC_PHYSICAL_MASK config PGTABLE_LEVELS int - default 5 if X86_5LEVEL - default 4 if X86_64 + default 5 if X86_64 default 3 if X86_PAE default 2 @@ -799,6 +799,7 @@ config PARAVIRT config PARAVIRT_XXL bool + depends on X86_64 config PARAVIRT_DEBUG bool "paravirt-ops debugging" @@ -1463,27 +1464,6 @@ config X86_PAE has the cost of more pagetable lookup overhead, and also consumes more pagetable space per process. -config X86_5LEVEL - bool "Enable 5-level page tables support" - default y - select DYNAMIC_MEMORY_LAYOUT - select SPARSEMEM_VMEMMAP - depends on X86_64 - help - 5-level paging enables access to larger address space: - up to 128 PiB of virtual address space and 4 PiB of - physical address space. - - It will be supported by future Intel CPUs. - - A kernel with the option enabled can be booted on machines that - support 4- or 5-level paging. - - See Documentation/arch/x86/x86_64/5level-paging.rst for more - information. - - Say N if unsure. - config X86_DIRECT_GBPAGES def_bool y depends on X86_64 @@ -1579,6 +1559,7 @@ config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_STATIC if X86_32 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 + select SPARSEMEM_VMEMMAP if X86_64 config ARCH_SPARSEMEM_DEFAULT def_bool X86_64 || (NUMA && X86_32) @@ -2167,17 +2148,10 @@ config PHYSICAL_ALIGN Don't change this unless you know what you are doing. -config DYNAMIC_MEMORY_LAYOUT - bool - help - This option makes base addresses of vmalloc and vmemmap as well as - __PAGE_OFFSET movable during boot. 
- config RANDOMIZE_MEMORY bool "Randomize the kernel memory sections" depends on X86_64 depends on RANDOMIZE_BASE - select DYNAMIC_MEMORY_LAYOUT default RANDOMIZE_BASE help Randomizes the base virtual address of kernel memory sections diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler index 6d20a6ce0507..c827f694fb72 100644 --- a/arch/x86/Kconfig.assembler +++ b/arch/x86/Kconfig.assembler @@ -6,20 +6,6 @@ config AS_AVX512 help Supported by binutils >= 2.25 and LLVM integrated assembler -config AS_SHA1_NI - def_bool $(as-instr,sha1msg1 %xmm0$(comma)%xmm1) - help - Supported by binutils >= 2.24 and LLVM integrated assembler - -config AS_SHA256_NI - def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1) - help - Supported by binutils >= 2.24 and LLVM integrated assembler -config AS_TPAUSE - def_bool $(as-instr,tpause %ecx) - help - Supported by binutils >= 2.31.1 and LLVM integrated assembler >= V7 - config AS_GFNI def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2) help diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 753b8763abae..f928cf6e3252 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -245,6 +245,30 @@ config MATOM endchoice +config CC_HAS_MARCH_NATIVE + # This flag might not be available in cross-compilers: + def_bool $(cc-option, -march=native) + # LLVM 18 has an easily triggered internal compiler error in core + # networking code with '-march=native' on certain systems: + # https://github.com/llvm/llvm-project/issues/72026 + # LLVM 19 introduces an optimization that resolves some high stack + # usage warnings that only appear wth '-march=native'. + depends on CC_IS_GCC || CLANG_VERSION >= 190100 + +config X86_NATIVE_CPU + bool "Build and optimize for local/native CPU" + depends on X86_64 + depends on CC_HAS_MARCH_NATIVE + help + Optimize for the current CPU used to compile the kernel. + Use this option if you intend to build the kernel for your + local machine. + + Note that such a kernel might not work optimally on a + different x86 machine. + + If unsure, say N. + config X86_GENERIC bool "Generic x86 support" depends on X86_32 diff --git a/arch/x86/Kconfig.cpufeatures b/arch/x86/Kconfig.cpufeatures index e12d5b7e39a2..250c10627ab3 100644 --- a/arch/x86/Kconfig.cpufeatures +++ b/arch/x86/Kconfig.cpufeatures @@ -132,10 +132,6 @@ config X86_DISABLED_FEATURE_OSPKE def_bool y depends on !X86_INTEL_MEMORY_PROTECTION_KEYS -config X86_DISABLED_FEATURE_LA57 - def_bool y - depends on !X86_5LEVEL - config X86_DISABLED_FEATURE_PTI def_bool y depends on !MITIGATION_PAGE_TABLE_ISOLATION diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 594723005d95..1913d342969b 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -173,8 +173,13 @@ else # Use -mskip-rax-setup if supported. 
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup) +ifdef CONFIG_X86_NATIVE_CPU + KBUILD_CFLAGS += -march=native + KBUILD_RUSTFLAGS += -Ctarget-cpu=native +else KBUILD_CFLAGS += -march=x86-64 -mtune=generic KBUILD_RUSTFLAGS += -Ctarget-cpu=x86-64 -Ztune-cpu=generic +endif KBUILD_CFLAGS += -mno-red-zone KBUILD_CFLAGS += -mcmodel=kernel @@ -281,6 +286,7 @@ archprepare: $(cpufeaturemasks.hdr) ### # Kernel objects +core-y += arch/x86/boot/startup/ libs-y += arch/x86/lib/ # drivers-y are linked after core-y diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S index aa9b96457584..cf4a6155714e 100644 --- a/arch/x86/boot/bioscall.S +++ b/arch/x86/boot/bioscall.S @@ -32,7 +32,7 @@ intcall: movw %dx, %si movw %sp, %di movw $11, %cx - rep; movsl + rep movsl /* Pop full state from the stack */ popal @@ -67,7 +67,7 @@ intcall: jz 4f movw %sp, %si movw $11, %cx - rep; movsl + rep movsl 4: addw $44, %sp /* Restore state and return */ diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 38f17a1e1e36..60580836daf7 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -34,7 +34,7 @@ extern struct setup_header hdr; extern struct boot_params boot_params; -#define cpu_relax() asm volatile("rep; nop") +#define cpu_relax() asm volatile("pause") static inline void io_delay(void) { @@ -155,14 +155,14 @@ static inline void wrgs32(u32 v, addr_t addr) static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len) { bool diff; - asm volatile("fs; repe; cmpsb" CC_SET(nz) + asm volatile("fs repe cmpsb" CC_SET(nz) : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); return diff; } static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len) { bool diff; - asm volatile("gs; repe; cmpsb" CC_SET(nz) + asm volatile("gs repe cmpsb" CC_SET(nz) : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); return diff; } diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index fdbce022db55..f4f7b22d8113 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -44,10 +44,10 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h -# sev.c indirectly includes inat-table.h which is generated during +# sev-decode-insn.c indirectly includes inat-table.c which is generated during # compilation and stored in $(objtree). Add the directory to the includes so # that the compiler finds it even with out-of-tree builds (make O=/some/path). 
-CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/ +CFLAGS_sev-handle-vc.o += -I$(objtree)/arch/x86/lib/ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ @@ -73,7 +73,7 @@ LDFLAGS_vmlinux += -T hostprogs := mkpiggy HOST_EXTRACFLAGS += -I$(srctree)/tools/include -sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p' +sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABbCDGRSTtVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p' quiet_cmd_voffset = VOFFSET $@ cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@ @@ -96,8 +96,7 @@ ifdef CONFIG_X86_64 vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o vmlinux-objs-y += $(obj)/pgtable_64.o - vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o - vmlinux-objs-y += $(obj)/la57toggle.o + vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o $(obj)/sev-handle-vc.o endif vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o @@ -106,6 +105,7 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a +vmlinux-libs-$(CONFIG_X86_64) += $(objtree)/arch/x86/boot/startup/lib.a $(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index eafd4f185e77..d9dab940ff62 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -35,7 +35,6 @@ #include <asm/bootparam.h> #include <asm/desc_defs.h> #include <asm/trapnr.h> -#include "pgtable.h" /* * Fix alignment at 16 bytes. Following CONFIG_FUNCTION_ALIGNMENT will result diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 1cdcd4aaf395..94b5991da001 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -14,7 +14,6 @@ #include "misc.h" #include "error.h" -#include "pgtable.h" #include "../string.h" #include "../voffset.h" #include <asm/bootparam_utils.h> diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index dd8d1a85f671..db1048621ea2 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -136,6 +136,9 @@ static inline void console_init(void) #endif #ifdef CONFIG_AMD_MEM_ENCRYPT +struct es_em_ctxt; +struct insn; + void sev_enable(struct boot_params *bp); void snp_check_features(void); void sev_es_shutdown_ghcb(void); @@ -143,6 +146,11 @@ extern bool sev_es_check_ghcb_fault(unsigned long address); void snp_set_page_private(unsigned long paddr); void snp_set_page_shared(unsigned long paddr); void sev_prep_identity_maps(unsigned long top_level_pgt); + +enum es_result vc_decode_insn(struct es_em_ctxt *ctxt); +bool insn_has_rep_prefix(struct insn *insn); +void sev_insn_decode_init(void); +bool early_setup_ghcb(void); #else static inline void sev_enable(struct boot_params *bp) { diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h deleted file mode 100644 index 6d595abe06b3..000000000000 --- a/arch/x86/boot/compressed/pgtable.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef BOOT_COMPRESSED_PAGETABLE_H -#define BOOT_COMPRESSED_PAGETABLE_H - -#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE) - -#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE -#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0 - -#ifndef __ASSEMBLER__ - -extern unsigned long *trampoline_32bit; - -extern void 
trampoline_32bit_src(void *trampoline, bool enable_5lvl); - -extern const u16 trampoline_ljmp_imm_offset; - -#endif /* __ASSEMBLER__ */ -#endif /* BOOT_COMPRESSED_PAGETABLE_H */ diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index d8c5de40669d..bdd26050dff7 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -4,19 +4,16 @@ #include <asm/bootparam_utils.h> #include <asm/e820/types.h> #include <asm/processor.h> -#include "pgtable.h" #include "../string.h" #include "efi.h" #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */ #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */ -#ifdef CONFIG_X86_5LEVEL /* __pgtable_l5_enabled needs to be in .data to avoid being cleared along with .bss */ unsigned int __section(".data") __pgtable_l5_enabled; unsigned int __section(".data") pgdir_shift = 39; unsigned int __section(".data") ptrs_per_p4d = 1; -#endif /* Buffer to preserve trampoline memory */ static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; @@ -115,18 +112,13 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable) * Check if LA57 is desired and supported. * * There are several parts to the check: - * - if the kernel supports 5-level paging: CONFIG_X86_5LEVEL=y * - if user asked to disable 5-level paging: no5lvl in cmdline * - if the machine supports 5-level paging: * + CPUID leaf 7 is supported * + the leaf has the feature bit set - * - * That's substitute for boot_cpu_has() in early boot code. */ - if (IS_ENABLED(CONFIG_X86_5LEVEL) && - !cmdline_find_option_bool("no5lvl") && - native_cpuid_eax(0) >= 7 && - (native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) { + if (!cmdline_find_option_bool("no5lvl") && + native_cpuid_eax(0) >= 7 && (native_cpuid_ecx(7) & BIT(16))) { l5_required = true; /* Initialize variables for 5-level paging */ diff --git a/arch/x86/boot/compressed/sev-handle-vc.c b/arch/x86/boot/compressed/sev-handle-vc.c new file mode 100644 index 000000000000..89dd02de2a0f --- /dev/null +++ b/arch/x86/boot/compressed/sev-handle-vc.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "misc.h" +#include "sev.h" + +#include <linux/kernel.h> +#include <linux/string.h> +#include <asm/insn.h> +#include <asm/pgtable_types.h> +#include <asm/ptrace.h> +#include <asm/sev.h> +#include <asm/trapnr.h> +#include <asm/trap_pf.h> +#include <asm/fpu/xcr.h> + +#define __BOOT_COMPRESSED + +/* Basic instruction decoding support needed */ +#include "../../lib/inat.c" +#include "../../lib/insn.c" + +/* + * Copy a version of this function here - insn-eval.c can't be used in + * pre-decompression code. + */ +bool insn_has_rep_prefix(struct insn *insn) +{ + insn_byte_t p; + int i; + + insn_get_prefixes(insn); + + for_each_insn_prefix(insn, i, p) { + if (p == 0xf2 || p == 0xf3) + return true; + } + + return false; +} + +enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + int ret; + + memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); + + ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); + if (ret < 0) + return ES_DECODE_FAILED; + + return ES_OK; +} + +extern void sev_insn_decode_init(void) __alias(inat_init_tables); + +/* + * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and + * doesn't use segments. 
+ */ +static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) +{ + return 0UL; +} + +static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, + void *dst, char *buf, size_t size) +{ + memcpy(dst, buf, size); + + return ES_OK; +} + +static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, + void *src, char *buf, size_t size) +{ + memcpy(buf, src, size); + + return ES_OK; +} + +static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size) +{ + return ES_OK; +} + +static bool fault_in_kernel_space(unsigned long address) +{ + return false; +} + +#define sev_printk(fmt, ...) + +#include "../../coco/sev/vc-shared.c" + +void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code) +{ + struct es_em_ctxt ctxt; + enum es_result result; + + if (!boot_ghcb && !early_setup_ghcb()) + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); + + vc_ghcb_invalidate(boot_ghcb); + result = vc_init_em_ctxt(&ctxt, regs, exit_code); + if (result != ES_OK) + goto finish; + + result = vc_check_opcode_bytes(&ctxt, exit_code); + if (result != ES_OK) + goto finish; + + switch (exit_code) { + case SVM_EXIT_RDTSC: + case SVM_EXIT_RDTSCP: + result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code); + break; + case SVM_EXIT_IOIO: + result = vc_handle_ioio(boot_ghcb, &ctxt); + break; + case SVM_EXIT_CPUID: + result = vc_handle_cpuid(boot_ghcb, &ctxt); + break; + default: + result = ES_UNSUPPORTED; + break; + } + +finish: + if (result == ES_OK) + vc_finish_insn(&ctxt); + else if (result != ES_RETRY) + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); +} diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c index 0003e4416efd..fd1b67dfea22 100644 --- a/arch/x86/boot/compressed/sev.c +++ b/arch/x86/boot/compressed/sev.c @@ -21,99 +21,14 @@ #include <asm/fpu/xcr.h> #include <asm/ptrace.h> #include <asm/svm.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include "error.h" -#include "../msr.h" +#include "sev.h" static struct ghcb boot_ghcb_page __aligned(PAGE_SIZE); struct ghcb *boot_ghcb; -/* - * Copy a version of this function here - insn-eval.c can't be used in - * pre-decompression code. - */ -static bool insn_has_rep_prefix(struct insn *insn) -{ - insn_byte_t p; - int i; - - insn_get_prefixes(insn); - - for_each_insn_prefix(insn, i, p) { - if (p == 0xf2 || p == 0xf3) - return true; - } - - return false; -} - -/* - * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and - * doesn't use segments. 
- */ -static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) -{ - return 0UL; -} - -static inline u64 sev_es_rd_ghcb_msr(void) -{ - struct msr m; - - boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m); - - return m.q; -} - -static inline void sev_es_wr_ghcb_msr(u64 val) -{ - struct msr m; - - m.q = val; - boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m); -} - -static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) -{ - char buffer[MAX_INSN_SIZE]; - int ret; - - memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); - - ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); - if (ret < 0) - return ES_DECODE_FAILED; - - return ES_OK; -} - -static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, - void *dst, char *buf, size_t size) -{ - memcpy(dst, buf, size); - - return ES_OK; -} - -static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, - void *src, char *buf, size_t size) -{ - memcpy(buf, src, size); - - return ES_OK; -} - -static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size) -{ - return ES_OK; -} - -static bool fault_in_kernel_space(unsigned long address) -{ - return false; -} - #undef __init #define __init @@ -122,24 +37,27 @@ static bool fault_in_kernel_space(unsigned long address) #define __BOOT_COMPRESSED -/* Basic instruction decoding support needed */ -#include "../../lib/inat.c" -#include "../../lib/insn.c" +extern struct svsm_ca *boot_svsm_caa; +extern u64 boot_svsm_caa_pa; -/* Include code for early handlers */ -#include "../../coco/sev/shared.c" - -static struct svsm_ca *svsm_get_caa(void) +struct svsm_ca *svsm_get_caa(void) { return boot_svsm_caa; } -static u64 svsm_get_caa_pa(void) +u64 svsm_get_caa_pa(void) { return boot_svsm_caa_pa; } -static int svsm_perform_call_protocol(struct svsm_call *call) +int svsm_perform_call_protocol(struct svsm_call *call); + +u8 snp_vmpl; + +/* Include code for early handlers */ +#include "../../boot/startup/sev-shared.c" + +int svsm_perform_call_protocol(struct svsm_call *call) { struct ghcb *ghcb; int ret; @@ -157,7 +75,7 @@ static int svsm_perform_call_protocol(struct svsm_call *call) return ret; } -bool sev_snp_enabled(void) +static bool sev_snp_enabled(void) { return sev_status & MSR_AMD64_SEV_SNP_ENABLED; } @@ -212,7 +130,7 @@ void snp_set_page_shared(unsigned long paddr) __page_state_change(paddr, SNP_PAGE_STATE_SHARED); } -static bool early_setup_ghcb(void) +bool early_setup_ghcb(void) { if (set_page_decrypted((unsigned long)&boot_ghcb_page)) return false; @@ -223,7 +141,7 @@ static bool early_setup_ghcb(void) boot_ghcb = &boot_ghcb_page; /* Initialize lookup tables for the instruction decoder */ - inat_init_tables(); + sev_insn_decode_init(); /* SNP guest requires the GHCB GPA must be registered */ if (sev_snp_enabled()) @@ -296,46 +214,6 @@ bool sev_es_check_ghcb_fault(unsigned long address) return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page); } -void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code) -{ - struct es_em_ctxt ctxt; - enum es_result result; - - if (!boot_ghcb && !early_setup_ghcb()) - sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); - - vc_ghcb_invalidate(boot_ghcb); - result = vc_init_em_ctxt(&ctxt, regs, exit_code); - if (result != ES_OK) - goto finish; - - result = vc_check_opcode_bytes(&ctxt, exit_code); - if (result != ES_OK) - goto finish; - - switch (exit_code) { - case SVM_EXIT_RDTSC: - case SVM_EXIT_RDTSCP: - result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code); - break; - case SVM_EXIT_IOIO: - result = 
vc_handle_ioio(boot_ghcb, &ctxt); - break; - case SVM_EXIT_CPUID: - result = vc_handle_cpuid(boot_ghcb, &ctxt); - break; - default: - result = ES_UNSUPPORTED; - break; - } - -finish: - if (result == ES_OK) - vc_finish_insn(&ctxt); - else if (result != ES_RETRY) - sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); -} - /* * SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need * guest side implementation for proper functioning of the guest. If any diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h index d3900384b8ab..92f79c21939c 100644 --- a/arch/x86/boot/compressed/sev.h +++ b/arch/x86/boot/compressed/sev.h @@ -10,14 +10,31 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT -bool sev_snp_enabled(void); +#include "../msr.h" + void snp_accept_memory(phys_addr_t start, phys_addr_t end); u64 sev_get_status(void); bool early_is_sevsnp_guest(void); +static inline u64 sev_es_rd_ghcb_msr(void) +{ + struct msr m; + + boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m); + + return m.q; +} + +static inline void sev_es_wr_ghcb_msr(u64 val) +{ + struct msr m; + + m.q = val; + boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m); +} + #else -static inline bool sev_snp_enabled(void) { return false; } static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { } static inline u64 sev_get_status(void) { return 0; } static inline bool early_is_sevsnp_guest(void) { return false; } diff --git a/arch/x86/boot/compressed/string.c b/arch/x86/boot/compressed/string.c index 81fc1eaa3229..9af19d9614cb 100644 --- a/arch/x86/boot/compressed/string.c +++ b/arch/x86/boot/compressed/string.c @@ -15,9 +15,9 @@ static void *____memcpy(void *dest, const void *src, size_t n) { int d0, d1, d2; asm volatile( - "rep ; movsl\n\t" + "rep movsl\n\t" "movl %4,%%ecx\n\t" - "rep ; movsb\n\t" + "rep movsb" : "=&c" (d0), "=&D" (d1), "=&S" (d2) : "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src) : "memory"); @@ -29,9 +29,9 @@ static void *____memcpy(void *dest, const void *src, size_t n) { long d0, d1, d2; asm volatile( - "rep ; movsq\n\t" + "rep movsq\n\t" "movq %4,%%rcx\n\t" - "rep ; movsb\n\t" + "rep movsb" : "=&c" (d0), "=&D" (d1), "=&S" (d2) : "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src) : "memory"); diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S index 6afd05e819d2..3973a67cd04e 100644 --- a/arch/x86/boot/copy.S +++ b/arch/x86/boot/copy.S @@ -22,10 +22,10 @@ SYM_FUNC_START_NOALIGN(memcpy) movw %dx, %si pushw %cx shrw $2, %cx - rep; movsl + rep movsl popw %cx andw $3, %cx - rep; movsb + rep movsb popw %di popw %si retl @@ -38,10 +38,10 @@ SYM_FUNC_START_NOALIGN(memset) imull $0x01010101,%eax pushw %cx shrw $2, %cx - rep; stosl + rep stosl popw %cx andw $3, %cx - rep; stosb + rep stosb popw %di retl SYM_FUNC_END(memset) diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index b5c79f43359b..e30649e44d8f 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -361,12 +361,8 @@ xloadflags: #endif #ifdef CONFIG_X86_64 -#ifdef CONFIG_X86_5LEVEL #define XLF56 (XLF_5LEVEL|XLF_5LEVEL_ENABLED) #else -#define XLF56 XLF_5LEVEL -#endif -#else #define XLF56 0 #endif @@ -585,7 +581,7 @@ start_of_setup: xorl %eax, %eax subw %di, %cx shrw $2, %cx - rep; stosl + rep stosl # Jump to C code (should not return) calll main diff --git a/arch/x86/boot/startup/Makefile b/arch/x86/boot/startup/Makefile new file mode 100644 index 000000000000..b514f7e81332 --- /dev/null +++ b/arch/x86/boot/startup/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_AFLAGS += -D__DISABLE_EXPORTS +KBUILD_CFLAGS 
+= -D__DISABLE_EXPORTS -mcmodel=small -fPIC \ + -Os -DDISABLE_BRANCH_PROFILING \ + $(DISABLE_STACKLEAK_PLUGIN) \ + -fno-stack-protector -D__NO_FORTIFY \ + -fno-jump-tables \ + -include $(srctree)/include/linux/hidden.h + +# disable ftrace hooks and LTO +KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS)) +KASAN_SANITIZE := n +KCSAN_SANITIZE := n +KMSAN_SANITIZE := n +UBSAN_SANITIZE := n +KCOV_INSTRUMENT := n + +obj-$(CONFIG_X86_64) += gdt_idt.o map_kernel.o +obj-$(CONFIG_AMD_MEM_ENCRYPT) += sme.o sev-startup.o + +lib-$(CONFIG_X86_64) += la57toggle.o +lib-$(CONFIG_EFI_MIXED) += efi-mixed.o + +# +# Disable objtool validation for all library code, which is intended +# to be linked into the decompressor or the EFI stub but not vmlinux +# +$(patsubst %.o,$(obj)/%.o,$(lib-y)): OBJECT_FILES_NON_STANDARD := y diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/arch/x86/boot/startup/efi-mixed.S index e04ed99bc449..e04ed99bc449 100644 --- a/drivers/firmware/efi/libstub/x86-mixed.S +++ b/arch/x86/boot/startup/efi-mixed.S diff --git a/arch/x86/boot/startup/gdt_idt.c b/arch/x86/boot/startup/gdt_idt.c new file mode 100644 index 000000000000..a3112a69b06a --- /dev/null +++ b/arch/x86/boot/startup/gdt_idt.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/linkage.h> +#include <linux/types.h> + +#include <asm/desc.h> +#include <asm/init.h> +#include <asm/setup.h> +#include <asm/sev.h> +#include <asm/trapnr.h> + +/* + * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is + * used until the idt_table takes over. On the boot CPU this happens in + * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases + * this happens in the functions called from head_64.S. + * + * The idt_table can't be used that early because all the code modifying it is + * in idt.c and can be instrumented by tracing or KASAN, which both don't work + * during early CPU bringup. Also the idt_table has the runtime vectors + * configured which require certain CPU state to be setup already (like TSS), + * which also hasn't happened yet in early CPU bringup. + */ +static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data; + +/* This may run while still in the direct mapping */ +void __head startup_64_load_idt(void *vc_handler) +{ + struct desc_ptr desc = { + .address = (unsigned long)rip_rel_ptr(bringup_idt_table), + .size = sizeof(bringup_idt_table) - 1, + }; + struct idt_data data; + gate_desc idt_desc; + + /* @vc_handler is set only for a VMM Communication Exception */ + if (vc_handler) { + init_idt_data(&data, X86_TRAP_VC, vc_handler); + idt_init_desc(&idt_desc, &data); + native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc); + } + + native_load_idt(&desc); +} + +/* + * Setup boot CPU state needed before kernel switches to virtual addresses. 
+ */ +void __head startup_64_setup_gdt_idt(void) +{ + struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page); + void *handler = NULL; + + struct desc_ptr startup_gdt_descr = { + .address = (unsigned long)gp->gdt, + .size = GDT_SIZE - 1, + }; + + /* Load GDT */ + native_load_gdt(&startup_gdt_descr); + + /* New GDT is live - reload data segment registers */ + asm volatile("movl %%eax, %%ds\n" + "movl %%eax, %%ss\n" + "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); + + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) + handler = rip_rel_ptr(vc_no_ghcb); + + startup_64_load_idt(handler); +} diff --git a/arch/x86/boot/compressed/la57toggle.S b/arch/x86/boot/startup/la57toggle.S index 9ee002387eb1..370075b4d95b 100644 --- a/arch/x86/boot/compressed/la57toggle.S +++ b/arch/x86/boot/startup/la57toggle.S @@ -5,7 +5,6 @@ #include <asm/boot.h> #include <asm/msr.h> #include <asm/processor-flags.h> -#include "pgtable.h" /* * This is the 32-bit trampoline that will be copied over to low memory. It diff --git a/arch/x86/boot/startup/map_kernel.c b/arch/x86/boot/startup/map_kernel.c new file mode 100644 index 000000000000..332dbe6688c4 --- /dev/null +++ b/arch/x86/boot/startup/map_kernel.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/init.h> +#include <linux/linkage.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/pgtable.h> + +#include <asm/init.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <asm/sev.h> + +extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; +extern unsigned int next_early_pgt; + +static inline bool check_la57_support(void) +{ + /* + * 5-level paging is detected and enabled at kernel decompression + * stage. Only check if it has been enabled there. + */ + if (!(native_read_cr4() & X86_CR4_LA57)) + return false; + + __pgtable_l5_enabled = 1; + pgdir_shift = 48; + ptrs_per_p4d = 512; + + return true; +} + +static unsigned long __head sme_postprocess_startup(struct boot_params *bp, + pmdval_t *pmd, + unsigned long p2v_offset) +{ + unsigned long paddr, paddr_end; + int i; + + /* Encrypt the kernel and related (if SME is active) */ + sme_encrypt_kernel(bp); + + /* + * Clear the memory encryption mask from the .bss..decrypted section. + * The bss section will be memset to zero later in the initialization so + * there is no need to zero it after changing the memory encryption + * attribute. + */ + if (sme_get_me_mask()) { + paddr = (unsigned long)rip_rel_ptr(__start_bss_decrypted); + paddr_end = (unsigned long)rip_rel_ptr(__end_bss_decrypted); + + for (; paddr < paddr_end; paddr += PMD_SIZE) { + /* + * On SNP, transition the page to shared in the RMP table so that + * it is consistent with the page table attribute change. + * + * __start_bss_decrypted has a virtual address in the high range + * mapping (kernel .text). PVALIDATE, by way of + * early_snp_set_memory_shared(), requires a valid virtual + * address but the kernel is currently running off of the identity + * mapping so use the PA to get a *currently* valid virtual address. + */ + early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD); + + i = pmd_index(paddr - p2v_offset); + pmd[i] -= sme_get_me_mask(); + } + } + + /* + * Return the SME encryption mask (if SME is active) to be used as a + * modifier for the initial pgdir entry programmed into CR3. 
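
check_la57_support() above only latches a decision already made by the decompressor: once CR4.LA57 is set, pgdir_shift becomes 48 and each P4D table holds 512 entries. As a quick reference, a virtual address is then split as sketched below (macro names are illustrative; the kernel uses pgd_index(), p4d_index() and friends):

    #define PTE_INDEX(va)   (((va) >> 12) & 0x1ff)
    #define PMD_INDEX(va)   (((va) >> 21) & 0x1ff)
    #define PUD_INDEX(va)   (((va) >> 30) & 0x1ff)
    #define P4D_INDEX(va)   (((va) >> 39) & 0x1ff)  /* 512 entries per P4D */
    #define PGD_INDEX(va)   (((va) >> 48) & 0x1ff)  /* pgdir_shift == 48 */
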
+ */ + return sme_get_me_mask(); +} + +/* + * This code is compiled using PIC codegen because it will execute from the + * early 1:1 mapping of memory, which deviates from the mapping expected by the + * linker. Due to this deviation, taking the address of a global variable will + * produce an ambiguous result when using the plain & operator. Instead, + * rip_rel_ptr() must be used, which will return the RIP-relative address in + * the 1:1 mapping of memory. Kernel virtual addresses can be determined by + * subtracting p2v_offset from the RIP-relative address. + */ +unsigned long __head __startup_64(unsigned long p2v_offset, + struct boot_params *bp) +{ + pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts); + unsigned long physaddr = (unsigned long)rip_rel_ptr(_text); + unsigned long va_text, va_end; + unsigned long pgtable_flags; + unsigned long load_delta; + pgdval_t *pgd; + p4dval_t *p4d; + pudval_t *pud; + pmdval_t *pmd, pmd_entry; + bool la57; + int i; + + la57 = check_la57_support(); + + /* Is the address too large? */ + if (physaddr >> MAX_PHYSMEM_BITS) + for (;;); + + /* + * Compute the delta between the address I am compiled to run at + * and the address I am actually running at. + */ + phys_base = load_delta = __START_KERNEL_map + p2v_offset; + + /* Is the address not 2M aligned? */ + if (load_delta & ~PMD_MASK) + for (;;); + + va_text = physaddr - p2v_offset; + va_end = (unsigned long)rip_rel_ptr(_end) - p2v_offset; + + /* Include the SME encryption mask in the fixup value */ + load_delta += sme_get_me_mask(); + + /* Fixup the physical addresses in the page table */ + + pgd = rip_rel_ptr(early_top_pgt); + pgd[pgd_index(__START_KERNEL_map)] += load_delta; + + if (la57) { + p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt); + p4d[MAX_PTRS_PER_P4D - 1] += load_delta; + + pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; + } + + level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta; + level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta; + + for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) + level2_fixmap_pgt[i].pmd += load_delta; + + /* + * Set up the identity mapping for the switchover. These + * entries should *NOT* have the global bit set! This also + * creates a bunch of nonsense entries but that is fine -- + * it avoids problems around wraparound. + */ + + pud = &early_pgts[0]->pmd; + pmd = &early_pgts[1]->pmd; + next_early_pgt = 2; + + pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); + + if (la57) { + p4d = &early_pgts[next_early_pgt++]->pmd; + + i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; + pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; + pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; + + i = physaddr >> P4D_SHIFT; + p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; + p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; + } else { + i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; + pgd[i + 0] = (pgdval_t)pud + pgtable_flags; + pgd[i + 1] = (pgdval_t)pud + pgtable_flags; + } + + i = physaddr >> PUD_SHIFT; + pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; + pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; + + pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; + pmd_entry += sme_get_me_mask(); + pmd_entry += physaddr; + + for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) { + int idx = i + (physaddr >> PMD_SHIFT); + + pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; + } + + /* + * Fixup the kernel text+data virtual addresses. 
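
The comment at the top of __startup_64() above states the key constraint for all of arch/x86/boot/startup/: the code runs from the early 1:1 mapping, so the address of a global must be formed RIP-relatively rather than taken with a plain &. A hand-rolled illustration of the idea (this is not the kernel's actual rip_rel_ptr() implementation):

    /* Yields the address 'sym' currently occupies in the 1:1 mapping,
     * whereas &sym may resolve to its link-time virtual address. */
    #define RIP_REL_ADDR(sym)                                   \
    ({                                                          \
        unsigned long __p;                                      \
        asm ("leaq " #sym "(%%rip), %0" : "=r" (__p));          \
        __p;                                                    \
    })
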
Note that + * we might write invalid pmds, when the kernel is relocated + * cleanup_highmap() fixes this up along with the mappings + * beyond _end. + * + * Only the region occupied by the kernel image has so far + * been checked against the table of usable memory regions + * provided by the firmware, so invalidate pages outside that + * region. A page table entry that maps to a reserved area of + * memory would allow processor speculation into that area, + * and on some hardware (particularly the UV platform) even + * speculative access to some reserved areas is caught as an + * error, causing the BIOS to halt the system. + */ + + pmd = rip_rel_ptr(level2_kernel_pgt); + + /* invalidate pages before the kernel image */ + for (i = 0; i < pmd_index(va_text); i++) + pmd[i] &= ~_PAGE_PRESENT; + + /* fixup pages that are part of the kernel image */ + for (; i <= pmd_index(va_end); i++) + if (pmd[i] & _PAGE_PRESENT) + pmd[i] += load_delta; + + /* invalidate pages after the kernel image */ + for (; i < PTRS_PER_PMD; i++) + pmd[i] &= ~_PAGE_PRESENT; + + return sme_postprocess_startup(bp, pmd, p2v_offset); +} diff --git a/arch/x86/coco/sev/shared.c b/arch/x86/boot/startup/sev-shared.c index 2e4122f8aa6b..7a706db87b93 100644 --- a/arch/x86/coco/sev/shared.c +++ b/arch/x86/boot/startup/sev-shared.c @@ -14,76 +14,23 @@ #ifndef __BOOT_COMPRESSED #define error(v) pr_err(v) #define has_cpuflag(f) boot_cpu_has(f) -#define sev_printk(fmt, ...) printk(fmt, ##__VA_ARGS__) -#define sev_printk_rtl(fmt, ...) printk_ratelimited(fmt, ##__VA_ARGS__) #else #undef WARN #define WARN(condition, format...) (!!(condition)) -#define sev_printk(fmt, ...) -#define sev_printk_rtl(fmt, ...) #undef vc_forward_exception #define vc_forward_exception(c) panic("SNP: Hypervisor requested exception\n") #endif /* * SVSM related information: - * When running under an SVSM, the VMPL that Linux is executing at must be - * non-zero. The VMPL is therefore used to indicate the presence of an SVSM. - * * During boot, the page tables are set up as identity mapped and later * changed to use kernel virtual addresses. Maintain separate virtual and * physical addresses for the CAA to allow SVSM functions to be used during * early boot, both with identity mapped virtual addresses and proper kernel * virtual addresses. */ -u8 snp_vmpl __ro_after_init; -EXPORT_SYMBOL_GPL(snp_vmpl); -static struct svsm_ca *boot_svsm_caa __ro_after_init; -static u64 boot_svsm_caa_pa __ro_after_init; - -static struct svsm_ca *svsm_get_caa(void); -static u64 svsm_get_caa_pa(void); -static int svsm_perform_call_protocol(struct svsm_call *call); - -/* I/O parameters for CPUID-related helpers */ -struct cpuid_leaf { - u32 fn; - u32 subfn; - u32 eax; - u32 ebx; - u32 ecx; - u32 edx; -}; - -/* - * Individual entries of the SNP CPUID table, as defined by the SNP - * Firmware ABI, Revision 0.9, Section 7.1, Table 14. - */ -struct snp_cpuid_fn { - u32 eax_in; - u32 ecx_in; - u64 xcr0_in; - u64 xss_in; - u32 eax; - u32 ebx; - u32 ecx; - u32 edx; - u64 __reserved; -} __packed; - -/* - * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9, - * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit - * of 64 entries per CPUID table. 
- */ -#define SNP_CPUID_COUNT_MAX 64 - -struct snp_cpuid_table { - u32 count; - u32 __reserved1; - u64 __reserved2; - struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX]; -} __packed; +struct svsm_ca *boot_svsm_caa __ro_after_init; +u64 boot_svsm_caa_pa __ro_after_init; /* * Since feature negotiation related variables are set early in the boot @@ -107,7 +54,7 @@ static u32 cpuid_std_range_max __ro_after_init; static u32 cpuid_hyp_range_max __ro_after_init; static u32 cpuid_ext_range_max __ro_after_init; -static bool __init sev_es_check_cpu_features(void) +bool __init sev_es_check_cpu_features(void) { if (!has_cpuflag(X86_FEATURE_RDRAND)) { error("RDRAND instruction not supported - no trusted source of randomness available\n"); @@ -117,7 +64,7 @@ static bool __init sev_es_check_cpu_features(void) return true; } -static void __head __noreturn +void __head __noreturn sev_es_terminate(unsigned int set, unsigned int reason) { u64 val = GHCB_MSR_TERM_REQ; @@ -136,7 +83,7 @@ sev_es_terminate(unsigned int set, unsigned int reason) /* * The hypervisor features are available from GHCB version 2 onward. */ -static u64 get_hv_features(void) +u64 get_hv_features(void) { u64 val; @@ -153,7 +100,7 @@ static u64 get_hv_features(void) return GHCB_MSR_HV_FT_RESP_VAL(val); } -static void snp_register_ghcb_early(unsigned long paddr) +void snp_register_ghcb_early(unsigned long paddr) { unsigned long pfn = paddr >> PAGE_SHIFT; u64 val; @@ -169,7 +116,7 @@ static void snp_register_ghcb_early(unsigned long paddr) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER); } -static bool sev_es_negotiate_protocol(void) +bool sev_es_negotiate_protocol(void) { u64 val; @@ -190,39 +137,6 @@ static bool sev_es_negotiate_protocol(void) return true; } -static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb) -{ - ghcb->save.sw_exit_code = 0; - __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); -} - -static bool vc_decoding_needed(unsigned long exit_code) -{ - /* Exceptions don't require to decode the instruction */ - return !(exit_code >= SVM_EXIT_EXCP_BASE && - exit_code <= SVM_EXIT_LAST_EXCP); -} - -static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt, - struct pt_regs *regs, - unsigned long exit_code) -{ - enum es_result ret = ES_OK; - - memset(ctxt, 0, sizeof(*ctxt)); - ctxt->regs = regs; - - if (vc_decoding_needed(exit_code)) - ret = vc_decode_insn(ctxt); - - return ret; -} - -static void vc_finish_insn(struct es_em_ctxt *ctxt) -{ - ctxt->regs->ip += ctxt->insn.length; -} - static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt) { u32 ret; @@ -344,7 +258,7 @@ static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call) * Fill in protocol and format specifiers. This can be called very early * in the boot, so use rip-relative references as needed. 
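
Several of the early helpers above (sev_es_terminate(), get_hv_features(), snp_register_ghcb_early()) speak the GHCB MSR protocol, which always follows the same write/VMGEXIT/read shape. A condensed sketch of that round trip (the helper name is made up):

    static u64 ghcb_msr_roundtrip(u64 request)
    {
        sev_es_wr_ghcb_msr(request);
        VMGEXIT();
        return sev_es_rd_ghcb_msr();  /* caller checks GHCB_RESP_CODE() */
    }
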
*/ - ghcb->protocol_version = RIP_REL_REF(ghcb_version); + ghcb->protocol_version = ghcb_version; ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL); @@ -371,10 +285,10 @@ static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call) return svsm_process_result_codes(call); } -static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, - struct es_em_ctxt *ctxt, - u64 exit_code, u64 exit_info_1, - u64 exit_info_2) +enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, + struct es_em_ctxt *ctxt, + u64 exit_code, u64 exit_info_1, + u64 exit_info_2) { /* Fill in protocol and format specifiers */ ghcb->protocol_version = ghcb_version; @@ -473,9 +387,9 @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid * while running with the initial identity mapping as well as the * switch-over to kernel virtual addresses later. */ -static const struct snp_cpuid_table *snp_cpuid_get_table(void) +const struct snp_cpuid_table *snp_cpuid_get_table(void) { - return &RIP_REL_REF(cpuid_table_copy); + return rip_rel_ptr(&cpuid_table_copy); } /* @@ -672,7 +586,7 @@ snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt, * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value * should be treated as fatal by caller. */ -static int __head +int __head snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf) { const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); @@ -701,9 +615,9 @@ snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf) leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0; /* Skip post-processing for out-of-range zero leafs. */ - if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) || - (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) || - (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max)))) + if (!(leaf->fn <= cpuid_std_range_max || + (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) || + (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max))) return 0; } @@ -782,391 +696,6 @@ fail: sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); } -static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt, - unsigned long address, - bool write) -{ - if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) { - ctxt->fi.vector = X86_TRAP_PF; - ctxt->fi.error_code = X86_PF_USER; - ctxt->fi.cr2 = address; - if (write) - ctxt->fi.error_code |= X86_PF_WRITE; - - return ES_EXCEPTION; - } - - return ES_OK; -} - -static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, - void *src, char *buf, - unsigned int data_size, - unsigned int count, - bool backwards) -{ - int i, b = backwards ? -1 : 1; - unsigned long address = (unsigned long)src; - enum es_result ret; - - ret = vc_insn_string_check(ctxt, address, false); - if (ret != ES_OK) - return ret; - - for (i = 0; i < count; i++) { - void *s = src + (i * data_size * b); - char *d = buf + (i * data_size); - - ret = vc_read_mem(ctxt, s, d, data_size); - if (ret != ES_OK) - break; - } - - return ret; -} - -static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt, - void *dst, char *buf, - unsigned int data_size, - unsigned int count, - bool backwards) -{ - int i, s = backwards ? 
-1 : 1; - unsigned long address = (unsigned long)dst; - enum es_result ret; - - ret = vc_insn_string_check(ctxt, address, true); - if (ret != ES_OK) - return ret; - - for (i = 0; i < count; i++) { - void *d = dst + (i * data_size * s); - char *b = buf + (i * data_size); - - ret = vc_write_mem(ctxt, d, b, data_size); - if (ret != ES_OK) - break; - } - - return ret; -} - -#define IOIO_TYPE_STR BIT(2) -#define IOIO_TYPE_IN 1 -#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR) -#define IOIO_TYPE_OUT 0 -#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR) - -#define IOIO_REP BIT(3) - -#define IOIO_ADDR_64 BIT(9) -#define IOIO_ADDR_32 BIT(8) -#define IOIO_ADDR_16 BIT(7) - -#define IOIO_DATA_32 BIT(6) -#define IOIO_DATA_16 BIT(5) -#define IOIO_DATA_8 BIT(4) - -#define IOIO_SEG_ES (0 << 10) -#define IOIO_SEG_DS (3 << 10) - -static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) -{ - struct insn *insn = &ctxt->insn; - size_t size; - u64 port; - - *exitinfo = 0; - - switch (insn->opcode.bytes[0]) { - /* INS opcodes */ - case 0x6c: - case 0x6d: - *exitinfo |= IOIO_TYPE_INS; - *exitinfo |= IOIO_SEG_ES; - port = ctxt->regs->dx & 0xffff; - break; - - /* OUTS opcodes */ - case 0x6e: - case 0x6f: - *exitinfo |= IOIO_TYPE_OUTS; - *exitinfo |= IOIO_SEG_DS; - port = ctxt->regs->dx & 0xffff; - break; - - /* IN immediate opcodes */ - case 0xe4: - case 0xe5: - *exitinfo |= IOIO_TYPE_IN; - port = (u8)insn->immediate.value & 0xffff; - break; - - /* OUT immediate opcodes */ - case 0xe6: - case 0xe7: - *exitinfo |= IOIO_TYPE_OUT; - port = (u8)insn->immediate.value & 0xffff; - break; - - /* IN register opcodes */ - case 0xec: - case 0xed: - *exitinfo |= IOIO_TYPE_IN; - port = ctxt->regs->dx & 0xffff; - break; - - /* OUT register opcodes */ - case 0xee: - case 0xef: - *exitinfo |= IOIO_TYPE_OUT; - port = ctxt->regs->dx & 0xffff; - break; - - default: - return ES_DECODE_FAILED; - } - - *exitinfo |= port << 16; - - switch (insn->opcode.bytes[0]) { - case 0x6c: - case 0x6e: - case 0xe4: - case 0xe6: - case 0xec: - case 0xee: - /* Single byte opcodes */ - *exitinfo |= IOIO_DATA_8; - size = 1; - break; - default: - /* Length determined by instruction parsing */ - *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16 - : IOIO_DATA_32; - size = (insn->opnd_bytes == 2) ? 2 : 4; - } - - switch (insn->addr_bytes) { - case 2: - *exitinfo |= IOIO_ADDR_16; - break; - case 4: - *exitinfo |= IOIO_ADDR_32; - break; - case 8: - *exitinfo |= IOIO_ADDR_64; - break; - } - - if (insn_has_rep_prefix(insn)) - *exitinfo |= IOIO_REP; - - return vc_ioio_check(ctxt, (u16)port, size); -} - -static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) -{ - struct pt_regs *regs = ctxt->regs; - u64 exit_info_1, exit_info_2; - enum es_result ret; - - ret = vc_ioio_exitinfo(ctxt, &exit_info_1); - if (ret != ES_OK) - return ret; - - if (exit_info_1 & IOIO_TYPE_STR) { - - /* (REP) INS/OUTS */ - - bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF); - unsigned int io_bytes, exit_bytes; - unsigned int ghcb_count, op_count; - unsigned long es_base; - u64 sw_scratch; - - /* - * For the string variants with rep prefix the amount of in/out - * operations per #VC exception is limited so that the kernel - * has a chance to take interrupts and re-schedule while the - * instruction is emulated. - */ - io_bytes = (exit_info_1 >> 4) & 0x7; - ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes; - - op_count = (exit_info_1 & IOIO_REP) ? 
regs->cx : 1; - exit_info_2 = min(op_count, ghcb_count); - exit_bytes = exit_info_2 * io_bytes; - - es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); - - /* Read bytes of OUTS into the shared buffer */ - if (!(exit_info_1 & IOIO_TYPE_IN)) { - ret = vc_insn_string_read(ctxt, - (void *)(es_base + regs->si), - ghcb->shared_buffer, io_bytes, - exit_info_2, df); - if (ret) - return ret; - } - - /* - * Issue an VMGEXIT to the HV to consume the bytes from the - * shared buffer or to have it write them into the shared buffer - * depending on the instruction: OUTS or INS. - */ - sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer); - ghcb_set_sw_scratch(ghcb, sw_scratch); - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, - exit_info_1, exit_info_2); - if (ret != ES_OK) - return ret; - - /* Read bytes from shared buffer into the guest's destination. */ - if (exit_info_1 & IOIO_TYPE_IN) { - ret = vc_insn_string_write(ctxt, - (void *)(es_base + regs->di), - ghcb->shared_buffer, io_bytes, - exit_info_2, df); - if (ret) - return ret; - - if (df) - regs->di -= exit_bytes; - else - regs->di += exit_bytes; - } else { - if (df) - regs->si -= exit_bytes; - else - regs->si += exit_bytes; - } - - if (exit_info_1 & IOIO_REP) - regs->cx -= exit_info_2; - - ret = regs->cx ? ES_RETRY : ES_OK; - - } else { - - /* IN/OUT into/from rAX */ - - int bits = (exit_info_1 & 0x70) >> 1; - u64 rax = 0; - - if (!(exit_info_1 & IOIO_TYPE_IN)) - rax = lower_bits(regs->ax, bits); - - ghcb_set_rax(ghcb, rax); - - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0); - if (ret != ES_OK) - return ret; - - if (exit_info_1 & IOIO_TYPE_IN) { - if (!ghcb_rax_is_valid(ghcb)) - return ES_VMM_ERROR; - regs->ax = lower_bits(ghcb->save.rax, bits); - } - } - - return ret; -} - -static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt) -{ - struct pt_regs *regs = ctxt->regs; - struct cpuid_leaf leaf; - int ret; - - leaf.fn = regs->ax; - leaf.subfn = regs->cx; - ret = snp_cpuid(ghcb, ctxt, &leaf); - if (!ret) { - regs->ax = leaf.eax; - regs->bx = leaf.ebx; - regs->cx = leaf.ecx; - regs->dx = leaf.edx; - } - - return ret; -} - -static enum es_result vc_handle_cpuid(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - struct pt_regs *regs = ctxt->regs; - u32 cr4 = native_read_cr4(); - enum es_result ret; - int snp_cpuid_ret; - - snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt); - if (!snp_cpuid_ret) - return ES_OK; - if (snp_cpuid_ret != -EOPNOTSUPP) - return ES_VMM_ERROR; - - ghcb_set_rax(ghcb, regs->ax); - ghcb_set_rcx(ghcb, regs->cx); - - if (cr4 & X86_CR4_OSXSAVE) - /* Safe to read xcr0 */ - ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK)); - else - /* xgetbv will cause #GP - use reset value for xcr0 */ - ghcb_set_xcr0(ghcb, 1); - - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); - if (ret != ES_OK) - return ret; - - if (!(ghcb_rax_is_valid(ghcb) && - ghcb_rbx_is_valid(ghcb) && - ghcb_rcx_is_valid(ghcb) && - ghcb_rdx_is_valid(ghcb))) - return ES_VMM_ERROR; - - regs->ax = ghcb->save.rax; - regs->bx = ghcb->save.rbx; - regs->cx = ghcb->save.rcx; - regs->dx = ghcb->save.rdx; - - return ES_OK; -} - -static enum es_result vc_handle_rdtsc(struct ghcb *ghcb, - struct es_em_ctxt *ctxt, - unsigned long exit_code) -{ - bool rdtscp = (exit_code == SVM_EXIT_RDTSCP); - enum es_result ret; - - /* - * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure - * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP - * instructions are being intercepted. 
If this should occur and Secure - * TSC is enabled, guest execution should be terminated as the guest - * cannot rely on the TSC value provided by the hypervisor. - */ - if (sev_status & MSR_AMD64_SNP_SECURE_TSC) - return ES_VMM_ERROR; - - ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0); - if (ret != ES_OK) - return ret; - - if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) && - (!rdtscp || ghcb_rcx_is_valid(ghcb)))) - return ES_VMM_ERROR; - - ctxt->regs->ax = ghcb->save.rax; - ctxt->regs->dx = ghcb->save.rdx; - if (rdtscp) - ctxt->regs->cx = ghcb->save.rcx; - - return ES_OK; -} - struct cc_setup_data { struct setup_data header; u32 cc_blob_address; @@ -1224,36 +753,14 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info) const struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; if (fn->eax_in == 0x0) - RIP_REL_REF(cpuid_std_range_max) = fn->eax; + cpuid_std_range_max = fn->eax; else if (fn->eax_in == 0x40000000) - RIP_REL_REF(cpuid_hyp_range_max) = fn->eax; + cpuid_hyp_range_max = fn->eax; else if (fn->eax_in == 0x80000000) - RIP_REL_REF(cpuid_ext_range_max) = fn->eax; + cpuid_ext_range_max = fn->eax; } } -static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size, - int ret, u64 svsm_ret) -{ - WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n", - pfn, action, page_size, ret, svsm_ret); - - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE); -} - -static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret) -{ - unsigned int page_size; - bool action; - u64 pfn; - - pfn = pc->entry[pc->cur_index].pfn; - action = pc->entry[pc->cur_index].action; - page_size = pc->entry[pc->cur_index].page_size; - - __pval_terminate(pfn, action, page_size, ret, svsm_ret); -} - static void __head svsm_pval_4k_page(unsigned long paddr, bool validate) { struct svsm_pvalidate_call *pc; @@ -1296,11 +803,7 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, { int ret; - /* - * This can be called very early during boot, so use rIP-relative - * references as needed. - */ - if (RIP_REL_REF(snp_vmpl)) { + if (snp_vmpl) { svsm_pval_4k_page(paddr, validate); } else { ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate); @@ -1309,351 +812,6 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, } } -static void pval_pages(struct snp_psc_desc *desc) -{ - struct psc_entry *e; - unsigned long vaddr; - unsigned int size; - unsigned int i; - bool validate; - u64 pfn; - int rc; - - for (i = 0; i <= desc->hdr.end_entry; i++) { - e = &desc->entries[i]; - - pfn = e->gfn; - vaddr = (unsigned long)pfn_to_kaddr(pfn); - size = e->pagesize ? 
RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; - validate = e->operation == SNP_PAGE_STATE_PRIVATE; - - rc = pvalidate(vaddr, size, validate); - if (!rc) - continue; - - if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) { - unsigned long vaddr_end = vaddr + PMD_SIZE; - - for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) { - rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate); - if (rc) - __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0); - } - } else { - __pval_terminate(pfn, validate, size, rc, 0); - } - } -} - -static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action, - struct svsm_pvalidate_call *pc) -{ - struct svsm_pvalidate_entry *pe; - - /* Nothing in the CA yet */ - pc->num_entries = 0; - pc->cur_index = 0; - - pe = &pc->entry[0]; - - while (pfn < pfn_end) { - pe->page_size = RMP_PG_SIZE_4K; - pe->action = action; - pe->ignore_cf = 0; - pe->pfn = pfn; - - pe++; - pfn++; - - pc->num_entries++; - if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) - break; - } - - return pfn; -} - -static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry, - struct svsm_pvalidate_call *pc) -{ - struct svsm_pvalidate_entry *pe; - struct psc_entry *e; - - /* Nothing in the CA yet */ - pc->num_entries = 0; - pc->cur_index = 0; - - pe = &pc->entry[0]; - e = &desc->entries[desc_entry]; - - while (desc_entry <= desc->hdr.end_entry) { - pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; - pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; - pe->ignore_cf = 0; - pe->pfn = e->gfn; - - pe++; - e++; - - desc_entry++; - pc->num_entries++; - if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) - break; - } - - return desc_entry; -} - -static void svsm_pval_pages(struct snp_psc_desc *desc) -{ - struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY]; - unsigned int i, pv_4k_count = 0; - struct svsm_pvalidate_call *pc; - struct svsm_call call = {}; - unsigned long flags; - bool action; - u64 pc_pa; - int ret; - - /* - * This can be called very early in the boot, use native functions in - * order to avoid paravirt issues. - */ - flags = native_local_irq_save(); - - /* - * The SVSM calling area (CA) can support processing 510 entries at a - * time. Loop through the Page State Change descriptor until the CA is - * full or the last entry in the descriptor is reached, at which time - * the SVSM is invoked. This repeats until all entries in the descriptor - * are processed. - */ - call.caa = svsm_get_caa(); - - pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer; - pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); - - /* Protocol 0, Call ID 1 */ - call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE); - call.rcx = pc_pa; - - for (i = 0; i <= desc->hdr.end_entry;) { - i = svsm_build_ca_from_psc_desc(desc, i, pc); - - do { - ret = svsm_perform_call_protocol(&call); - if (!ret) - continue; - - /* - * Check if the entry failed because of an RMP mismatch (a - * PVALIDATE at 2M was requested, but the page is mapped in - * the RMP as 4K). 
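
The size-mismatch handling mentioned above follows the same pattern in both the direct and the SVSM paths: a 2M PVALIDATE rejected because the RMP maps the range as 4K is redone as 512 separate 4K validations. A condensed restatement of the direct case from pval_pages() (the function name is illustrative):

    static void pvalidate_2m_or_split(unsigned long vaddr, bool validate)
    {
        int rc = pvalidate(vaddr, RMP_PG_SIZE_2M, validate);

        if (rc == PVALIDATE_FAIL_SIZEMISMATCH) {
            unsigned long end = vaddr + PMD_SIZE;

            for (; vaddr < end; vaddr += PAGE_SIZE) {
                rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
                if (rc)
                    break;
            }
        }
        if (rc)
            sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
    }
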
- */ - - if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH && - pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) { - /* Save this entry for post-processing at 4K */ - pv_4k[pv_4k_count++] = pc->entry[pc->cur_index]; - - /* Skip to the next one unless at the end of the list */ - pc->cur_index++; - if (pc->cur_index < pc->num_entries) - ret = -EAGAIN; - else - ret = 0; - } - } while (ret == -EAGAIN); - - if (ret) - svsm_pval_terminate(pc, ret, call.rax_out); - } - - /* Process any entries that failed to be validated at 2M and validate them at 4K */ - for (i = 0; i < pv_4k_count; i++) { - u64 pfn, pfn_end; - - action = pv_4k[i].action; - pfn = pv_4k[i].pfn; - pfn_end = pfn + 512; - - while (pfn < pfn_end) { - pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc); - - ret = svsm_perform_call_protocol(&call); - if (ret) - svsm_pval_terminate(pc, ret, call.rax_out); - } - } - - native_local_irq_restore(flags); -} - -static void pvalidate_pages(struct snp_psc_desc *desc) -{ - if (snp_vmpl) - svsm_pval_pages(desc); - else - pval_pages(desc); -} - -static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc) -{ - int cur_entry, end_entry, ret = 0; - struct snp_psc_desc *data; - struct es_em_ctxt ctxt; - - vc_ghcb_invalidate(ghcb); - - /* Copy the input desc into GHCB shared buffer */ - data = (struct snp_psc_desc *)ghcb->shared_buffer; - memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc))); - - /* - * As per the GHCB specification, the hypervisor can resume the guest - * before processing all the entries. Check whether all the entries - * are processed. If not, then keep retrying. Note, the hypervisor - * will update the data memory directly to indicate the status, so - * reference the data->hdr everywhere. - * - * The strategy here is to wait for the hypervisor to change the page - * state in the RMP table before guest accesses the memory pages. If the - * page state change was not successful, then later memory access will - * result in a crash. - */ - cur_entry = data->hdr.cur_entry; - end_entry = data->hdr.end_entry; - - while (data->hdr.cur_entry <= data->hdr.end_entry) { - ghcb_set_sw_scratch(ghcb, (u64)__pa(data)); - - /* This will advance the shared buffer data points to. */ - ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0); - - /* - * Page State Change VMGEXIT can pass error code through - * exit_info_2. - */ - if (WARN(ret || ghcb->save.sw_exit_info_2, - "SNP: PSC failed ret=%d exit_info_2=%llx\n", - ret, ghcb->save.sw_exit_info_2)) { - ret = 1; - goto out; - } - - /* Verify that reserved bit is not set */ - if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) { - ret = 1; - goto out; - } - - /* - * Sanity check that entry processing is not going backwards. - * This will happen only if hypervisor is tricking us. 
- */ - if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry, -"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n", - end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) { - ret = 1; - goto out; - } - } - -out: - return ret; -} - -static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt, - unsigned long exit_code) -{ - unsigned int opcode = (unsigned int)ctxt->insn.opcode.value; - u8 modrm = ctxt->insn.modrm.value; - - switch (exit_code) { - - case SVM_EXIT_IOIO: - case SVM_EXIT_NPF: - /* handled separately */ - return ES_OK; - - case SVM_EXIT_CPUID: - if (opcode == 0xa20f) - return ES_OK; - break; - - case SVM_EXIT_INVD: - if (opcode == 0x080f) - return ES_OK; - break; - - case SVM_EXIT_MONITOR: - /* MONITOR and MONITORX instructions generate the same error code */ - if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa)) - return ES_OK; - break; - - case SVM_EXIT_MWAIT: - /* MWAIT and MWAITX instructions generate the same error code */ - if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb)) - return ES_OK; - break; - - case SVM_EXIT_MSR: - /* RDMSR */ - if (opcode == 0x320f || - /* WRMSR */ - opcode == 0x300f) - return ES_OK; - break; - - case SVM_EXIT_RDPMC: - if (opcode == 0x330f) - return ES_OK; - break; - - case SVM_EXIT_RDTSC: - if (opcode == 0x310f) - return ES_OK; - break; - - case SVM_EXIT_RDTSCP: - if (opcode == 0x010f && modrm == 0xf9) - return ES_OK; - break; - - case SVM_EXIT_READ_DR7: - if (opcode == 0x210f && - X86_MODRM_REG(ctxt->insn.modrm.value) == 7) - return ES_OK; - break; - - case SVM_EXIT_VMMCALL: - if (opcode == 0x010f && modrm == 0xd9) - return ES_OK; - - break; - - case SVM_EXIT_WRITE_DR7: - if (opcode == 0x230f && - X86_MODRM_REG(ctxt->insn.modrm.value) == 7) - return ES_OK; - break; - - case SVM_EXIT_WBINVD: - if (opcode == 0x90f) - return ES_OK; - break; - - default: - break; - } - - sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n", - opcode, exit_code, ctxt->regs->ip); - - return ES_UNSUPPORTED; -} - /* * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM * services needed when not running in VMPL0. @@ -1681,7 +839,7 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info) * routine is running identity mapped when called, both by the decompressor * code and the early kernel code. */ - if (!rmpadjust((unsigned long)&RIP_REL_REF(boot_ghcb_page), RMP_PG_SIZE_4K, 1)) + if (!rmpadjust((unsigned long)rip_rel_ptr(&boot_ghcb_page), RMP_PG_SIZE_4K, 1)) return false; /* @@ -1698,7 +856,7 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info) if (!secrets_page->svsm_guest_vmpl) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_VMPL0); - RIP_REL_REF(snp_vmpl) = secrets_page->svsm_guest_vmpl; + snp_vmpl = secrets_page->svsm_guest_vmpl; caa = secrets_page->svsm_caa; @@ -1713,8 +871,8 @@ static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info) * The CA is identity mapped when this routine is called, both by the * decompressor code and the early kernel code. */ - RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)caa; - RIP_REL_REF(boot_svsm_caa_pa) = caa; + boot_svsm_caa = (struct svsm_ca *)caa; + boot_svsm_caa_pa = caa; /* Advertise the SVSM presence via CPUID. 
*/ cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table(); diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c new file mode 100644 index 000000000000..0b7e3b950183 --- /dev/null +++ b/arch/x86/boot/startup/sev-startup.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#define pr_fmt(fmt) "SEV: " fmt + +#include <linux/percpu-defs.h> +#include <linux/cc_platform.h> +#include <linux/printk.h> +#include <linux/mm_types.h> +#include <linux/set_memory.h> +#include <linux/memblock.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/cpumask.h> +#include <linux/efi.h> +#include <linux/io.h> +#include <linux/psp-sev.h> +#include <uapi/linux/sev-guest.h> + +#include <asm/init.h> +#include <asm/cpu_entry_area.h> +#include <asm/stacktrace.h> +#include <asm/sev.h> +#include <asm/sev-internal.h> +#include <asm/insn-eval.h> +#include <asm/fpu/xcr.h> +#include <asm/processor.h> +#include <asm/realmode.h> +#include <asm/setup.h> +#include <asm/traps.h> +#include <asm/svm.h> +#include <asm/smp.h> +#include <asm/cpu.h> +#include <asm/apic.h> +#include <asm/cpuid/api.h> +#include <asm/cmdline.h> + +/* For early boot hypervisor communication in SEV-ES enabled guests */ +struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE); + +/* + * Needs to be in the .data section because we need it NULL before bss is + * cleared + */ +struct ghcb *boot_ghcb __section(".data"); + +/* Bitmap of SEV features supported by the hypervisor */ +u64 sev_hv_features __ro_after_init; + +/* Secrets page physical address from the CC blob */ +u64 sev_secrets_pa __ro_after_init; + +/* For early boot SVSM communication */ +struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE); + +DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); +DEFINE_PER_CPU(u64, svsm_caa_pa); + +/* + * Nothing shall interrupt this code path while holding the per-CPU + * GHCB. The backup GHCB is only for NMIs interrupting this path. + * + * Callers must disable local interrupts around it. + */ +noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state) +{ + struct sev_es_runtime_data *data; + struct ghcb *ghcb; + + WARN_ON(!irqs_disabled()); + + data = this_cpu_read(runtime_data); + ghcb = &data->ghcb_page; + + if (unlikely(data->ghcb_active)) { + /* GHCB is already in use - save its contents */ + + if (unlikely(data->backup_ghcb_active)) { + /* + * Backup-GHCB is also already in use. There is no way + * to continue here so just kill the machine. To make + * panic() work, mark GHCBs inactive so that messages + * can be printed out. + */ + data->ghcb_active = false; + data->backup_ghcb_active = false; + + instrumentation_begin(); + panic("Unable to handle #VC exception! 
GHCB and Backup GHCB are already in use"); + instrumentation_end(); + } + + /* Mark backup_ghcb active before writing to it */ + data->backup_ghcb_active = true; + + state->ghcb = &data->backup_ghcb; + + /* Backup GHCB content */ + *state->ghcb = *ghcb; + } else { + state->ghcb = NULL; + data->ghcb_active = true; + } + + return ghcb; +} + +/* Include code shared with pre-decompression boot stage */ +#include "sev-shared.c" + +noinstr void __sev_put_ghcb(struct ghcb_state *state) +{ + struct sev_es_runtime_data *data; + struct ghcb *ghcb; + + WARN_ON(!irqs_disabled()); + + data = this_cpu_read(runtime_data); + ghcb = &data->ghcb_page; + + if (state->ghcb) { + /* Restore GHCB from Backup */ + *ghcb = *state->ghcb; + data->backup_ghcb_active = false; + state->ghcb = NULL; + } else { + /* + * Invalidate the GHCB so a VMGEXIT instruction issued + * from userspace won't appear to be valid. + */ + vc_ghcb_invalidate(ghcb); + data->ghcb_active = false; + } +} + +int svsm_perform_call_protocol(struct svsm_call *call) +{ + struct ghcb_state state; + unsigned long flags; + struct ghcb *ghcb; + int ret; + + /* + * This can be called very early in the boot, use native functions in + * order to avoid paravirt issues. + */ + flags = native_local_irq_save(); + + if (sev_cfg.ghcbs_initialized) + ghcb = __sev_get_ghcb(&state); + else if (boot_ghcb) + ghcb = boot_ghcb; + else + ghcb = NULL; + + do { + ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call) + : svsm_perform_msr_protocol(call); + } while (ret == -EAGAIN); + + if (sev_cfg.ghcbs_initialized) + __sev_put_ghcb(&state); + + native_local_irq_restore(flags); + + return ret; +} + +void __head +early_set_pages_state(unsigned long vaddr, unsigned long paddr, + unsigned long npages, enum psc_op op) +{ + unsigned long paddr_end; + u64 val; + + vaddr = vaddr & PAGE_MASK; + + paddr = paddr & PAGE_MASK; + paddr_end = paddr + (npages << PAGE_SHIFT); + + while (paddr < paddr_end) { + /* Page validation must be rescinded before changing to shared */ + if (op == SNP_PAGE_STATE_SHARED) + pvalidate_4k_page(vaddr, paddr, false); + + /* + * Use the MSR protocol because this function can be called before + * the GHCB is established. + */ + sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op)); + VMGEXIT(); + + val = sev_es_rd_ghcb_msr(); + + if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) + goto e_term; + + if (GHCB_MSR_PSC_RESP_VAL(val)) + goto e_term; + + /* Page validation must be performed after changing to private */ + if (op == SNP_PAGE_STATE_PRIVATE) + pvalidate_4k_page(vaddr, paddr, true); + + vaddr += PAGE_SIZE; + paddr += PAGE_SIZE; + } + + return; + +e_term: + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); +} + +void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, + unsigned long npages) +{ + /* + * This can be invoked in early boot while running identity mapped, so + * use an open coded check for SNP instead of using cc_platform_has(). + * This eliminates worries about jump tables or checking boot_cpu_data + * in the cc_platform_has() function. + */ + if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) + return; + + /* + * Ask the hypervisor to mark the memory pages as private in the RMP + * table. 
+ */ + early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE); +} + +void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, + unsigned long npages) +{ + /* + * This can be invoked in early boot while running identity mapped, so + * use an open coded check for SNP instead of using cc_platform_has(). + * This eliminates worries about jump tables or checking boot_cpu_data + * in the cc_platform_has() function. + */ + if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) + return; + + /* Ask hypervisor to mark the memory pages shared in the RMP table. */ + early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED); +} + +/* + * Initial set up of SNP relies on information provided by the + * Confidential Computing blob, which can be passed to the kernel + * in the following ways, depending on how it is booted: + * + * - when booted via the boot/decompress kernel: + * - via boot_params + * + * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH): + * - via a setup_data entry, as defined by the Linux Boot Protocol + * + * Scan for the blob in that order. + */ +static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp) +{ + struct cc_blob_sev_info *cc_info; + + /* Boot kernel would have passed the CC blob via boot_params. */ + if (bp->cc_blob_address) { + cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address; + goto found_cc_info; + } + + /* + * If kernel was booted directly, without the use of the + * boot/decompression kernel, the CC blob may have been passed via + * setup_data instead. + */ + cc_info = find_cc_blob_setup_data(bp); + if (!cc_info) + return NULL; + +found_cc_info: + if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC) + snp_abort(); + + return cc_info; +} + +static __head void svsm_setup(struct cc_blob_sev_info *cc_info) +{ + struct svsm_call call = {}; + int ret; + u64 pa; + + /* + * Record the SVSM Calling Area address (CAA) if the guest is not + * running at VMPL0. The CA will be used to communicate with the + * SVSM to perform the SVSM services. + */ + if (!svsm_setup_ca(cc_info)) + return; + + /* + * It is very early in the boot and the kernel is running identity + * mapped but without having adjusted the pagetables to where the + * kernel was loaded (physbase), so the get the CA address using + * RIP-relative addressing. + */ + pa = (u64)rip_rel_ptr(&boot_svsm_ca_page); + + /* + * Switch over to the boot SVSM CA while the current CA is still + * addressable. There is no GHCB at this point so use the MSR protocol. + * + * SVSM_CORE_REMAP_CA call: + * RAX = 0 (Protocol=0, CallID=0) + * RCX = New CA GPA + */ + call.caa = svsm_get_caa(); + call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA); + call.rcx = pa; + ret = svsm_perform_call_protocol(&call); + if (ret) + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL); + + boot_svsm_caa = (struct svsm_ca *)pa; + boot_svsm_caa_pa = pa; +} + +bool __head snp_init(struct boot_params *bp) +{ + struct cc_blob_sev_info *cc_info; + + if (!bp) + return false; + + cc_info = find_cc_blob(bp); + if (!cc_info) + return false; + + if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE) + sev_secrets_pa = cc_info->secrets_phys; + else + return false; + + setup_cpuid_table(cc_info); + + svsm_setup(cc_info); + + /* + * The CC blob will be used later to access the secrets page. Cache + * it here like the boot kernel does. 
+ */ + bp->cc_blob_address = (u32)(unsigned long)cc_info; + + return true; +} + +void __head __noreturn snp_abort(void) +{ + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); +} diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/boot/startup/sme.c index 5eecdd92da10..70ea1748c0a7 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/boot/startup/sme.c @@ -45,8 +45,6 @@ #include <asm/coco.h> #include <asm/sev.h> -#include "mm_internal.h" - #define PGD_FLAGS _KERNPG_TABLE_NOENC #define P4D_FLAGS _KERNPG_TABLE_NOENC #define PUD_FLAGS _KERNPG_TABLE_NOENC @@ -299,8 +297,7 @@ void __head sme_encrypt_kernel(struct boot_params *bp) * instrumentation or checking boot_cpu_data in the cc_platform_has() * function. */ - if (!sme_get_me_mask() || - RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED) + if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED) return; /* @@ -318,8 +315,8 @@ void __head sme_encrypt_kernel(struct boot_params *bp) * memory from being cached. */ - kernel_start = (unsigned long)RIP_REL_REF(_text); - kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE); + kernel_start = (unsigned long)rip_rel_ptr(_text); + kernel_end = ALIGN((unsigned long)rip_rel_ptr(_end), PMD_SIZE); kernel_len = kernel_end - kernel_start; initrd_start = 0; @@ -345,7 +342,7 @@ void __head sme_encrypt_kernel(struct boot_params *bp) * pagetable structures for the encryption of the kernel * pagetable structures for workarea (in case not currently mapped) */ - execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea); + execute_start = workarea_start = (unsigned long)rip_rel_ptr(sme_workarea); execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE; execute_len = execute_end - execute_start; @@ -526,7 +523,7 @@ void __head sme_enable(struct boot_params *bp) me_mask = 1UL << (ebx & 0x3f); /* Check the SEV MSR whether SEV or SME is enabled */ - RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV); + sev_status = msr = native_rdmsrq(MSR_AMD64_SEV); feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? 
AMD_SEV_BIT : AMD_SME_BIT; /* @@ -557,13 +554,22 @@ void __head sme_enable(struct boot_params *bp) return; /* For SME, check the SYSCFG MSR */ - msr = __rdmsr(MSR_AMD64_SYSCFG); + msr = native_rdmsrq(MSR_AMD64_SYSCFG); if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) return; } - RIP_REL_REF(sme_me_mask) = me_mask; - RIP_REL_REF(physical_mask) &= ~me_mask; - RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD; + sme_me_mask = me_mask; + physical_mask &= ~me_mask; + cc_vendor = CC_VENDOR_AMD; cc_set_mask(me_mask); } + +#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION +/* Local version for startup code, which never operates on user page tables */ +__weak +pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd) +{ + return pgd; +} +#endif diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c index 84f7a883ce1e..f35369bb14c5 100644 --- a/arch/x86/boot/string.c +++ b/arch/x86/boot/string.c @@ -32,7 +32,7 @@ int memcmp(const void *s1, const void *s2, size_t len) { bool diff; - asm("repe; cmpsb" CC_SET(nz) + asm("repe cmpsb" CC_SET(nz) : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); return diff; } diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index f2e96905b3fe..0641c8c46aee 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -292,7 +292,7 @@ static void restore_screen(void) "shrw %%cx ; " "jnc 1f ; " "stosw \n\t" - "1: rep;stosl ; " + "1: rep stosl ; " "popw %%es" : "+D" (dst), "+c" (npad) : "bdS" (video_segment), diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c index 9a0ddda3aa69..d4610af68114 100644 --- a/arch/x86/coco/core.c +++ b/arch/x86/coco/core.c @@ -18,7 +18,9 @@ #include <asm/processor.h> enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE; +SYM_PIC_ALIAS(cc_vendor); u64 cc_mask __ro_after_init; +SYM_PIC_ALIAS(cc_mask); static struct cc_attr_flags { __u64 host_sev_snp : 1, diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile index dcb06dc8b5ae..db3255b979bd 100644 --- a/arch/x86/coco/sev/Makefile +++ b/arch/x86/coco/sev/Makefile @@ -1,22 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += core.o - -# jump tables are emitted using absolute references in non-PIC code -# so they cannot be used in the early SEV startup code -CFLAGS_core.o += -fno-jump-tables - -ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_core.o = -pg -endif - -KASAN_SANITIZE_core.o := n -KMSAN_SANITIZE_core.o := n -KCOV_INSTRUMENT_core.o := n - -# With some compiler versions the generated code results in boot hangs, caused -# by several compilation units. To be safe, disable all instrumentation. 
-KCSAN_SANITIZE := n +obj-y += core.o sev-nmi.o vc-handle.o # Clang 14 and older may fail to respect __no_sanitize_undefined when inlining -UBSAN_SANITIZE := n +UBSAN_SANITIZE_sev-nmi.o := n + +# GCC may fail to respect __no_sanitize_address when inlining +KASAN_SANITIZE_sev-nmi.o := n diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c index 36beaac713c1..b2569257acd3 100644 --- a/arch/x86/coco/sev/core.c +++ b/arch/x86/coco/sev/core.c @@ -31,6 +31,7 @@ #include <asm/cpu_entry_area.h> #include <asm/stacktrace.h> #include <asm/sev.h> +#include <asm/sev-internal.h> #include <asm/insn-eval.h> #include <asm/fpu/xcr.h> #include <asm/processor.h> @@ -41,10 +42,9 @@ #include <asm/smp.h> #include <asm/cpu.h> #include <asm/apic.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/cmdline.h> - -#define DR7_RESET_VALUE 0x400 +#include <asm/msr.h> /* AP INIT values as documented in the APM2 section "Processor Initialization State" */ #define AP_INIT_CS_LIMIT 0xffff @@ -81,21 +81,6 @@ static const char * const sev_status_feat_names[] = { [MSR_AMD64_SNP_SMT_PROT_BIT] = "SMTProt", }; -/* For early boot hypervisor communication in SEV-ES enabled guests */ -static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE); - -/* - * Needs to be in the .data section because we need it NULL before bss is - * cleared - */ -static struct ghcb *boot_ghcb __section(".data"); - -/* Bitmap of SEV features supported by the hypervisor */ -static u64 sev_hv_features __ro_after_init; - -/* Secrets page physical address from the CC blob */ -static u64 secrets_pa __ro_after_init; - /* * For Secure TSC guests, the BSP fetches TSC_INFO using SNP guest messaging and * initializes snp_tsc_scale and snp_tsc_offset. These values are replicated @@ -105,558 +90,196 @@ static u64 snp_tsc_scale __ro_after_init; static u64 snp_tsc_offset __ro_after_init; static u64 snp_tsc_freq_khz __ro_after_init; -/* #VC handler runtime per-CPU data */ -struct sev_es_runtime_data { - struct ghcb ghcb_page; - - /* - * Reserve one page per CPU as backup storage for the unencrypted GHCB. - * It is needed when an NMI happens while the #VC handler uses the real - * GHCB, and the NMI handler itself is causing another #VC exception. In - * that case the GHCB content of the first handler needs to be backed up - * and restored. - */ - struct ghcb backup_ghcb; - - /* - * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions. - * There is no need for it to be atomic, because nothing is written to - * the GHCB between the read and the write of ghcb_active. So it is safe - * to use it when a nested #VC exception happens before the write. - * - * This is necessary for example in the #VC->NMI->#VC case when the NMI - * happens while the first #VC handler uses the GHCB. When the NMI code - * raises a second #VC handler it might overwrite the contents of the - * GHCB written by the first handler. To avoid this the content of the - * GHCB is saved and restored when the GHCB is detected to be in use - * already. - */ - bool ghcb_active; - bool backup_ghcb_active; - - /* - * Cached DR7 value - write it on DR7 writes and return it on reads. - * That value will never make it to the real hardware DR7 as debugging - * is currently unsupported in SEV-ES guests. 
- */ - unsigned long dr7; -}; - -struct ghcb_state { - struct ghcb *ghcb; -}; - -/* For early boot SVSM communication */ -static struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE); - -static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data); -static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa); -static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); -static DEFINE_PER_CPU(u64, svsm_caa_pa); - -static __always_inline bool on_vc_stack(struct pt_regs *regs) -{ - unsigned long sp = regs->sp; - - /* User-mode RSP is not trusted */ - if (user_mode(regs)) - return false; - - /* SYSCALL gap still has user-mode RSP */ - if (ip_within_syscall_gap(regs)) - return false; - - return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC))); -} +DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data); +DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa); /* - * This function handles the case when an NMI is raised in the #VC - * exception handler entry code, before the #VC handler has switched off - * its IST stack. In this case, the IST entry for #VC must be adjusted, - * so that any nested #VC exception will not overwrite the stack - * contents of the interrupted #VC handler. - * - * The IST entry is adjusted unconditionally so that it can be also be - * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a - * nested sev_es_ist_exit() call may adjust back the IST entry too - * early. - * - * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run - * on the NMI IST stack, as they are only called from NMI handling code - * right now. + * SVSM related information: + * When running under an SVSM, the VMPL that Linux is executing at must be + * non-zero. The VMPL is therefore used to indicate the presence of an SVSM. */ -void noinstr __sev_es_ist_enter(struct pt_regs *regs) -{ - unsigned long old_ist, new_ist; - - /* Read old IST entry */ - new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); - - /* - * If NMI happened while on the #VC IST stack, set the new IST - * value below regs->sp, so that the interrupted stack frame is - * not overwritten by subsequent #VC exceptions. - */ - if (on_vc_stack(regs)) - new_ist = regs->sp; - - /* - * Reserve additional 8 bytes and store old IST value so this - * adjustment can be unrolled in __sev_es_ist_exit(). - */ - new_ist -= sizeof(old_ist); - *(unsigned long *)new_ist = old_ist; - - /* Set new IST entry */ - this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist); -} +u8 snp_vmpl __ro_after_init; +EXPORT_SYMBOL_GPL(snp_vmpl); -void noinstr __sev_es_ist_exit(void) +static u64 __init get_snp_jump_table_addr(void) { - unsigned long ist; + struct snp_secrets_page *secrets; + void __iomem *mem; + u64 addr; - /* Read IST entry */ - ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE); + if (!mem) { + pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n"); + return 0; + } - if (WARN_ON(ist == __this_cpu_ist_top_va(VC))) - return; + secrets = (__force struct snp_secrets_page *)mem; - /* Read back old IST entry and write it to the TSS */ - this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist); + addr = secrets->os_area.ap_jump_table_pa; + iounmap(mem); + + return addr; } -/* - * Nothing shall interrupt this code path while holding the per-CPU - * GHCB. The backup GHCB is only for NMIs interrupting this path. - * - * Callers must disable local interrupts around it. 
- */ -static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state) +static u64 __init get_jump_table_addr(void) { - struct sev_es_runtime_data *data; + struct ghcb_state state; + unsigned long flags; struct ghcb *ghcb; + u64 ret = 0; - WARN_ON(!irqs_disabled()); - - data = this_cpu_read(runtime_data); - ghcb = &data->ghcb_page; - - if (unlikely(data->ghcb_active)) { - /* GHCB is already in use - save its contents */ - - if (unlikely(data->backup_ghcb_active)) { - /* - * Backup-GHCB is also already in use. There is no way - * to continue here so just kill the machine. To make - * panic() work, mark GHCBs inactive so that messages - * can be printed out. - */ - data->ghcb_active = false; - data->backup_ghcb_active = false; - - instrumentation_begin(); - panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use"); - instrumentation_end(); - } - - /* Mark backup_ghcb active before writing to it */ - data->backup_ghcb_active = true; + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) + return get_snp_jump_table_addr(); - state->ghcb = &data->backup_ghcb; + local_irq_save(flags); - /* Backup GHCB content */ - *state->ghcb = *ghcb; - } else { - state->ghcb = NULL; - data->ghcb_active = true; - } + ghcb = __sev_get_ghcb(&state); - return ghcb; -} + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE); + ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE); + ghcb_set_sw_exit_info_2(ghcb, 0); -static inline u64 sev_es_rd_ghcb_msr(void) -{ - return __rdmsr(MSR_AMD64_SEV_ES_GHCB); -} + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); -static __always_inline void sev_es_wr_ghcb_msr(u64 val) -{ - u32 low, high; + if (ghcb_sw_exit_info_1_is_valid(ghcb) && + ghcb_sw_exit_info_2_is_valid(ghcb)) + ret = ghcb->save.sw_exit_info_2; - low = (u32)(val); - high = (u32)(val >> 32); + __sev_put_ghcb(&state); - native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high); -} + local_irq_restore(flags); -static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, - unsigned char *buffer) -{ - return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); + return ret; } -static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt) +static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size, + int ret, u64 svsm_ret) { - char buffer[MAX_INSN_SIZE]; - int insn_bytes; - - insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer); - if (insn_bytes == 0) { - /* Nothing could be copied */ - ctxt->fi.vector = X86_TRAP_PF; - ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; - ctxt->fi.cr2 = ctxt->regs->ip; - return ES_EXCEPTION; - } else if (insn_bytes == -EINVAL) { - /* Effective RIP could not be calculated */ - ctxt->fi.vector = X86_TRAP_GP; - ctxt->fi.error_code = 0; - ctxt->fi.cr2 = 0; - return ES_EXCEPTION; - } - - if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes)) - return ES_DECODE_FAILED; + WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n", + pfn, action, page_size, ret, svsm_ret); - if (ctxt->insn.immediate.got) - return ES_OK; - else - return ES_DECODE_FAILED; + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE); } -static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt) +static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret) { - char buffer[MAX_INSN_SIZE]; - int res, ret; + unsigned int page_size; + bool action; + u64 pfn; - res = vc_fetch_insn_kernel(ctxt, buffer); - if (res) { - ctxt->fi.vector = 
X86_TRAP_PF; - ctxt->fi.error_code = X86_PF_INSTR; - ctxt->fi.cr2 = ctxt->regs->ip; - return ES_EXCEPTION; - } + pfn = pc->entry[pc->cur_index].pfn; + action = pc->entry[pc->cur_index].action; + page_size = pc->entry[pc->cur_index].page_size; - ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); - if (ret < 0) - return ES_DECODE_FAILED; - else - return ES_OK; + __pval_terminate(pfn, action, page_size, ret, svsm_ret); } -static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +static void pval_pages(struct snp_psc_desc *desc) { - if (user_mode(ctxt->regs)) - return __vc_decode_user_insn(ctxt); - else - return __vc_decode_kern_insn(ctxt); -} + struct psc_entry *e; + unsigned long vaddr; + unsigned int size; + unsigned int i; + bool validate; + u64 pfn; + int rc; -static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, - char *dst, char *buf, size_t size) -{ - unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; + for (i = 0; i <= desc->hdr.end_entry; i++) { + e = &desc->entries[i]; - /* - * This function uses __put_user() independent of whether kernel or user - * memory is accessed. This works fine because __put_user() does no - * sanity checks of the pointer being accessed. All that it does is - * to report when the access failed. - * - * Also, this function runs in atomic context, so __put_user() is not - * allowed to sleep. The page-fault handler detects that it is running - * in atomic context and will not try to take mmap_sem and handle the - * fault, so additional pagefault_enable()/disable() calls are not - * needed. - * - * The access can't be done via copy_to_user() here because - * vc_write_mem() must not use string instructions to access unsafe - * memory. The reason is that MOVS is emulated by the #VC handler by - * splitting the move up into a read and a write and taking a nested #VC - * exception on whatever of them is the MMIO access. Using string - * instructions here would cause infinite nesting. - */ - switch (size) { - case 1: { - u8 d1; - u8 __user *target = (u8 __user *)dst; - - memcpy(&d1, buf, 1); - if (__put_user(d1, target)) - goto fault; - break; - } - case 2: { - u16 d2; - u16 __user *target = (u16 __user *)dst; + pfn = e->gfn; + vaddr = (unsigned long)pfn_to_kaddr(pfn); + size = e->pagesize ? 
RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; + validate = e->operation == SNP_PAGE_STATE_PRIVATE; - memcpy(&d2, buf, 2); - if (__put_user(d2, target)) - goto fault; - break; - } - case 4: { - u32 d4; - u32 __user *target = (u32 __user *)dst; + rc = pvalidate(vaddr, size, validate); + if (!rc) + continue; - memcpy(&d4, buf, 4); - if (__put_user(d4, target)) - goto fault; - break; - } - case 8: { - u64 d8; - u64 __user *target = (u64 __user *)dst; + if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) { + unsigned long vaddr_end = vaddr + PMD_SIZE; - memcpy(&d8, buf, 8); - if (__put_user(d8, target)) - goto fault; - break; - } - default: - WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); - return ES_UNSUPPORTED; + for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) { + rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate); + if (rc) + __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0); + } + } else { + __pval_terminate(pfn, validate, size, rc, 0); + } } - - return ES_OK; - -fault: - if (user_mode(ctxt->regs)) - error_code |= X86_PF_USER; - - ctxt->fi.vector = X86_TRAP_PF; - ctxt->fi.error_code = error_code; - ctxt->fi.cr2 = (unsigned long)dst; - - return ES_EXCEPTION; } -static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, - char *src, char *buf, size_t size) +static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action, + struct svsm_pvalidate_call *pc) { - unsigned long error_code = X86_PF_PROT; - - /* - * This function uses __get_user() independent of whether kernel or user - * memory is accessed. This works fine because __get_user() does no - * sanity checks of the pointer being accessed. All that it does is - * to report when the access failed. - * - * Also, this function runs in atomic context, so __get_user() is not - * allowed to sleep. The page-fault handler detects that it is running - * in atomic context and will not try to take mmap_sem and handle the - * fault, so additional pagefault_enable()/disable() calls are not - * needed. - * - * The access can't be done via copy_from_user() here because - * vc_read_mem() must not use string instructions to access unsafe - * memory. The reason is that MOVS is emulated by the #VC handler by - * splitting the move up into a read and a write and taking a nested #VC - * exception on whatever of them is the MMIO access. Using string - * instructions here would cause infinite nesting. 
- */ - switch (size) { - case 1: { - u8 d1; - u8 __user *s = (u8 __user *)src; - - if (__get_user(d1, s)) - goto fault; - memcpy(buf, &d1, 1); - break; - } - case 2: { - u16 d2; - u16 __user *s = (u16 __user *)src; - - if (__get_user(d2, s)) - goto fault; - memcpy(buf, &d2, 2); - break; - } - case 4: { - u32 d4; - u32 __user *s = (u32 __user *)src; - - if (__get_user(d4, s)) - goto fault; - memcpy(buf, &d4, 4); - break; - } - case 8: { - u64 d8; - u64 __user *s = (u64 __user *)src; - if (__get_user(d8, s)) - goto fault; - memcpy(buf, &d8, 8); - break; - } - default: - WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); - return ES_UNSUPPORTED; - } - - return ES_OK; - -fault: - if (user_mode(ctxt->regs)) - error_code |= X86_PF_USER; + struct svsm_pvalidate_entry *pe; - ctxt->fi.vector = X86_TRAP_PF; - ctxt->fi.error_code = error_code; - ctxt->fi.cr2 = (unsigned long)src; + /* Nothing in the CA yet */ + pc->num_entries = 0; + pc->cur_index = 0; - return ES_EXCEPTION; -} + pe = &pc->entry[0]; -static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, - unsigned long vaddr, phys_addr_t *paddr) -{ - unsigned long va = (unsigned long)vaddr; - unsigned int level; - phys_addr_t pa; - pgd_t *pgd; - pte_t *pte; + while (pfn < pfn_end) { + pe->page_size = RMP_PG_SIZE_4K; + pe->action = action; + pe->ignore_cf = 0; + pe->pfn = pfn; - pgd = __va(read_cr3_pa()); - pgd = &pgd[pgd_index(va)]; - pte = lookup_address_in_pgd(pgd, va, &level); - if (!pte) { - ctxt->fi.vector = X86_TRAP_PF; - ctxt->fi.cr2 = vaddr; - ctxt->fi.error_code = 0; + pe++; + pfn++; - if (user_mode(ctxt->regs)) - ctxt->fi.error_code |= X86_PF_USER; - - return ES_EXCEPTION; + pc->num_entries++; + if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) + break; } - if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC)) - /* Emulated MMIO to/from encrypted memory not supported */ - return ES_UNSUPPORTED; - - pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; - pa |= va & ~page_level_mask(level); - - *paddr = pa; - - return ES_OK; + return pfn; } -static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size) +static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry, + struct svsm_pvalidate_call *pc) { - BUG_ON(size > 4); - - if (user_mode(ctxt->regs)) { - struct thread_struct *t = ¤t->thread; - struct io_bitmap *iobm = t->io_bitmap; - size_t idx; - - if (!iobm) - goto fault; - - for (idx = port; idx < port + size; ++idx) { - if (test_bit(idx, iobm->bitmap)) - goto fault; - } - } - - return ES_OK; + struct svsm_pvalidate_entry *pe; + struct psc_entry *e; -fault: - ctxt->fi.vector = X86_TRAP_GP; - ctxt->fi.error_code = 0; + /* Nothing in the CA yet */ + pc->num_entries = 0; + pc->cur_index = 0; - return ES_EXCEPTION; -} + pe = &pc->entry[0]; + e = &desc->entries[desc_entry]; -static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt) -{ - long error_code = ctxt->fi.error_code; - int trapnr = ctxt->fi.vector; + while (desc_entry <= desc->hdr.end_entry) { + pe->page_size = e->pagesize ? 
RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; + pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; + pe->ignore_cf = 0; + pe->pfn = e->gfn; - ctxt->regs->orig_ax = ctxt->fi.error_code; + pe++; + e++; - switch (trapnr) { - case X86_TRAP_GP: - exc_general_protection(ctxt->regs, error_code); - break; - case X86_TRAP_UD: - exc_invalid_op(ctxt->regs); - break; - case X86_TRAP_PF: - write_cr2(ctxt->fi.cr2); - exc_page_fault(ctxt->regs, error_code); - break; - case X86_TRAP_AC: - exc_alignment_check(ctxt->regs, error_code); - break; - default: - pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n"); - BUG(); + desc_entry++; + pc->num_entries++; + if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) + break; } -} - -/* Include code shared with pre-decompression boot stage */ -#include "shared.c" - -static inline struct svsm_ca *svsm_get_caa(void) -{ - /* - * Use rIP-relative references when called early in the boot. If - * ->use_cas is set, then it is late in the boot and no need - * to worry about rIP-relative references. - */ - if (RIP_REL_REF(sev_cfg).use_cas) - return this_cpu_read(svsm_caa); - else - return RIP_REL_REF(boot_svsm_caa); -} - -static u64 svsm_get_caa_pa(void) -{ - /* - * Use rIP-relative references when called early in the boot. If - * ->use_cas is set, then it is late in the boot and no need - * to worry about rIP-relative references. - */ - if (RIP_REL_REF(sev_cfg).use_cas) - return this_cpu_read(svsm_caa_pa); - else - return RIP_REL_REF(boot_svsm_caa_pa); -} - -static noinstr void __sev_put_ghcb(struct ghcb_state *state) -{ - struct sev_es_runtime_data *data; - struct ghcb *ghcb; - - WARN_ON(!irqs_disabled()); - - data = this_cpu_read(runtime_data); - ghcb = &data->ghcb_page; - if (state->ghcb) { - /* Restore GHCB from Backup */ - *ghcb = *state->ghcb; - data->backup_ghcb_active = false; - state->ghcb = NULL; - } else { - /* - * Invalidate the GHCB so a VMGEXIT instruction issued - * from userspace won't appear to be valid. - */ - vc_ghcb_invalidate(ghcb); - data->ghcb_active = false; - } + return desc_entry; } -static int svsm_perform_call_protocol(struct svsm_call *call) +static void svsm_pval_pages(struct snp_psc_desc *desc) { - struct ghcb_state state; + struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY]; + unsigned int i, pv_4k_count = 0; + struct svsm_pvalidate_call *pc; + struct svsm_call call = {}; unsigned long flags; - struct ghcb *ghcb; + bool action; + u64 pc_pa; int ret; /* @@ -666,180 +289,145 @@ static int svsm_perform_call_protocol(struct svsm_call *call) flags = native_local_irq_save(); /* - * Use rip-relative references when called early in the boot. If - * ghcbs_initialized is set, then it is late in the boot and no need - * to worry about rip-relative references in called functions. + * The SVSM calling area (CA) can support processing 510 entries at a + * time. Loop through the Page State Change descriptor until the CA is + * full or the last entry in the descriptor is reached, at which time + * the SVSM is invoked. This repeats until all entries in the descriptor + * are processed. */ - if (RIP_REL_REF(sev_cfg).ghcbs_initialized) - ghcb = __sev_get_ghcb(&state); - else if (RIP_REL_REF(boot_ghcb)) - ghcb = RIP_REL_REF(boot_ghcb); - else - ghcb = NULL; + call.caa = svsm_get_caa(); - do { - ret = ghcb ? 
svsm_perform_ghcb_protocol(ghcb, call) - : svsm_perform_msr_protocol(call); - } while (ret == -EAGAIN); + pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer; + pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); - if (RIP_REL_REF(sev_cfg).ghcbs_initialized) - __sev_put_ghcb(&state); + /* Protocol 0, Call ID 1 */ + call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE); + call.rcx = pc_pa; - native_local_irq_restore(flags); + for (i = 0; i <= desc->hdr.end_entry;) { + i = svsm_build_ca_from_psc_desc(desc, i, pc); - return ret; -} + do { + ret = svsm_perform_call_protocol(&call); + if (!ret) + continue; -void noinstr __sev_es_nmi_complete(void) -{ - struct ghcb_state state; - struct ghcb *ghcb; + /* + * Check if the entry failed because of an RMP mismatch (a + * PVALIDATE at 2M was requested, but the page is mapped in + * the RMP as 4K). + */ - ghcb = __sev_get_ghcb(&state); + if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH && + pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) { + /* Save this entry for post-processing at 4K */ + pv_4k[pv_4k_count++] = pc->entry[pc->cur_index]; + + /* Skip to the next one unless at the end of the list */ + pc->cur_index++; + if (pc->cur_index < pc->num_entries) + ret = -EAGAIN; + else + ret = 0; + } + } while (ret == -EAGAIN); - vc_ghcb_invalidate(ghcb); - ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE); - ghcb_set_sw_exit_info_1(ghcb, 0); - ghcb_set_sw_exit_info_2(ghcb, 0); + if (ret) + svsm_pval_terminate(pc, ret, call.rax_out); + } - sev_es_wr_ghcb_msr(__pa_nodebug(ghcb)); - VMGEXIT(); + /* Process any entries that failed to be validated at 2M and validate them at 4K */ + for (i = 0; i < pv_4k_count; i++) { + u64 pfn, pfn_end; - __sev_put_ghcb(&state); -} + action = pv_4k[i].action; + pfn = pv_4k[i].pfn; + pfn_end = pfn + 512; -static u64 __init get_snp_jump_table_addr(void) -{ - struct snp_secrets_page *secrets; - void __iomem *mem; - u64 addr; + while (pfn < pfn_end) { + pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc); - mem = ioremap_encrypted(secrets_pa, PAGE_SIZE); - if (!mem) { - pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n"); - return 0; + ret = svsm_perform_call_protocol(&call); + if (ret) + svsm_pval_terminate(pc, ret, call.rax_out); + } } - secrets = (__force struct snp_secrets_page *)mem; - - addr = secrets->os_area.ap_jump_table_pa; - iounmap(mem); - - return addr; + native_local_irq_restore(flags); } -static u64 __init get_jump_table_addr(void) +static void pvalidate_pages(struct snp_psc_desc *desc) { - struct ghcb_state state; - unsigned long flags; - struct ghcb *ghcb; - u64 ret = 0; - - if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) - return get_snp_jump_table_addr(); - - local_irq_save(flags); + if (snp_vmpl) + svsm_pval_pages(desc); + else + pval_pages(desc); +} - ghcb = __sev_get_ghcb(&state); +static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc) +{ + int cur_entry, end_entry, ret = 0; + struct snp_psc_desc *data; + struct es_em_ctxt ctxt; vc_ghcb_invalidate(ghcb); - ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE); - ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE); - ghcb_set_sw_exit_info_2(ghcb, 0); - sev_es_wr_ghcb_msr(__pa(ghcb)); - VMGEXIT(); - - if (ghcb_sw_exit_info_1_is_valid(ghcb) && - ghcb_sw_exit_info_2_is_valid(ghcb)) - ret = ghcb->save.sw_exit_info_2; - - __sev_put_ghcb(&state); - - local_irq_restore(flags); + /* Copy the input desc into GHCB shared buffer */ + data = (struct snp_psc_desc *)ghcb->shared_buffer; 
+ memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc))); - return ret; -} - -static void __head -early_set_pages_state(unsigned long vaddr, unsigned long paddr, - unsigned long npages, enum psc_op op) -{ - unsigned long paddr_end; - u64 val; - - vaddr = vaddr & PAGE_MASK; + /* + * As per the GHCB specification, the hypervisor can resume the guest + * before processing all the entries. Check whether all the entries + * are processed. If not, then keep retrying. Note, the hypervisor + * will update the data memory directly to indicate the status, so + * reference the data->hdr everywhere. + * + * The strategy here is to wait for the hypervisor to change the page + * state in the RMP table before guest accesses the memory pages. If the + * page state change was not successful, then later memory access will + * result in a crash. + */ + cur_entry = data->hdr.cur_entry; + end_entry = data->hdr.end_entry; - paddr = paddr & PAGE_MASK; - paddr_end = paddr + (npages << PAGE_SHIFT); + while (data->hdr.cur_entry <= data->hdr.end_entry) { + ghcb_set_sw_scratch(ghcb, (u64)__pa(data)); - while (paddr < paddr_end) { - /* Page validation must be rescinded before changing to shared */ - if (op == SNP_PAGE_STATE_SHARED) - pvalidate_4k_page(vaddr, paddr, false); + /* This will advance the shared buffer data points to. */ + ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0); /* - * Use the MSR protocol because this function can be called before - * the GHCB is established. + * Page State Change VMGEXIT can pass error code through + * exit_info_2. */ - sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op)); - VMGEXIT(); - - val = sev_es_rd_ghcb_msr(); - - if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) - goto e_term; - - if (GHCB_MSR_PSC_RESP_VAL(val)) - goto e_term; + if (WARN(ret || ghcb->save.sw_exit_info_2, + "SNP: PSC failed ret=%d exit_info_2=%llx\n", + ret, ghcb->save.sw_exit_info_2)) { + ret = 1; + goto out; + } - /* Page validation must be performed after changing to private */ - if (op == SNP_PAGE_STATE_PRIVATE) - pvalidate_4k_page(vaddr, paddr, true); + /* Verify that reserved bit is not set */ + if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) { + ret = 1; + goto out; + } - vaddr += PAGE_SIZE; - paddr += PAGE_SIZE; + /* + * Sanity check that entry processing is not going backwards. + * This will happen only if hypervisor is tricking us. + */ + if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry, +"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n", + end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) { + ret = 1; + goto out; + } } - return; - -e_term: - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC); -} - -void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, - unsigned long npages) -{ - /* - * This can be invoked in early boot while running identity mapped, so - * use an open coded check for SNP instead of using cc_platform_has(). - * This eliminates worries about jump tables or checking boot_cpu_data - * in the cc_platform_has() function. - */ - if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED)) - return; - - /* - * Ask the hypervisor to mark the memory pages as private in the RMP - * table. 
- */ - early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE); -} - -void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, - unsigned long npages) -{ - /* - * This can be invoked in early boot while running identity mapped, so - * use an open coded check for SNP instead of using cc_platform_has(). - * This eliminates worries about jump tables or checking boot_cpu_data - * in the cc_platform_has() function. - */ - if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED)) - return; - - /* Ask hypervisor to mark the memory pages shared in the RMP table. */ - early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED); +out: + return ret; } static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr, @@ -1492,90 +1080,6 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd) return 0; } -/* Writes to the SVSM CAA MSR are ignored */ -static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write) -{ - if (write) - return ES_OK; - - regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa)); - regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa)); - - return ES_OK; -} - -/* - * TSC related accesses should not exit to the hypervisor when a guest is - * executing with Secure TSC enabled, so special handling is required for - * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ. - */ -static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write) -{ - u64 tsc; - - /* - * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled. - * Terminate the SNP guest when the interception is enabled. - */ - if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ) - return ES_VMM_ERROR; - - /* - * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC - * to return undefined values, so ignore all writes. - * - * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use - * the value returned by rdtsc_ordered(). - */ - if (write) { - WARN_ONCE(1, "TSC MSR writes are verboten!\n"); - return ES_OK; - } - - tsc = rdtsc_ordered(); - regs->ax = lower_32_bits(tsc); - regs->dx = upper_32_bits(tsc); - - return ES_OK; -} - -static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) -{ - struct pt_regs *regs = ctxt->regs; - enum es_result ret; - bool write; - - /* Is it a WRMSR? 
*/ - write = ctxt->insn.opcode.bytes[1] == 0x30; - - switch (regs->cx) { - case MSR_SVSM_CAA: - return __vc_handle_msr_caa(regs, write); - case MSR_IA32_TSC: - case MSR_AMD64_GUEST_TSC_FREQ: - if (sev_status & MSR_AMD64_SNP_SECURE_TSC) - return __vc_handle_secure_tsc_msrs(regs, write); - break; - default: - break; - } - - ghcb_set_rcx(ghcb, regs->cx); - if (write) { - ghcb_set_rax(ghcb, regs->ax); - ghcb_set_rdx(ghcb, regs->dx); - } - - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0); - - if ((ret == ES_OK) && !write) { - regs->ax = ghcb->save.rax; - regs->dx = ghcb->save.rdx; - } - - return ret; -} - static void snp_register_per_cpu_ghcb(void) { struct sev_es_runtime_data *data; @@ -1788,748 +1292,6 @@ void __init sev_es_init_vc_handling(void) initial_vc_handler = (unsigned long)kernel_exc_vmm_communication; } -static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt) -{ - int trapnr = ctxt->fi.vector; - - if (trapnr == X86_TRAP_PF) - native_write_cr2(ctxt->fi.cr2); - - ctxt->regs->orig_ax = ctxt->fi.error_code; - do_early_exception(ctxt->regs, trapnr); -} - -static long *vc_insn_get_rm(struct es_em_ctxt *ctxt) -{ - long *reg_array; - int offset; - - reg_array = (long *)ctxt->regs; - offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs); - - if (offset < 0) - return NULL; - - offset /= sizeof(long); - - return reg_array + offset; -} -static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, - unsigned int bytes, bool read) -{ - u64 exit_code, exit_info_1, exit_info_2; - unsigned long ghcb_pa = __pa(ghcb); - enum es_result res; - phys_addr_t paddr; - void __user *ref; - - ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs); - if (ref == (void __user *)-1L) - return ES_UNSUPPORTED; - - exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE; - - res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr); - if (res != ES_OK) { - if (res == ES_EXCEPTION && !read) - ctxt->fi.error_code |= X86_PF_WRITE; - - return res; - } - - exit_info_1 = paddr; - /* Can never be greater than 8 */ - exit_info_2 = bytes; - - ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer)); - - return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2); -} - -/* - * The MOVS instruction has two memory operands, which raises the - * problem that it is not known whether the access to the source or the - * destination caused the #VC exception (and hence whether an MMIO read - * or write operation needs to be emulated). - * - * Instead of playing games with walking page-tables and trying to guess - * whether the source or destination is an MMIO range, split the move - * into two operations, a read and a write with only one memory operand. - * This will cause a nested #VC exception on the MMIO address which can - * then be handled. - * - * This implementation has the benefit that it also supports MOVS where - * source _and_ destination are MMIO regions. - * - * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a - * rare operation. If it turns out to be a performance problem the split - * operations can be moved to memcpy_fromio() and memcpy_toio(). 
- */ -static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt, - unsigned int bytes) -{ - unsigned long ds_base, es_base; - unsigned char *src, *dst; - unsigned char buffer[8]; - enum es_result ret; - bool rep; - int off; - - ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS); - es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); - - if (ds_base == -1L || es_base == -1L) { - ctxt->fi.vector = X86_TRAP_GP; - ctxt->fi.error_code = 0; - return ES_EXCEPTION; - } - - src = ds_base + (unsigned char *)ctxt->regs->si; - dst = es_base + (unsigned char *)ctxt->regs->di; - - ret = vc_read_mem(ctxt, src, buffer, bytes); - if (ret != ES_OK) - return ret; - - ret = vc_write_mem(ctxt, dst, buffer, bytes); - if (ret != ES_OK) - return ret; - - if (ctxt->regs->flags & X86_EFLAGS_DF) - off = -bytes; - else - off = bytes; - - ctxt->regs->si += off; - ctxt->regs->di += off; - - rep = insn_has_rep_prefix(&ctxt->insn); - if (rep) - ctxt->regs->cx -= 1; - - if (!rep || ctxt->regs->cx == 0) - return ES_OK; - else - return ES_RETRY; -} - -static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) -{ - struct insn *insn = &ctxt->insn; - enum insn_mmio_type mmio; - unsigned int bytes = 0; - enum es_result ret; - u8 sign_byte; - long *reg_data; - - mmio = insn_decode_mmio(insn, &bytes); - if (mmio == INSN_MMIO_DECODE_FAILED) - return ES_DECODE_FAILED; - - if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) { - reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs); - if (!reg_data) - return ES_DECODE_FAILED; - } - - if (user_mode(ctxt->regs)) - return ES_UNSUPPORTED; - - switch (mmio) { - case INSN_MMIO_WRITE: - memcpy(ghcb->shared_buffer, reg_data, bytes); - ret = vc_do_mmio(ghcb, ctxt, bytes, false); - break; - case INSN_MMIO_WRITE_IMM: - memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes); - ret = vc_do_mmio(ghcb, ctxt, bytes, false); - break; - case INSN_MMIO_READ: - ret = vc_do_mmio(ghcb, ctxt, bytes, true); - if (ret) - break; - - /* Zero-extend for 32-bit operation */ - if (bytes == 4) - *reg_data = 0; - - memcpy(reg_data, ghcb->shared_buffer, bytes); - break; - case INSN_MMIO_READ_ZERO_EXTEND: - ret = vc_do_mmio(ghcb, ctxt, bytes, true); - if (ret) - break; - - /* Zero extend based on operand size */ - memset(reg_data, 0, insn->opnd_bytes); - memcpy(reg_data, ghcb->shared_buffer, bytes); - break; - case INSN_MMIO_READ_SIGN_EXTEND: - ret = vc_do_mmio(ghcb, ctxt, bytes, true); - if (ret) - break; - - if (bytes == 1) { - u8 *val = (u8 *)ghcb->shared_buffer; - - sign_byte = (*val & 0x80) ? 0xff : 0x00; - } else { - u16 *val = (u16 *)ghcb->shared_buffer; - - sign_byte = (*val & 0x8000) ? 
0xff : 0x00; - } - - /* Sign extend based on operand size */ - memset(reg_data, sign_byte, insn->opnd_bytes); - memcpy(reg_data, ghcb->shared_buffer, bytes); - break; - case INSN_MMIO_MOVS: - ret = vc_handle_mmio_movs(ctxt, bytes); - break; - default: - ret = ES_UNSUPPORTED; - break; - } - - return ret; -} - -static enum es_result vc_handle_dr7_write(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - struct sev_es_runtime_data *data = this_cpu_read(runtime_data); - long val, *reg = vc_insn_get_rm(ctxt); - enum es_result ret; - - if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) - return ES_VMM_ERROR; - - if (!reg) - return ES_DECODE_FAILED; - - val = *reg; - - /* Upper 32 bits must be written as zeroes */ - if (val >> 32) { - ctxt->fi.vector = X86_TRAP_GP; - ctxt->fi.error_code = 0; - return ES_EXCEPTION; - } - - /* Clear out other reserved bits and set bit 10 */ - val = (val & 0xffff23ffL) | BIT(10); - - /* Early non-zero writes to DR7 are not supported */ - if (!data && (val & ~DR7_RESET_VALUE)) - return ES_UNSUPPORTED; - - /* Using a value of 0 for ExitInfo1 means RAX holds the value */ - ghcb_set_rax(ghcb, val); - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0); - if (ret != ES_OK) - return ret; - - if (data) - data->dr7 = val; - - return ES_OK; -} - -static enum es_result vc_handle_dr7_read(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - struct sev_es_runtime_data *data = this_cpu_read(runtime_data); - long *reg = vc_insn_get_rm(ctxt); - - if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) - return ES_VMM_ERROR; - - if (!reg) - return ES_DECODE_FAILED; - - if (data) - *reg = data->dr7; - else - *reg = DR7_RESET_VALUE; - - return ES_OK; -} - -static enum es_result vc_handle_wbinvd(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0); -} - -static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt) -{ - enum es_result ret; - - ghcb_set_rcx(ghcb, ctxt->regs->cx); - - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0); - if (ret != ES_OK) - return ret; - - if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb))) - return ES_VMM_ERROR; - - ctxt->regs->ax = ghcb->save.rax; - ctxt->regs->dx = ghcb->save.rdx; - - return ES_OK; -} - -static enum es_result vc_handle_monitor(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - /* - * Treat it as a NOP and do not leak a physical address to the - * hypervisor. - */ - return ES_OK; -} - -static enum es_result vc_handle_mwait(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - /* Treat the same as MONITOR/MONITORX */ - return ES_OK; -} - -static enum es_result vc_handle_vmmcall(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - enum es_result ret; - - ghcb_set_rax(ghcb, ctxt->regs->ax); - ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0); - - if (x86_platform.hyper.sev_es_hcall_prepare) - x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs); - - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0); - if (ret != ES_OK) - return ret; - - if (!ghcb_rax_is_valid(ghcb)) - return ES_VMM_ERROR; - - ctxt->regs->ax = ghcb->save.rax; - - /* - * Call sev_es_hcall_finish() after regs->ax is already set. - * This allows the hypervisor handler to overwrite it again if - * necessary. 
- */ - if (x86_platform.hyper.sev_es_hcall_finish && - !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs)) - return ES_VMM_ERROR; - - return ES_OK; -} - -static enum es_result vc_handle_trap_ac(struct ghcb *ghcb, - struct es_em_ctxt *ctxt) -{ - /* - * Calling ecx_alignment_check() directly does not work, because it - * enables IRQs and the GHCB is active. Forward the exception and call - * it later from vc_forward_exception(). - */ - ctxt->fi.vector = X86_TRAP_AC; - ctxt->fi.error_code = 0; - return ES_EXCEPTION; -} - -static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt, - struct ghcb *ghcb, - unsigned long exit_code) -{ - enum es_result result = vc_check_opcode_bytes(ctxt, exit_code); - - if (result != ES_OK) - return result; - - switch (exit_code) { - case SVM_EXIT_READ_DR7: - result = vc_handle_dr7_read(ghcb, ctxt); - break; - case SVM_EXIT_WRITE_DR7: - result = vc_handle_dr7_write(ghcb, ctxt); - break; - case SVM_EXIT_EXCP_BASE + X86_TRAP_AC: - result = vc_handle_trap_ac(ghcb, ctxt); - break; - case SVM_EXIT_RDTSC: - case SVM_EXIT_RDTSCP: - result = vc_handle_rdtsc(ghcb, ctxt, exit_code); - break; - case SVM_EXIT_RDPMC: - result = vc_handle_rdpmc(ghcb, ctxt); - break; - case SVM_EXIT_INVD: - pr_err_ratelimited("#VC exception for INVD??? Seriously???\n"); - result = ES_UNSUPPORTED; - break; - case SVM_EXIT_CPUID: - result = vc_handle_cpuid(ghcb, ctxt); - break; - case SVM_EXIT_IOIO: - result = vc_handle_ioio(ghcb, ctxt); - break; - case SVM_EXIT_MSR: - result = vc_handle_msr(ghcb, ctxt); - break; - case SVM_EXIT_VMMCALL: - result = vc_handle_vmmcall(ghcb, ctxt); - break; - case SVM_EXIT_WBINVD: - result = vc_handle_wbinvd(ghcb, ctxt); - break; - case SVM_EXIT_MONITOR: - result = vc_handle_monitor(ghcb, ctxt); - break; - case SVM_EXIT_MWAIT: - result = vc_handle_mwait(ghcb, ctxt); - break; - case SVM_EXIT_NPF: - result = vc_handle_mmio(ghcb, ctxt); - break; - default: - /* - * Unexpected #VC exception - */ - result = ES_UNSUPPORTED; - } - - return result; -} - -static __always_inline bool is_vc2_stack(unsigned long sp) -{ - return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2)); -} - -static __always_inline bool vc_from_invalid_context(struct pt_regs *regs) -{ - unsigned long sp, prev_sp; - - sp = (unsigned long)regs; - prev_sp = regs->sp; - - /* - * If the code was already executing on the VC2 stack when the #VC - * happened, let it proceed to the normal handling routine. This way the - * code executing on the VC2 stack can cause #VC exceptions to get handled. 
- */ - return is_vc2_stack(sp) && !is_vc2_stack(prev_sp); -} - -static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code) -{ - struct ghcb_state state; - struct es_em_ctxt ctxt; - enum es_result result; - struct ghcb *ghcb; - bool ret = true; - - ghcb = __sev_get_ghcb(&state); - - vc_ghcb_invalidate(ghcb); - result = vc_init_em_ctxt(&ctxt, regs, error_code); - - if (result == ES_OK) - result = vc_handle_exitcode(&ctxt, ghcb, error_code); - - __sev_put_ghcb(&state); - - /* Done - now check the result */ - switch (result) { - case ES_OK: - vc_finish_insn(&ctxt); - break; - case ES_UNSUPPORTED: - pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n", - error_code, regs->ip); - ret = false; - break; - case ES_VMM_ERROR: - pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", - error_code, regs->ip); - ret = false; - break; - case ES_DECODE_FAILED: - pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", - error_code, regs->ip); - ret = false; - break; - case ES_EXCEPTION: - vc_forward_exception(&ctxt); - break; - case ES_RETRY: - /* Nothing to do */ - break; - default: - pr_emerg("Unknown result in %s():%d\n", __func__, result); - /* - * Emulating the instruction which caused the #VC exception - * failed - can't continue so print debug information - */ - BUG(); - } - - return ret; -} - -static __always_inline bool vc_is_db(unsigned long error_code) -{ - return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB; -} - -/* - * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode - * and will panic when an error happens. - */ -DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication) -{ - irqentry_state_t irq_state; - - /* - * With the current implementation it is always possible to switch to a - * safe stack because #VC exceptions only happen at known places, like - * intercepted instructions or accesses to MMIO areas/IO ports. They can - * also happen with code instrumentation when the hypervisor intercepts - * #DB, but the critical paths are forbidden to be instrumented, so #DB - * exceptions currently also only happen in safe places. - * - * But keep this here in case the noinstr annotations are violated due - * to bug elsewhere. - */ - if (unlikely(vc_from_invalid_context(regs))) { - instrumentation_begin(); - panic("Can't handle #VC exception from unsupported context\n"); - instrumentation_end(); - } - - /* - * Handle #DB before calling into !noinstr code to avoid recursive #DB. - */ - if (vc_is_db(error_code)) { - exc_debug(regs); - return; - } - - irq_state = irqentry_nmi_enter(regs); - - instrumentation_begin(); - - if (!vc_raw_handle_exception(regs, error_code)) { - /* Show some debug info */ - show_regs(regs); - - /* Ask hypervisor to sev_es_terminate */ - sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); - - /* If that fails and we get here - just panic */ - panic("Returned from Terminate-Request to Hypervisor\n"); - } - - instrumentation_end(); - irqentry_nmi_exit(regs, irq_state); -} - -/* - * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode - * and will kill the current task with SIGBUS when an error happens. - */ -DEFINE_IDTENTRY_VC_USER(exc_vmm_communication) -{ - /* - * Handle #DB before calling into !noinstr code to avoid recursive #DB. 
- */ - if (vc_is_db(error_code)) { - noist_exc_debug(regs); - return; - } - - irqentry_enter_from_user_mode(regs); - instrumentation_begin(); - - if (!vc_raw_handle_exception(regs, error_code)) { - /* - * Do not kill the machine if user-space triggered the - * exception. Send SIGBUS instead and let user-space deal with - * it. - */ - force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0); - } - - instrumentation_end(); - irqentry_exit_to_user_mode(regs); -} - -bool __init handle_vc_boot_ghcb(struct pt_regs *regs) -{ - unsigned long exit_code = regs->orig_ax; - struct es_em_ctxt ctxt; - enum es_result result; - - vc_ghcb_invalidate(boot_ghcb); - - result = vc_init_em_ctxt(&ctxt, regs, exit_code); - if (result == ES_OK) - result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code); - - /* Done - now check the result */ - switch (result) { - case ES_OK: - vc_finish_insn(&ctxt); - break; - case ES_UNSUPPORTED: - early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n", - exit_code, regs->ip); - goto fail; - case ES_VMM_ERROR: - early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", - exit_code, regs->ip); - goto fail; - case ES_DECODE_FAILED: - early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", - exit_code, regs->ip); - goto fail; - case ES_EXCEPTION: - vc_early_forward_exception(&ctxt); - break; - case ES_RETRY: - /* Nothing to do */ - break; - default: - BUG(); - } - - return true; - -fail: - show_regs(regs); - - sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); -} - -/* - * Initial set up of SNP relies on information provided by the - * Confidential Computing blob, which can be passed to the kernel - * in the following ways, depending on how it is booted: - * - * - when booted via the boot/decompress kernel: - * - via boot_params - * - * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH): - * - via a setup_data entry, as defined by the Linux Boot Protocol - * - * Scan for the blob in that order. - */ -static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp) -{ - struct cc_blob_sev_info *cc_info; - - /* Boot kernel would have passed the CC blob via boot_params. */ - if (bp->cc_blob_address) { - cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address; - goto found_cc_info; - } - - /* - * If kernel was booted directly, without the use of the - * boot/decompression kernel, the CC blob may have been passed via - * setup_data instead. - */ - cc_info = find_cc_blob_setup_data(bp); - if (!cc_info) - return NULL; - -found_cc_info: - if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC) - snp_abort(); - - return cc_info; -} - -static __head void svsm_setup(struct cc_blob_sev_info *cc_info) -{ - struct svsm_call call = {}; - int ret; - u64 pa; - - /* - * Record the SVSM Calling Area address (CAA) if the guest is not - * running at VMPL0. The CA will be used to communicate with the - * SVSM to perform the SVSM services. - */ - if (!svsm_setup_ca(cc_info)) - return; - - /* - * It is very early in the boot and the kernel is running identity - * mapped but without having adjusted the pagetables to where the - * kernel was loaded (physbase), so the get the CA address using - * RIP-relative addressing. - */ - pa = (u64)&RIP_REL_REF(boot_svsm_ca_page); - - /* - * Switch over to the boot SVSM CA while the current CA is still - * addressable. There is no GHCB at this point so use the MSR protocol. 
- * - * SVSM_CORE_REMAP_CA call: - * RAX = 0 (Protocol=0, CallID=0) - * RCX = New CA GPA - */ - call.caa = svsm_get_caa(); - call.rax = SVSM_CORE_CALL(SVSM_CORE_REMAP_CA); - call.rcx = pa; - ret = svsm_perform_call_protocol(&call); - if (ret) - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SVSM_CA_REMAP_FAIL); - - RIP_REL_REF(boot_svsm_caa) = (struct svsm_ca *)pa; - RIP_REL_REF(boot_svsm_caa_pa) = pa; -} - -bool __head snp_init(struct boot_params *bp) -{ - struct cc_blob_sev_info *cc_info; - - if (!bp) - return false; - - cc_info = find_cc_blob(bp); - if (!cc_info) - return false; - - if (cc_info->secrets_phys && cc_info->secrets_len == PAGE_SIZE) - secrets_pa = cc_info->secrets_phys; - else - return false; - - setup_cpuid_table(cc_info); - - svsm_setup(cc_info); - - /* - * The CC blob will be used later to access the secrets page. Cache - * it here like the boot kernel does. - */ - bp->cc_blob_address = (u32)(unsigned long)cc_info; - - return true; -} - -void __head __noreturn snp_abort(void) -{ - sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); -} - /* * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are * enabled, as the alternative (fallback) logic for DMI probing in the legacy @@ -2910,7 +1672,7 @@ struct snp_msg_desc *snp_msg_alloc(void) if (!mdesc) return ERR_PTR(-ENOMEM); - mem = ioremap_encrypted(secrets_pa, PAGE_SIZE); + mem = ioremap_encrypted(sev_secrets_pa, PAGE_SIZE); if (!mem) goto e_free_mdesc; @@ -3353,7 +2115,7 @@ void __init snp_secure_tsc_init(void) return; setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); - rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz); + rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz); snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000); x86_platform.calibrate_cpu = securetsc_get_tsc_khz; diff --git a/arch/x86/coco/sev/sev-nmi.c b/arch/x86/coco/sev/sev-nmi.c new file mode 100644 index 000000000000..d8dfaddfb367 --- /dev/null +++ b/arch/x86/coco/sev/sev-nmi.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#define pr_fmt(fmt) "SEV: " fmt + +#include <linux/bug.h> +#include <linux/kernel.h> + +#include <asm/cpu_entry_area.h> +#include <asm/msr.h> +#include <asm/ptrace.h> +#include <asm/sev.h> +#include <asm/sev-internal.h> + +static __always_inline bool on_vc_stack(struct pt_regs *regs) +{ + unsigned long sp = regs->sp; + + /* User-mode RSP is not trusted */ + if (user_mode(regs)) + return false; + + /* SYSCALL gap still has user-mode RSP */ + if (ip_within_syscall_gap(regs)) + return false; + + return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC))); +} + +/* + * This function handles the case when an NMI is raised in the #VC + * exception handler entry code, before the #VC handler has switched off + * its IST stack. In this case, the IST entry for #VC must be adjusted, + * so that any nested #VC exception will not overwrite the stack + * contents of the interrupted #VC handler. + * + * The IST entry is adjusted unconditionally so that it can be also be + * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a + * nested sev_es_ist_exit() call may adjust back the IST entry too + * early. + * + * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run + * on the NMI IST stack, as they are only called from NMI handling code + * right now. 
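[Editor's aside, not part of the patch: the comment above describes the "reserve a slot just below the new IST top and stash the old top there" trick that lets __sev_es_ist_exit() restore unconditionally even when adjustments nest. A minimal standalone C model of that idiom follows; the names (ist_enter_model/ist_exit_model) are made up and an ordinary buffer stands in for the per-CPU TSS IST entry.]

#include <assert.h>
#include <stdint.h>

static uint64_t fake_stack_words[512];		/* stand-in for the #VC IST stack */
static uintptr_t ist_top;			/* stand-in for tss.ist[IST_INDEX_VC] */

static void ist_enter_model(uintptr_t interrupted_sp, int interrupted_on_vc_stack)
{
	uintptr_t old_top = ist_top;
	uintptr_t new_top = interrupted_on_vc_stack ? interrupted_sp : old_top;

	/* Reserve one slot below the new top and stash the old top there,
	 * so the exit path can restore it without knowing whether the top
	 * was actually moved. */
	new_top -= sizeof(old_top);
	*(uintptr_t *)new_top = old_top;

	ist_top = new_top;
}

static void ist_exit_model(void)
{
	/* Unconditionally pop the stashed value back. */
	ist_top = *(uintptr_t *)ist_top;
}

int main(void)
{
	uintptr_t base = (uintptr_t)&fake_stack_words[512];

	ist_top = base;
	ist_enter_model(base - 256, 1);	/* NMI hit while already on the #VC stack */
	ist_enter_model(0, 0);		/* nested adjustment, interrupted code elsewhere */
	ist_exit_model();
	ist_exit_model();
	assert(ist_top == base);	/* adjustments unwind back to the original top */
	return 0;
}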
+ */ +void noinstr __sev_es_ist_enter(struct pt_regs *regs) +{ + unsigned long old_ist, new_ist; + + /* Read old IST entry */ + new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + + /* + * If NMI happened while on the #VC IST stack, set the new IST + * value below regs->sp, so that the interrupted stack frame is + * not overwritten by subsequent #VC exceptions. + */ + if (on_vc_stack(regs)) + new_ist = regs->sp; + + /* + * Reserve additional 8 bytes and store old IST value so this + * adjustment can be unrolled in __sev_es_ist_exit(). + */ + new_ist -= sizeof(old_ist); + *(unsigned long *)new_ist = old_ist; + + /* Set new IST entry */ + this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist); +} + +void noinstr __sev_es_ist_exit(void) +{ + unsigned long ist; + + /* Read IST entry */ + ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]); + + if (WARN_ON(ist == __this_cpu_ist_top_va(VC))) + return; + + /* Read back old IST entry and write it to the TSS */ + this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist); +} + +void noinstr __sev_es_nmi_complete(void) +{ + struct ghcb_state state; + struct ghcb *ghcb; + + ghcb = __sev_get_ghcb(&state); + + vc_ghcb_invalidate(ghcb); + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE); + ghcb_set_sw_exit_info_1(ghcb, 0); + ghcb_set_sw_exit_info_2(ghcb, 0); + + sev_es_wr_ghcb_msr(__pa_nodebug(ghcb)); + VMGEXIT(); + + __sev_put_ghcb(&state); +} diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c new file mode 100644 index 000000000000..0989d98da130 --- /dev/null +++ b/arch/x86/coco/sev/vc-handle.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2019 SUSE + * + * Author: Joerg Roedel <jroedel@suse.de> + */ + +#define pr_fmt(fmt) "SEV: " fmt + +#include <linux/sched/debug.h> /* For show_regs() */ +#include <linux/cc_platform.h> +#include <linux/printk.h> +#include <linux/mm_types.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/io.h> +#include <linux/psp-sev.h> +#include <uapi/linux/sev-guest.h> + +#include <asm/init.h> +#include <asm/stacktrace.h> +#include <asm/sev.h> +#include <asm/sev-internal.h> +#include <asm/insn-eval.h> +#include <asm/fpu/xcr.h> +#include <asm/processor.h> +#include <asm/setup.h> +#include <asm/traps.h> +#include <asm/svm.h> +#include <asm/smp.h> +#include <asm/cpu.h> +#include <asm/apic.h> +#include <asm/cpuid/api.h> + +static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned long vaddr, phys_addr_t *paddr) +{ + unsigned long va = (unsigned long)vaddr; + unsigned int level; + phys_addr_t pa; + pgd_t *pgd; + pte_t *pte; + + pgd = __va(read_cr3_pa()); + pgd = &pgd[pgd_index(va)]; + pte = lookup_address_in_pgd(pgd, va, &level); + if (!pte) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.cr2 = vaddr; + ctxt->fi.error_code = 0; + + if (user_mode(ctxt->regs)) + ctxt->fi.error_code |= X86_PF_USER; + + return ES_EXCEPTION; + } + + if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC)) + /* Emulated MMIO to/from encrypted memory not supported */ + return ES_UNSUPPORTED; + + pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; + pa |= va & ~page_level_mask(level); + + *paddr = pa; + + return ES_OK; +} + +static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size) +{ + BUG_ON(size > 4); + + if (user_mode(ctxt->regs)) { + struct thread_struct *t = ¤t->thread; + struct io_bitmap *iobm = t->io_bitmap; + size_t idx; + + if 
(!iobm) + goto fault; + + for (idx = port; idx < port + size; ++idx) { + if (test_bit(idx, iobm->bitmap)) + goto fault; + } + } + + return ES_OK; + +fault: + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + + return ES_EXCEPTION; +} + +void vc_forward_exception(struct es_em_ctxt *ctxt) +{ + long error_code = ctxt->fi.error_code; + int trapnr = ctxt->fi.vector; + + ctxt->regs->orig_ax = ctxt->fi.error_code; + + switch (trapnr) { + case X86_TRAP_GP: + exc_general_protection(ctxt->regs, error_code); + break; + case X86_TRAP_UD: + exc_invalid_op(ctxt->regs); + break; + case X86_TRAP_PF: + write_cr2(ctxt->fi.cr2); + exc_page_fault(ctxt->regs, error_code); + break; + case X86_TRAP_AC: + exc_alignment_check(ctxt->regs, error_code); + break; + default: + pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n"); + BUG(); + } +} + +static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, + unsigned char *buffer) +{ + return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); +} + +static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + int insn_bytes; + + insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer); + if (insn_bytes == 0) { + /* Nothing could be copied */ + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; + } else if (insn_bytes == -EINVAL) { + /* Effective RIP could not be calculated */ + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + ctxt->fi.cr2 = 0; + return ES_EXCEPTION; + } + + if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes)) + return ES_DECODE_FAILED; + + if (ctxt->insn.immediate.got) + return ES_OK; + else + return ES_DECODE_FAILED; +} + +static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt) +{ + char buffer[MAX_INSN_SIZE]; + int res, ret; + + res = vc_fetch_insn_kernel(ctxt, buffer); + if (res) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_INSTR; + ctxt->fi.cr2 = ctxt->regs->ip; + return ES_EXCEPTION; + } + + ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64); + if (ret < 0) + return ES_DECODE_FAILED; + else + return ES_OK; +} + +static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) +{ + if (user_mode(ctxt->regs)) + return __vc_decode_user_insn(ctxt); + else + return __vc_decode_kern_insn(ctxt); +} + +static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, + char *dst, char *buf, size_t size) +{ + unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; + + /* + * This function uses __put_user() independent of whether kernel or user + * memory is accessed. This works fine because __put_user() does no + * sanity checks of the pointer being accessed. All that it does is + * to report when the access failed. + * + * Also, this function runs in atomic context, so __put_user() is not + * allowed to sleep. The page-fault handler detects that it is running + * in atomic context and will not try to take mmap_sem and handle the + * fault, so additional pagefault_enable()/disable() calls are not + * needed. + * + * The access can't be done via copy_to_user() here because + * vc_write_mem() must not use string instructions to access unsafe + * memory. The reason is that MOVS is emulated by the #VC handler by + * splitting the move up into a read and a write and taking a nested #VC + * exception on whatever of them is the MMIO access. 
Using string + * instructions here would cause infinite nesting. + */ + switch (size) { + case 1: { + u8 d1; + u8 __user *target = (u8 __user *)dst; + + memcpy(&d1, buf, 1); + if (__put_user(d1, target)) + goto fault; + break; + } + case 2: { + u16 d2; + u16 __user *target = (u16 __user *)dst; + + memcpy(&d2, buf, 2); + if (__put_user(d2, target)) + goto fault; + break; + } + case 4: { + u32 d4; + u32 __user *target = (u32 __user *)dst; + + memcpy(&d4, buf, 4); + if (__put_user(d4, target)) + goto fault; + break; + } + case 8: { + u64 d8; + u64 __user *target = (u64 __user *)dst; + + memcpy(&d8, buf, 8); + if (__put_user(d8, target)) + goto fault; + break; + } + default: + WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); + return ES_UNSUPPORTED; + } + + return ES_OK; + +fault: + if (user_mode(ctxt->regs)) + error_code |= X86_PF_USER; + + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = error_code; + ctxt->fi.cr2 = (unsigned long)dst; + + return ES_EXCEPTION; +} + +static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, + char *src, char *buf, size_t size) +{ + unsigned long error_code = X86_PF_PROT; + + /* + * This function uses __get_user() independent of whether kernel or user + * memory is accessed. This works fine because __get_user() does no + * sanity checks of the pointer being accessed. All that it does is + * to report when the access failed. + * + * Also, this function runs in atomic context, so __get_user() is not + * allowed to sleep. The page-fault handler detects that it is running + * in atomic context and will not try to take mmap_sem and handle the + * fault, so additional pagefault_enable()/disable() calls are not + * needed. + * + * The access can't be done via copy_from_user() here because + * vc_read_mem() must not use string instructions to access unsafe + * memory. The reason is that MOVS is emulated by the #VC handler by + * splitting the move up into a read and a write and taking a nested #VC + * exception on whatever of them is the MMIO access. Using string + * instructions here would cause infinite nesting. + */ + switch (size) { + case 1: { + u8 d1; + u8 __user *s = (u8 __user *)src; + + if (__get_user(d1, s)) + goto fault; + memcpy(buf, &d1, 1); + break; + } + case 2: { + u16 d2; + u16 __user *s = (u16 __user *)src; + + if (__get_user(d2, s)) + goto fault; + memcpy(buf, &d2, 2); + break; + } + case 4: { + u32 d4; + u32 __user *s = (u32 __user *)src; + + if (__get_user(d4, s)) + goto fault; + memcpy(buf, &d4, 4); + break; + } + case 8: { + u64 d8; + u64 __user *s = (u64 __user *)src; + if (__get_user(d8, s)) + goto fault; + memcpy(buf, &d8, 8); + break; + } + default: + WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); + return ES_UNSUPPORTED; + } + + return ES_OK; + +fault: + if (user_mode(ctxt->regs)) + error_code |= X86_PF_USER; + + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = error_code; + ctxt->fi.cr2 = (unsigned long)src; + + return ES_EXCEPTION; +} + +#define sev_printk(fmt, ...) 
printk(fmt, ##__VA_ARGS__) + +#include "vc-shared.c" + +/* Writes to the SVSM CAA MSR are ignored */ +static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write) +{ + if (write) + return ES_OK; + + regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa)); + regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa)); + + return ES_OK; +} + +/* + * TSC related accesses should not exit to the hypervisor when a guest is + * executing with Secure TSC enabled, so special handling is required for + * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ. + */ +static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write) +{ + u64 tsc; + + /* + * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled. + * Terminate the SNP guest when the interception is enabled. + */ + if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ) + return ES_VMM_ERROR; + + /* + * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC + * to return undefined values, so ignore all writes. + * + * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use + * the value returned by rdtsc_ordered(). + */ + if (write) { + WARN_ONCE(1, "TSC MSR writes are verboten!\n"); + return ES_OK; + } + + tsc = rdtsc_ordered(); + regs->ax = lower_32_bits(tsc); + regs->dx = upper_32_bits(tsc); + + return ES_OK; +} + +static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + enum es_result ret; + bool write; + + /* Is it a WRMSR? */ + write = ctxt->insn.opcode.bytes[1] == 0x30; + + switch (regs->cx) { + case MSR_SVSM_CAA: + return __vc_handle_msr_caa(regs, write); + case MSR_IA32_TSC: + case MSR_AMD64_GUEST_TSC_FREQ: + if (sev_status & MSR_AMD64_SNP_SECURE_TSC) + return __vc_handle_secure_tsc_msrs(regs, write); + break; + default: + break; + } + + ghcb_set_rcx(ghcb, regs->cx); + if (write) { + ghcb_set_rax(ghcb, regs->ax); + ghcb_set_rdx(ghcb, regs->dx); + } + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0); + + if ((ret == ES_OK) && !write) { + regs->ax = ghcb->save.rax; + regs->dx = ghcb->save.rdx; + } + + return ret; +} + +static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt) +{ + int trapnr = ctxt->fi.vector; + + if (trapnr == X86_TRAP_PF) + native_write_cr2(ctxt->fi.cr2); + + ctxt->regs->orig_ax = ctxt->fi.error_code; + do_early_exception(ctxt->regs, trapnr); +} + +static long *vc_insn_get_rm(struct es_em_ctxt *ctxt) +{ + long *reg_array; + int offset; + + reg_array = (long *)ctxt->regs; + offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs); + + if (offset < 0) + return NULL; + + offset /= sizeof(long); + + return reg_array + offset; +} +static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned int bytes, bool read) +{ + u64 exit_code, exit_info_1, exit_info_2; + unsigned long ghcb_pa = __pa(ghcb); + enum es_result res; + phys_addr_t paddr; + void __user *ref; + + ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs); + if (ref == (void __user *)-1L) + return ES_UNSUPPORTED; + + exit_code = read ? 
SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE; + + res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr); + if (res != ES_OK) { + if (res == ES_EXCEPTION && !read) + ctxt->fi.error_code |= X86_PF_WRITE; + + return res; + } + + exit_info_1 = paddr; + /* Can never be greater than 8 */ + exit_info_2 = bytes; + + ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer)); + + return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2); +} + +/* + * The MOVS instruction has two memory operands, which raises the + * problem that it is not known whether the access to the source or the + * destination caused the #VC exception (and hence whether an MMIO read + * or write operation needs to be emulated). + * + * Instead of playing games with walking page-tables and trying to guess + * whether the source or destination is an MMIO range, split the move + * into two operations, a read and a write with only one memory operand. + * This will cause a nested #VC exception on the MMIO address which can + * then be handled. + * + * This implementation has the benefit that it also supports MOVS where + * source _and_ destination are MMIO regions. + * + * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a + * rare operation. If it turns out to be a performance problem the split + * operations can be moved to memcpy_fromio() and memcpy_toio(). + */ +static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt, + unsigned int bytes) +{ + unsigned long ds_base, es_base; + unsigned char *src, *dst; + unsigned char buffer[8]; + enum es_result ret; + bool rep; + int off; + + ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS); + es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); + + if (ds_base == -1L || es_base == -1L) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } + + src = ds_base + (unsigned char *)ctxt->regs->si; + dst = es_base + (unsigned char *)ctxt->regs->di; + + ret = vc_read_mem(ctxt, src, buffer, bytes); + if (ret != ES_OK) + return ret; + + ret = vc_write_mem(ctxt, dst, buffer, bytes); + if (ret != ES_OK) + return ret; + + if (ctxt->regs->flags & X86_EFLAGS_DF) + off = -bytes; + else + off = bytes; + + ctxt->regs->si += off; + ctxt->regs->di += off; + + rep = insn_has_rep_prefix(&ctxt->insn); + if (rep) + ctxt->regs->cx -= 1; + + if (!rep || ctxt->regs->cx == 0) + return ES_OK; + else + return ES_RETRY; +} + +static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct insn *insn = &ctxt->insn; + enum insn_mmio_type mmio; + unsigned int bytes = 0; + enum es_result ret; + u8 sign_byte; + long *reg_data; + + mmio = insn_decode_mmio(insn, &bytes); + if (mmio == INSN_MMIO_DECODE_FAILED) + return ES_DECODE_FAILED; + + if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) { + reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs); + if (!reg_data) + return ES_DECODE_FAILED; + } + + if (user_mode(ctxt->regs)) + return ES_UNSUPPORTED; + + switch (mmio) { + case INSN_MMIO_WRITE: + memcpy(ghcb->shared_buffer, reg_data, bytes); + ret = vc_do_mmio(ghcb, ctxt, bytes, false); + break; + case INSN_MMIO_WRITE_IMM: + memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes); + ret = vc_do_mmio(ghcb, ctxt, bytes, false); + break; + case INSN_MMIO_READ: + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + /* Zero-extend for 32-bit operation */ + if (bytes == 4) + *reg_data = 0; + + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + case 
INSN_MMIO_READ_ZERO_EXTEND: + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + /* Zero extend based on operand size */ + memset(reg_data, 0, insn->opnd_bytes); + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + case INSN_MMIO_READ_SIGN_EXTEND: + ret = vc_do_mmio(ghcb, ctxt, bytes, true); + if (ret) + break; + + if (bytes == 1) { + u8 *val = (u8 *)ghcb->shared_buffer; + + sign_byte = (*val & 0x80) ? 0xff : 0x00; + } else { + u16 *val = (u16 *)ghcb->shared_buffer; + + sign_byte = (*val & 0x8000) ? 0xff : 0x00; + } + + /* Sign extend based on operand size */ + memset(reg_data, sign_byte, insn->opnd_bytes); + memcpy(reg_data, ghcb->shared_buffer, bytes); + break; + case INSN_MMIO_MOVS: + ret = vc_handle_mmio_movs(ctxt, bytes); + break; + default: + ret = ES_UNSUPPORTED; + break; + } + + return ret; +} + +static enum es_result vc_handle_dr7_write(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct sev_es_runtime_data *data = this_cpu_read(runtime_data); + long val, *reg = vc_insn_get_rm(ctxt); + enum es_result ret; + + if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) + return ES_VMM_ERROR; + + if (!reg) + return ES_DECODE_FAILED; + + val = *reg; + + /* Upper 32 bits must be written as zeroes */ + if (val >> 32) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } + + /* Clear out other reserved bits and set bit 10 */ + val = (val & 0xffff23ffL) | BIT(10); + + /* Early non-zero writes to DR7 are not supported */ + if (!data && (val & ~DR7_RESET_VALUE)) + return ES_UNSUPPORTED; + + /* Using a value of 0 for ExitInfo1 means RAX holds the value */ + ghcb_set_rax(ghcb, val); + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0); + if (ret != ES_OK) + return ret; + + if (data) + data->dr7 = val; + + return ES_OK; +} + +static enum es_result vc_handle_dr7_read(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct sev_es_runtime_data *data = this_cpu_read(runtime_data); + long *reg = vc_insn_get_rm(ctxt); + + if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP) + return ES_VMM_ERROR; + + if (!reg) + return ES_DECODE_FAILED; + + if (data) + *reg = data->dr7; + else + *reg = DR7_RESET_VALUE; + + return ES_OK; +} + +static enum es_result vc_handle_wbinvd(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0); +} + +static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + enum es_result ret; + + ghcb_set_rcx(ghcb, ctxt->regs->cx); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb))) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + ctxt->regs->dx = ghcb->save.rdx; + + return ES_OK; +} + +static enum es_result vc_handle_monitor(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* + * Treat it as a NOP and do not leak a physical address to the + * hypervisor. + */ + return ES_OK; +} + +static enum es_result vc_handle_mwait(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* Treat the same as MONITOR/MONITORX */ + return ES_OK; +} + +static enum es_result vc_handle_vmmcall(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + enum es_result ret; + + ghcb_set_rax(ghcb, ctxt->regs->ax); + ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 
3 : 0); + + if (x86_platform.hyper.sev_es_hcall_prepare) + x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0); + if (ret != ES_OK) + return ret; + + if (!ghcb_rax_is_valid(ghcb)) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + + /* + * Call sev_es_hcall_finish() after regs->ax is already set. + * This allows the hypervisor handler to overwrite it again if + * necessary. + */ + if (x86_platform.hyper.sev_es_hcall_finish && + !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs)) + return ES_VMM_ERROR; + + return ES_OK; +} + +static enum es_result vc_handle_trap_ac(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + /* + * Calling ecx_alignment_check() directly does not work, because it + * enables IRQs and the GHCB is active. Forward the exception and call + * it later from vc_forward_exception(). + */ + ctxt->fi.vector = X86_TRAP_AC; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; +} + +static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt, + struct ghcb *ghcb, + unsigned long exit_code) +{ + enum es_result result = vc_check_opcode_bytes(ctxt, exit_code); + + if (result != ES_OK) + return result; + + switch (exit_code) { + case SVM_EXIT_READ_DR7: + result = vc_handle_dr7_read(ghcb, ctxt); + break; + case SVM_EXIT_WRITE_DR7: + result = vc_handle_dr7_write(ghcb, ctxt); + break; + case SVM_EXIT_EXCP_BASE + X86_TRAP_AC: + result = vc_handle_trap_ac(ghcb, ctxt); + break; + case SVM_EXIT_RDTSC: + case SVM_EXIT_RDTSCP: + result = vc_handle_rdtsc(ghcb, ctxt, exit_code); + break; + case SVM_EXIT_RDPMC: + result = vc_handle_rdpmc(ghcb, ctxt); + break; + case SVM_EXIT_INVD: + pr_err_ratelimited("#VC exception for INVD??? Seriously???\n"); + result = ES_UNSUPPORTED; + break; + case SVM_EXIT_CPUID: + result = vc_handle_cpuid(ghcb, ctxt); + break; + case SVM_EXIT_IOIO: + result = vc_handle_ioio(ghcb, ctxt); + break; + case SVM_EXIT_MSR: + result = vc_handle_msr(ghcb, ctxt); + break; + case SVM_EXIT_VMMCALL: + result = vc_handle_vmmcall(ghcb, ctxt); + break; + case SVM_EXIT_WBINVD: + result = vc_handle_wbinvd(ghcb, ctxt); + break; + case SVM_EXIT_MONITOR: + result = vc_handle_monitor(ghcb, ctxt); + break; + case SVM_EXIT_MWAIT: + result = vc_handle_mwait(ghcb, ctxt); + break; + case SVM_EXIT_NPF: + result = vc_handle_mmio(ghcb, ctxt); + break; + default: + /* + * Unexpected #VC exception + */ + result = ES_UNSUPPORTED; + } + + return result; +} + +static __always_inline bool is_vc2_stack(unsigned long sp) +{ + return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2)); +} + +static __always_inline bool vc_from_invalid_context(struct pt_regs *regs) +{ + unsigned long sp, prev_sp; + + sp = (unsigned long)regs; + prev_sp = regs->sp; + + /* + * If the code was already executing on the VC2 stack when the #VC + * happened, let it proceed to the normal handling routine. This way the + * code executing on the VC2 stack can cause #VC exceptions to get handled. 
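[Editor's aside, not part of the patch: the check below compares two different stack pointers, the address of the pt_regs frame itself (i.e. the stack the handler is currently running on) and regs->sp (where the interrupted code was running). A standalone model with made-up names, illustrating why the combination "frame on the VC2 fallback stack, interrupted context not on it" is the unsupported case:]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct model_regs { uintptr_t sp; };

static bool on_stack(uintptr_t p, uintptr_t bottom, uintptr_t top)
{
	return p >= bottom && p < top;
}

static bool from_invalid_context(const struct model_regs *regs,
				 uintptr_t vc2_bottom, uintptr_t vc2_top)
{
	uintptr_t frame = (uintptr_t)regs;	/* the handler's own frame */

	/* The entry code only lands the frame on the VC2 fallback stack when
	 * no safe stack was available.  If the interrupted context was itself
	 * already on VC2, that is tolerated; otherwise the #VC came from a
	 * context the handler cannot deal with. */
	return on_stack(frame, vc2_bottom, vc2_top) &&
	       !on_stack(regs->sp, vc2_bottom, vc2_top);
}

int main(void)
{
	static uint64_t vc2[512];		/* stand-in VC2 stack */
	uintptr_t bottom = (uintptr_t)vc2;
	uintptr_t top = (uintptr_t)&vc2[512];
	struct model_regs *r = (struct model_regs *)(top - 256);

	r->sp = 0x1000;					/* interrupted code elsewhere */
	assert(from_invalid_context(r, bottom, top));	/* unsupported -> would panic */

	r->sp = top - 64;				/* interrupted code already on VC2 */
	assert(!from_invalid_context(r, bottom, top));	/* tolerated */
	return 0;
}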
+ */ + return is_vc2_stack(sp) && !is_vc2_stack(prev_sp); +} + +static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code) +{ + struct ghcb_state state; + struct es_em_ctxt ctxt; + enum es_result result; + struct ghcb *ghcb; + bool ret = true; + + ghcb = __sev_get_ghcb(&state); + + vc_ghcb_invalidate(ghcb); + result = vc_init_em_ctxt(&ctxt, regs, error_code); + + if (result == ES_OK) + result = vc_handle_exitcode(&ctxt, ghcb, error_code); + + __sev_put_ghcb(&state); + + /* Done - now check the result */ + switch (result) { + case ES_OK: + vc_finish_insn(&ctxt); + break; + case ES_UNSUPPORTED: + pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n", + error_code, regs->ip); + ret = false; + break; + case ES_VMM_ERROR: + pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", + error_code, regs->ip); + ret = false; + break; + case ES_DECODE_FAILED: + pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", + error_code, regs->ip); + ret = false; + break; + case ES_EXCEPTION: + vc_forward_exception(&ctxt); + break; + case ES_RETRY: + /* Nothing to do */ + break; + default: + pr_emerg("Unknown result in %s():%d\n", __func__, result); + /* + * Emulating the instruction which caused the #VC exception + * failed - can't continue so print debug information + */ + BUG(); + } + + return ret; +} + +static __always_inline bool vc_is_db(unsigned long error_code) +{ + return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB; +} + +/* + * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode + * and will panic when an error happens. + */ +DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication) +{ + irqentry_state_t irq_state; + + /* + * With the current implementation it is always possible to switch to a + * safe stack because #VC exceptions only happen at known places, like + * intercepted instructions or accesses to MMIO areas/IO ports. They can + * also happen with code instrumentation when the hypervisor intercepts + * #DB, but the critical paths are forbidden to be instrumented, so #DB + * exceptions currently also only happen in safe places. + * + * But keep this here in case the noinstr annotations are violated due + * to bug elsewhere. + */ + if (unlikely(vc_from_invalid_context(regs))) { + instrumentation_begin(); + panic("Can't handle #VC exception from unsupported context\n"); + instrumentation_end(); + } + + /* + * Handle #DB before calling into !noinstr code to avoid recursive #DB. + */ + if (vc_is_db(error_code)) { + exc_debug(regs); + return; + } + + irq_state = irqentry_nmi_enter(regs); + + instrumentation_begin(); + + if (!vc_raw_handle_exception(regs, error_code)) { + /* Show some debug info */ + show_regs(regs); + + /* Ask hypervisor to sev_es_terminate */ + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); + + /* If that fails and we get here - just panic */ + panic("Returned from Terminate-Request to Hypervisor\n"); + } + + instrumentation_end(); + irqentry_nmi_exit(regs, irq_state); +} + +/* + * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode + * and will kill the current task with SIGBUS when an error happens. + */ +DEFINE_IDTENTRY_VC_USER(exc_vmm_communication) +{ + /* + * Handle #DB before calling into !noinstr code to avoid recursive #DB. 
+ */ + if (vc_is_db(error_code)) { + noist_exc_debug(regs); + return; + } + + irqentry_enter_from_user_mode(regs); + instrumentation_begin(); + + if (!vc_raw_handle_exception(regs, error_code)) { + /* + * Do not kill the machine if user-space triggered the + * exception. Send SIGBUS instead and let user-space deal with + * it. + */ + force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0); + } + + instrumentation_end(); + irqentry_exit_to_user_mode(regs); +} + +bool __init handle_vc_boot_ghcb(struct pt_regs *regs) +{ + unsigned long exit_code = regs->orig_ax; + struct es_em_ctxt ctxt; + enum es_result result; + + vc_ghcb_invalidate(boot_ghcb); + + result = vc_init_em_ctxt(&ctxt, regs, exit_code); + if (result == ES_OK) + result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code); + + /* Done - now check the result */ + switch (result) { + case ES_OK: + vc_finish_insn(&ctxt); + break; + case ES_UNSUPPORTED: + early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_VMM_ERROR: + early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_DECODE_FAILED: + early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n", + exit_code, regs->ip); + goto fail; + case ES_EXCEPTION: + vc_early_forward_exception(&ctxt); + break; + case ES_RETRY: + /* Nothing to do */ + break; + default: + BUG(); + } + + return true; + +fail: + show_regs(regs); + + sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ); +} + diff --git a/arch/x86/coco/sev/vc-shared.c b/arch/x86/coco/sev/vc-shared.c new file mode 100644 index 000000000000..2c0ab0fdc060 --- /dev/null +++ b/arch/x86/coco/sev/vc-shared.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0 + +static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt, + unsigned long exit_code) +{ + unsigned int opcode = (unsigned int)ctxt->insn.opcode.value; + u8 modrm = ctxt->insn.modrm.value; + + switch (exit_code) { + + case SVM_EXIT_IOIO: + case SVM_EXIT_NPF: + /* handled separately */ + return ES_OK; + + case SVM_EXIT_CPUID: + if (opcode == 0xa20f) + return ES_OK; + break; + + case SVM_EXIT_INVD: + if (opcode == 0x080f) + return ES_OK; + break; + + case SVM_EXIT_MONITOR: + /* MONITOR and MONITORX instructions generate the same error code */ + if (opcode == 0x010f && (modrm == 0xc8 || modrm == 0xfa)) + return ES_OK; + break; + + case SVM_EXIT_MWAIT: + /* MWAIT and MWAITX instructions generate the same error code */ + if (opcode == 0x010f && (modrm == 0xc9 || modrm == 0xfb)) + return ES_OK; + break; + + case SVM_EXIT_MSR: + /* RDMSR */ + if (opcode == 0x320f || + /* WRMSR */ + opcode == 0x300f) + return ES_OK; + break; + + case SVM_EXIT_RDPMC: + if (opcode == 0x330f) + return ES_OK; + break; + + case SVM_EXIT_RDTSC: + if (opcode == 0x310f) + return ES_OK; + break; + + case SVM_EXIT_RDTSCP: + if (opcode == 0x010f && modrm == 0xf9) + return ES_OK; + break; + + case SVM_EXIT_READ_DR7: + if (opcode == 0x210f && + X86_MODRM_REG(ctxt->insn.modrm.value) == 7) + return ES_OK; + break; + + case SVM_EXIT_VMMCALL: + if (opcode == 0x010f && modrm == 0xd9) + return ES_OK; + + break; + + case SVM_EXIT_WRITE_DR7: + if (opcode == 0x230f && + X86_MODRM_REG(ctxt->insn.modrm.value) == 7) + return ES_OK; + break; + + case SVM_EXIT_WBINVD: + if (opcode == 0x90f) + return ES_OK; + break; + + default: + break; + } + + sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n", + 
opcode, exit_code, ctxt->regs->ip); + + return ES_UNSUPPORTED; +} + +static bool vc_decoding_needed(unsigned long exit_code) +{ + /* Exceptions don't require to decode the instruction */ + return !(exit_code >= SVM_EXIT_EXCP_BASE && + exit_code <= SVM_EXIT_LAST_EXCP); +} + +static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt, + struct pt_regs *regs, + unsigned long exit_code) +{ + enum es_result ret = ES_OK; + + memset(ctxt, 0, sizeof(*ctxt)); + ctxt->regs = regs; + + if (vc_decoding_needed(exit_code)) + ret = vc_decode_insn(ctxt); + + return ret; +} + +static void vc_finish_insn(struct es_em_ctxt *ctxt) +{ + ctxt->regs->ip += ctxt->insn.length; +} + +static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt, + unsigned long address, + bool write) +{ + if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) { + ctxt->fi.vector = X86_TRAP_PF; + ctxt->fi.error_code = X86_PF_USER; + ctxt->fi.cr2 = address; + if (write) + ctxt->fi.error_code |= X86_PF_WRITE; + + return ES_EXCEPTION; + } + + return ES_OK; +} + +static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt, + void *src, char *buf, + unsigned int data_size, + unsigned int count, + bool backwards) +{ + int i, b = backwards ? -1 : 1; + unsigned long address = (unsigned long)src; + enum es_result ret; + + ret = vc_insn_string_check(ctxt, address, false); + if (ret != ES_OK) + return ret; + + for (i = 0; i < count; i++) { + void *s = src + (i * data_size * b); + char *d = buf + (i * data_size); + + ret = vc_read_mem(ctxt, s, d, data_size); + if (ret != ES_OK) + break; + } + + return ret; +} + +static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt, + void *dst, char *buf, + unsigned int data_size, + unsigned int count, + bool backwards) +{ + int i, s = backwards ? 
-1 : 1; + unsigned long address = (unsigned long)dst; + enum es_result ret; + + ret = vc_insn_string_check(ctxt, address, true); + if (ret != ES_OK) + return ret; + + for (i = 0; i < count; i++) { + void *d = dst + (i * data_size * s); + char *b = buf + (i * data_size); + + ret = vc_write_mem(ctxt, d, b, data_size); + if (ret != ES_OK) + break; + } + + return ret; +} + +#define IOIO_TYPE_STR BIT(2) +#define IOIO_TYPE_IN 1 +#define IOIO_TYPE_INS (IOIO_TYPE_IN | IOIO_TYPE_STR) +#define IOIO_TYPE_OUT 0 +#define IOIO_TYPE_OUTS (IOIO_TYPE_OUT | IOIO_TYPE_STR) + +#define IOIO_REP BIT(3) + +#define IOIO_ADDR_64 BIT(9) +#define IOIO_ADDR_32 BIT(8) +#define IOIO_ADDR_16 BIT(7) + +#define IOIO_DATA_32 BIT(6) +#define IOIO_DATA_16 BIT(5) +#define IOIO_DATA_8 BIT(4) + +#define IOIO_SEG_ES (0 << 10) +#define IOIO_SEG_DS (3 << 10) + +static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo) +{ + struct insn *insn = &ctxt->insn; + size_t size; + u64 port; + + *exitinfo = 0; + + switch (insn->opcode.bytes[0]) { + /* INS opcodes */ + case 0x6c: + case 0x6d: + *exitinfo |= IOIO_TYPE_INS; + *exitinfo |= IOIO_SEG_ES; + port = ctxt->regs->dx & 0xffff; + break; + + /* OUTS opcodes */ + case 0x6e: + case 0x6f: + *exitinfo |= IOIO_TYPE_OUTS; + *exitinfo |= IOIO_SEG_DS; + port = ctxt->regs->dx & 0xffff; + break; + + /* IN immediate opcodes */ + case 0xe4: + case 0xe5: + *exitinfo |= IOIO_TYPE_IN; + port = (u8)insn->immediate.value & 0xffff; + break; + + /* OUT immediate opcodes */ + case 0xe6: + case 0xe7: + *exitinfo |= IOIO_TYPE_OUT; + port = (u8)insn->immediate.value & 0xffff; + break; + + /* IN register opcodes */ + case 0xec: + case 0xed: + *exitinfo |= IOIO_TYPE_IN; + port = ctxt->regs->dx & 0xffff; + break; + + /* OUT register opcodes */ + case 0xee: + case 0xef: + *exitinfo |= IOIO_TYPE_OUT; + port = ctxt->regs->dx & 0xffff; + break; + + default: + return ES_DECODE_FAILED; + } + + *exitinfo |= port << 16; + + switch (insn->opcode.bytes[0]) { + case 0x6c: + case 0x6e: + case 0xe4: + case 0xe6: + case 0xec: + case 0xee: + /* Single byte opcodes */ + *exitinfo |= IOIO_DATA_8; + size = 1; + break; + default: + /* Length determined by instruction parsing */ + *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16 + : IOIO_DATA_32; + size = (insn->opnd_bytes == 2) ? 2 : 4; + } + + switch (insn->addr_bytes) { + case 2: + *exitinfo |= IOIO_ADDR_16; + break; + case 4: + *exitinfo |= IOIO_ADDR_32; + break; + case 8: + *exitinfo |= IOIO_ADDR_64; + break; + } + + if (insn_has_rep_prefix(insn)) + *exitinfo |= IOIO_REP; + + return vc_ioio_check(ctxt, (u16)port, size); +} + +static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + u64 exit_info_1, exit_info_2; + enum es_result ret; + + ret = vc_ioio_exitinfo(ctxt, &exit_info_1); + if (ret != ES_OK) + return ret; + + if (exit_info_1 & IOIO_TYPE_STR) { + + /* (REP) INS/OUTS */ + + bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF); + unsigned int io_bytes, exit_bytes; + unsigned int ghcb_count, op_count; + unsigned long es_base; + u64 sw_scratch; + + /* + * For the string variants with rep prefix the amount of in/out + * operations per #VC exception is limited so that the kernel + * has a chance to take interrupts and re-schedule while the + * instruction is emulated. + */ + io_bytes = (exit_info_1 >> 4) & 0x7; + ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes; + + op_count = (exit_info_1 & IOIO_REP) ? 
regs->cx : 1; + exit_info_2 = min(op_count, ghcb_count); + exit_bytes = exit_info_2 * io_bytes; + + es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES); + + /* Read bytes of OUTS into the shared buffer */ + if (!(exit_info_1 & IOIO_TYPE_IN)) { + ret = vc_insn_string_read(ctxt, + (void *)(es_base + regs->si), + ghcb->shared_buffer, io_bytes, + exit_info_2, df); + if (ret) + return ret; + } + + /* + * Issue an VMGEXIT to the HV to consume the bytes from the + * shared buffer or to have it write them into the shared buffer + * depending on the instruction: OUTS or INS. + */ + sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer); + ghcb_set_sw_scratch(ghcb, sw_scratch); + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, + exit_info_1, exit_info_2); + if (ret != ES_OK) + return ret; + + /* Read bytes from shared buffer into the guest's destination. */ + if (exit_info_1 & IOIO_TYPE_IN) { + ret = vc_insn_string_write(ctxt, + (void *)(es_base + regs->di), + ghcb->shared_buffer, io_bytes, + exit_info_2, df); + if (ret) + return ret; + + if (df) + regs->di -= exit_bytes; + else + regs->di += exit_bytes; + } else { + if (df) + regs->si -= exit_bytes; + else + regs->si += exit_bytes; + } + + if (exit_info_1 & IOIO_REP) + regs->cx -= exit_info_2; + + ret = regs->cx ? ES_RETRY : ES_OK; + + } else { + + /* IN/OUT into/from rAX */ + + int bits = (exit_info_1 & 0x70) >> 1; + u64 rax = 0; + + if (!(exit_info_1 & IOIO_TYPE_IN)) + rax = lower_bits(regs->ax, bits); + + ghcb_set_rax(ghcb, rax); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0); + if (ret != ES_OK) + return ret; + + if (exit_info_1 & IOIO_TYPE_IN) { + if (!ghcb_rax_is_valid(ghcb)) + return ES_VMM_ERROR; + regs->ax = lower_bits(ghcb->save.rax, bits); + } + } + + return ret; +} + +static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + struct cpuid_leaf leaf; + int ret; + + leaf.fn = regs->ax; + leaf.subfn = regs->cx; + ret = snp_cpuid(ghcb, ctxt, &leaf); + if (!ret) { + regs->ax = leaf.eax; + regs->bx = leaf.ebx; + regs->cx = leaf.ecx; + regs->dx = leaf.edx; + } + + return ret; +} + +static enum es_result vc_handle_cpuid(struct ghcb *ghcb, + struct es_em_ctxt *ctxt) +{ + struct pt_regs *regs = ctxt->regs; + u32 cr4 = native_read_cr4(); + enum es_result ret; + int snp_cpuid_ret; + + snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt); + if (!snp_cpuid_ret) + return ES_OK; + if (snp_cpuid_ret != -EOPNOTSUPP) + return ES_VMM_ERROR; + + ghcb_set_rax(ghcb, regs->ax); + ghcb_set_rcx(ghcb, regs->cx); + + if (cr4 & X86_CR4_OSXSAVE) + /* Safe to read xcr0 */ + ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK)); + else + /* xgetbv will cause #GP - use reset value for xcr0 */ + ghcb_set_xcr0(ghcb, 1); + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && + ghcb_rbx_is_valid(ghcb) && + ghcb_rcx_is_valid(ghcb) && + ghcb_rdx_is_valid(ghcb))) + return ES_VMM_ERROR; + + regs->ax = ghcb->save.rax; + regs->bx = ghcb->save.rbx; + regs->cx = ghcb->save.rcx; + regs->dx = ghcb->save.rdx; + + return ES_OK; +} + +static enum es_result vc_handle_rdtsc(struct ghcb *ghcb, + struct es_em_ctxt *ctxt, + unsigned long exit_code) +{ + bool rdtscp = (exit_code == SVM_EXIT_RDTSCP); + enum es_result ret; + + /* + * The hypervisor should not be intercepting RDTSC/RDTSCP when Secure + * TSC is enabled. A #VC exception will be generated if the RDTSC/RDTSCP + * instructions are being intercepted. 
If this should occur and Secure + * TSC is enabled, guest execution should be terminated as the guest + * cannot rely on the TSC value provided by the hypervisor. + */ + if (sev_status & MSR_AMD64_SNP_SECURE_TSC) + return ES_VMM_ERROR; + + ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0); + if (ret != ES_OK) + return ret; + + if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) && + (!rdtscp || ghcb_rcx_is_valid(ghcb)))) + return ES_VMM_ERROR; + + ctxt->regs->ax = ghcb->save.rax; + ctxt->regs->dx = ghcb->save.rdx; + if (rdtscp) + ctxt->regs->cx = ghcb->save.rcx; + + return ES_OK; +} diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 91801138b10b..7cd2f395f301 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -1,7 +1,6 @@ CONFIG_WERROR=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_USELIB=y CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 3d948f10c94c..56cfdc79e2c6 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -4,7 +4,7 @@ menu "Accelerated Cryptographic Algorithms for CPU (x86)" config CRYPTO_CURVE25519_X86 tristate - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_KPP select CRYPTO_LIB_CURVE25519_GENERIC select CRYPTO_ARCH_HAVE_LIB_CURVE25519 @@ -17,13 +17,11 @@ config CRYPTO_CURVE25519_X86 config CRYPTO_AES_NI_INTEL tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XCTR, XTS, GCM (AES-NI/VAES)" - depends on X86 select CRYPTO_AEAD select CRYPTO_LIB_AES select CRYPTO_LIB_GF128MUL select CRYPTO_ALGAPI select CRYPTO_SKCIPHER - select CRYPTO_SIMD help Block cipher: AES cipher algorithms AEAD cipher: AES with GCM @@ -38,7 +36,7 @@ config CRYPTO_AES_NI_INTEL config CRYPTO_BLOWFISH_X86_64 tristate "Ciphers: Blowfish, modes: ECB, CBC" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_BLOWFISH_COMMON imply CRYPTO_CTR @@ -50,7 +48,7 @@ config CRYPTO_BLOWFISH_X86_64 config CRYPTO_CAMELLIA_X86_64 tristate "Ciphers: Camellia with modes: ECB, CBC" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER imply CRYPTO_CTR help @@ -61,10 +59,9 @@ config CRYPTO_CAMELLIA_X86_64 config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_CAMELLIA_X86_64 - select CRYPTO_SIMD imply CRYPTO_XTS help Length-preserving ciphers: Camellia with ECB and CBC modes @@ -75,7 +72,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX2)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 help Length-preserving ciphers: Camellia with ECB and CBC modes @@ -86,11 +83,10 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 config CRYPTO_CAST5_AVX_X86_64 tristate "Ciphers: CAST5 with modes: ECB, CBC (AVX)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_CAST5 select CRYPTO_CAST_COMMON - select CRYPTO_SIMD imply CRYPTO_CTR help Length-preserving ciphers: CAST5 (CAST-128) cipher algorithm @@ -103,11 +99,10 @@ config CRYPTO_CAST5_AVX_X86_64 config CRYPTO_CAST6_AVX_X86_64 tristate "Ciphers: CAST6 with modes: ECB, CBC (AVX)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_CAST6 select CRYPTO_CAST_COMMON - select CRYPTO_SIMD imply CRYPTO_XTS imply CRYPTO_CTR help @@ -121,7 +116,7 @@ config 
CRYPTO_CAST6_AVX_X86_64 config CRYPTO_DES3_EDE_X86_64 tristate "Ciphers: Triple DES EDE with modes: ECB, CBC" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_LIB_DES imply CRYPTO_CTR @@ -135,10 +130,9 @@ config CRYPTO_DES3_EDE_X86_64 config CRYPTO_SERPENT_SSE2_X86_64 tristate "Ciphers: Serpent with modes: ECB, CBC (SSE2)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_SERPENT - select CRYPTO_SIMD imply CRYPTO_CTR help Length-preserving ciphers: Serpent cipher algorithm @@ -151,10 +145,9 @@ config CRYPTO_SERPENT_SSE2_X86_64 config CRYPTO_SERPENT_SSE2_586 tristate "Ciphers: Serpent with modes: ECB, CBC (32-bit with SSE2)" - depends on X86 && !64BIT + depends on !64BIT select CRYPTO_SKCIPHER select CRYPTO_SERPENT - select CRYPTO_SIMD imply CRYPTO_CTR help Length-preserving ciphers: Serpent cipher algorithm @@ -167,10 +160,9 @@ config CRYPTO_SERPENT_SSE2_586 config CRYPTO_SERPENT_AVX_X86_64 tristate "Ciphers: Serpent with modes: ECB, CBC (AVX)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_SERPENT - select CRYPTO_SIMD imply CRYPTO_XTS imply CRYPTO_CTR help @@ -184,7 +176,7 @@ config CRYPTO_SERPENT_AVX_X86_64 config CRYPTO_SERPENT_AVX2_X86_64 tristate "Ciphers: Serpent with modes: ECB, CBC (AVX2)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SERPENT_AVX_X86_64 help Length-preserving ciphers: Serpent cipher algorithm @@ -197,9 +189,8 @@ config CRYPTO_SERPENT_AVX2_X86_64 config CRYPTO_SM4_AESNI_AVX_X86_64 tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER - select CRYPTO_SIMD select CRYPTO_ALGAPI select CRYPTO_SM4 help @@ -218,9 +209,8 @@ config CRYPTO_SM4_AESNI_AVX_X86_64 config CRYPTO_SM4_AESNI_AVX2_X86_64 tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX2)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER - select CRYPTO_SIMD select CRYPTO_ALGAPI select CRYPTO_SM4 select CRYPTO_SM4_AESNI_AVX_X86_64 @@ -240,7 +230,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64 config CRYPTO_TWOFISH_586 tristate "Ciphers: Twofish (32-bit)" - depends on (X86 || UML_X86) && !64BIT + depends on !64BIT select CRYPTO_ALGAPI select CRYPTO_TWOFISH_COMMON imply CRYPTO_CTR @@ -251,7 +241,7 @@ config CRYPTO_TWOFISH_586 config CRYPTO_TWOFISH_X86_64 tristate "Ciphers: Twofish" - depends on (X86 || UML_X86) && 64BIT + depends on 64BIT select CRYPTO_ALGAPI select CRYPTO_TWOFISH_COMMON imply CRYPTO_CTR @@ -262,7 +252,7 @@ config CRYPTO_TWOFISH_X86_64 config CRYPTO_TWOFISH_X86_64_3WAY tristate "Ciphers: Twofish with modes: ECB, CBC (3-way parallel)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_X86_64 @@ -277,9 +267,8 @@ config CRYPTO_TWOFISH_X86_64_3WAY config CRYPTO_TWOFISH_AVX_X86_64 tristate "Ciphers: Twofish with modes: ECB, CBC (AVX)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER - select CRYPTO_SIMD select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_X86_64 select CRYPTO_TWOFISH_X86_64_3WAY @@ -295,9 +284,8 @@ config CRYPTO_TWOFISH_AVX_X86_64 config CRYPTO_ARIA_AESNI_AVX_X86_64 tristate "Ciphers: ARIA with modes: ECB, CTR (AES-NI/AVX/GFNI)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER - select CRYPTO_SIMD select CRYPTO_ALGAPI select CRYPTO_ARIA help @@ -313,9 +301,8 @@ config CRYPTO_ARIA_AESNI_AVX_X86_64 config CRYPTO_ARIA_AESNI_AVX2_X86_64 tristate "Ciphers: ARIA with modes: ECB, CTR 
(AES-NI/AVX2/GFNI)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SKCIPHER - select CRYPTO_SIMD select CRYPTO_ALGAPI select CRYPTO_ARIA select CRYPTO_ARIA_AESNI_AVX_X86_64 @@ -332,9 +319,8 @@ config CRYPTO_ARIA_AESNI_AVX2_X86_64 config CRYPTO_ARIA_GFNI_AVX512_X86_64 tristate "Ciphers: ARIA with modes: ECB, CTR (AVX512/GFNI)" - depends on X86 && 64BIT && AS_AVX512 && AS_GFNI + depends on 64BIT && AS_GFNI select CRYPTO_SKCIPHER - select CRYPTO_SIMD select CRYPTO_ALGAPI select CRYPTO_ARIA select CRYPTO_ARIA_AESNI_AVX_X86_64 @@ -349,27 +335,10 @@ config CRYPTO_ARIA_GFNI_AVX512_X86_64 Processes 64 blocks in parallel. -config CRYPTO_CHACHA20_X86_64 - tristate - depends on X86 && 64BIT - select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - default CRYPTO_LIB_CHACHA_INTERNAL - help - Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 - stream cipher algorithms - - Architecture: x86_64 using: - - SSSE3 (Supplemental SSE3) - - AVX2 (Advanced Vector Extensions 2) - - AVX-512VL (Advanced Vector Extensions-512VL) - config CRYPTO_AEGIS128_AESNI_SSE2 tristate "AEAD ciphers: AEGIS-128 (AES-NI/SSE4.1)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_AEAD - select CRYPTO_SIMD help AEGIS-128 AEAD algorithm @@ -379,7 +348,7 @@ config CRYPTO_AEGIS128_AESNI_SSE2 config CRYPTO_NHPOLY1305_SSE2 tristate "Hash functions: NHPoly1305 (SSE2)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_NHPOLY1305 help NHPoly1305 hash function for Adiantum @@ -389,7 +358,7 @@ config CRYPTO_NHPOLY1305_SSE2 config CRYPTO_NHPOLY1305_AVX2 tristate "Hash functions: NHPoly1305 (AVX2)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_NHPOLY1305 help NHPoly1305 hash function for Adiantum @@ -397,21 +366,9 @@ config CRYPTO_NHPOLY1305_AVX2 Architecture: x86_64 using: - AVX2 (Advanced Vector Extensions 2) -config CRYPTO_BLAKE2S_X86 - bool "Hash functions: BLAKE2s (SSSE3/AVX-512)" - depends on X86 && 64BIT - select CRYPTO_LIB_BLAKE2S_GENERIC - select CRYPTO_ARCH_HAVE_LIB_BLAKE2S - help - BLAKE2s cryptographic hash function (RFC 7693) - - Architecture: x86_64 using: - - SSSE3 (Supplemental SSE3) - - AVX-512 (Advanced Vector Extensions-512) - config CRYPTO_POLYVAL_CLMUL_NI tristate "Hash functions: POLYVAL (CLMUL-NI)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_POLYVAL help POLYVAL hash function for HCTR2 @@ -419,23 +376,9 @@ config CRYPTO_POLYVAL_CLMUL_NI Architecture: x86_64 using: - CLMUL-NI (carry-less multiplication new instructions) -config CRYPTO_POLY1305_X86_64 - tristate - depends on X86 && 64BIT - select CRYPTO_HASH - select CRYPTO_LIB_POLY1305_GENERIC - select CRYPTO_ARCH_HAVE_LIB_POLY1305 - default CRYPTO_LIB_POLY1305_INTERNAL - help - Poly1305 authenticator algorithm (RFC7539) - - Architecture: x86_64 using: - - SSE2 (Streaming SIMD Extensions 2) - - AVX2 (Advanced Vector Extensions 2) - config CRYPTO_SHA1_SSSE3 tristate "Hash functions: SHA-1 (SSSE3/AVX/AVX2/SHA-NI)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SHA1 select CRYPTO_HASH help @@ -447,23 +390,9 @@ config CRYPTO_SHA1_SSSE3 - AVX2 (Advanced Vector Extensions 2) - SHA-NI (SHA Extensions New Instructions) -config CRYPTO_SHA256_SSSE3 - tristate "Hash functions: SHA-224 and SHA-256 (SSSE3/AVX/AVX2/SHA-NI)" - depends on X86 && 64BIT - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: x86_64 using: - - SSSE3 (Supplemental SSE3) - - AVX (Advanced Vector Extensions) - - AVX2 
(Advanced Vector Extensions 2) - - SHA-NI (SHA Extensions New Instructions) - config CRYPTO_SHA512_SSSE3 tristate "Hash functions: SHA-384 and SHA-512 (SSSE3/AVX/AVX2)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_SHA512 select CRYPTO_HASH help @@ -476,9 +405,9 @@ config CRYPTO_SHA512_SSSE3 config CRYPTO_SM3_AVX_X86_64 tristate "Hash functions: SM3 (AVX)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_HASH - select CRYPTO_SM3 + select CRYPTO_LIB_SM3 help SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3 @@ -489,7 +418,7 @@ config CRYPTO_SM3_AVX_X86_64 config CRYPTO_GHASH_CLMUL_NI_INTEL tristate "Hash functions: GHASH (CLMUL-NI)" - depends on X86 && 64BIT + depends on 64BIT select CRYPTO_CRYPTD help GCM GHASH hash function (NIST SP800-38D) diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 5d19f41bde58..aa289a9e0153 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -42,10 +42,6 @@ cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o -obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o -chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha_glue.o -chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o - obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \ @@ -56,29 +52,17 @@ aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o endif obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o -sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o -sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o - -obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o -sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o -sha256-ssse3-$(CONFIG_AS_SHA256_NI) += sha256_ni_asm.o +sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ni_asm.o sha1_ssse3_glue.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o -obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += libblake2s-x86_64.o -libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o - obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o obj-$(CONFIG_CRYPTO_POLYVAL_CLMUL_NI) += polyval-clmulni.o polyval-clmulni-y := polyval-clmulni_asm.o polyval-clmulni_glue.o -obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o -poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o -targets += poly1305-x86_64-cryptogams.S - obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o @@ -104,10 +88,5 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o -quiet_cmd_perlasm = PERLASM $@ - cmd_perlasm = $(PERL) $< > $@ -$(obj)/%.S: $(src)/%.pl FORCE - $(call if_changed,perlasm) - # Disable GCOV in odd or sensitive code GCOV_PROFILE_curve25519-x86_64.o := n diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 26786e15abac..f1b6d40154e3 100644 --- 
a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -8,7 +8,6 @@ */ #include <crypto/internal/aead.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/module.h> @@ -233,21 +232,18 @@ static struct aead_alg crypto_aegis128_aesni_alg = { .chunksize = AEGIS128_BLOCK_SIZE, .base = { - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aegis_ctx) + __alignof__(struct aegis_ctx), .cra_priority = 400, - .cra_name = "__aegis128", - .cra_driver_name = "__aegis128-aesni", + .cra_name = "aegis128", + .cra_driver_name = "aegis128-aesni", .cra_module = THIS_MODULE, } }; -static struct simd_aead_alg *simd_alg; - static int __init crypto_aegis128_aesni_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM4_1) || @@ -255,13 +251,12 @@ static int __init crypto_aegis128_aesni_module_init(void) !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; - return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1, - &simd_alg); + return crypto_register_aead(&crypto_aegis128_aesni_alg); } static void __exit crypto_aegis128_aesni_module_exit(void) { - simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg); + crypto_unregister_aead(&crypto_aegis128_aesni_alg); } module_init(crypto_aegis128_aesni_module_init); diff --git a/arch/x86/crypto/aes-ctr-avx-x86_64.S b/arch/x86/crypto/aes-ctr-avx-x86_64.S index 1685d8b24b2c..bbbfd80f5a50 100644 --- a/arch/x86/crypto/aes-ctr-avx-x86_64.S +++ b/arch/x86/crypto/aes-ctr-avx-x86_64.S @@ -48,8 +48,7 @@ // using the following sets of CPU features: // - AES-NI && AVX // - VAES && AVX2 -// - VAES && (AVX10/256 || (AVX512BW && AVX512VL)) && BMI2 -// - VAES && (AVX10/512 || (AVX512BW && AVX512VL)) && BMI2 +// - VAES && AVX512BW && AVX512VL && BMI2 // // See the function definitions at the bottom of the file for more information. @@ -76,7 +75,6 @@ .text // Move a vector between memory and a register. -// The register operand must be in the first 16 vector registers. .macro _vmovdqu src, dst .if VL < 64 vmovdqu \src, \dst @@ -86,7 +84,6 @@ .endm // Move a vector between registers. -// The registers must be in the first 16 vector registers. .macro _vmovdqa src, dst .if VL < 64 vmovdqa \src, \dst @@ -96,7 +93,7 @@ .endm // Broadcast a 128-bit value from memory to all 128-bit lanes of a vector -// register. The register operand must be in the first 16 vector registers. +// register. .macro _vbroadcast128 src, dst .if VL == 16 vmovdqu \src, \dst @@ -108,7 +105,6 @@ .endm // XOR two vectors together. -// Any register operands must be in the first 16 vector registers. .macro _vpxor src1, src2, dst .if VL < 64 vpxor \src1, \src2, \dst @@ -199,8 +195,8 @@ // XOR each with the zero-th round key. Also update LE_CTR if !\final. .macro _prepare_2_ctr_vecs is_xctr, i0, i1, final=0 .if \is_xctr - .if USE_AVX10 - _vmovdqa LE_CTR, AESDATA\i0 + .if USE_AVX512 + vmovdqa64 LE_CTR, AESDATA\i0 vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i0 .else vpxor XCTR_IV, LE_CTR, AESDATA\i0 @@ -208,7 +204,7 @@ .endif vpaddq LE_CTR_INC1, LE_CTR, AESDATA\i1 - .if USE_AVX10 + .if USE_AVX512 vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i1 .else vpxor XCTR_IV, AESDATA\i1, AESDATA\i1 @@ -481,18 +477,12 @@ .Lxor_tail_partial_vec_0\@: // XOR the remaining 1 <= LEN < VL bytes. It's easy if masked // loads/stores are available; otherwise it's a bit harder... 
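As an aside on the "masked loads/stores" comment above: the EVEX tail path builds a keep-mask with bzhi/kmovq (all-ones with every bit at position >= LEN cleared) and hands it to a masked vmovdqu8, so only the remaining bytes are touched. A minimal userspace C sketch of that mask construction, assuming nothing beyond standard C (the function name tail_byte_mask is illustrative, not from the kernel):

```c
/*
 * Userspace sketch (not kernel code): model of the BZHI-based byte mask
 * used for the partial final vector.  Starting from an all-ones value,
 * BZHI clears every bit at position >= len, leaving one mask bit set per
 * remaining byte; BZHI with an index >= 64 leaves the source unchanged.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t tail_byte_mask(unsigned int len)
{
	if (len >= 64)
		return ~UINT64_C(0);
	return (UINT64_C(1) << len) - 1;
}

int main(void)
{
	/* e.g. len = 16 -> mask 0xffff: only those byte lanes are accessed */
	for (unsigned int len = 0; len <= 64; len += 16)
		printf("len=%2u mask=%016llx\n", len,
		       (unsigned long long)tail_byte_mask(len));
	return 0;
}
```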
-.if USE_AVX10 - .if VL <= 32 - mov $-1, %eax - bzhi LEN, %eax, %eax - kmovd %eax, %k1 - .else +.if USE_AVX512 mov $-1, %rax bzhi LEN64, %rax, %rax kmovq %rax, %k1 - .endif vmovdqu8 (SRC), AESDATA1{%k1}{z} - _vpxor AESDATA1, AESDATA0, AESDATA0 + vpxord AESDATA1, AESDATA0, AESDATA0 vmovdqu8 AESDATA0, (DST){%k1} .else .if VL == 32 @@ -554,7 +544,7 @@ // eliminates carries. |ctr| is the per-message block counter starting at 1. .set VL, 16 -.set USE_AVX10, 0 +.set USE_AVX512, 0 SYM_TYPED_FUNC_START(aes_ctr64_crypt_aesni_avx) _aes_ctr_crypt 0 SYM_FUNC_END(aes_ctr64_crypt_aesni_avx) @@ -564,7 +554,7 @@ SYM_FUNC_END(aes_xctr_crypt_aesni_avx) #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) .set VL, 32 -.set USE_AVX10, 0 +.set USE_AVX512, 0 SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2) _aes_ctr_crypt 0 SYM_FUNC_END(aes_ctr64_crypt_vaes_avx2) @@ -572,21 +562,12 @@ SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx2) _aes_ctr_crypt 1 SYM_FUNC_END(aes_xctr_crypt_vaes_avx2) -.set VL, 32 -.set USE_AVX10, 1 -SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_256) - _aes_ctr_crypt 0 -SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_256) -SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_256) - _aes_ctr_crypt 1 -SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_256) - .set VL, 64 -.set USE_AVX10, 1 -SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_512) +.set USE_AVX512, 1 +SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx512) _aes_ctr_crypt 0 -SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_512) -SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_512) +SYM_FUNC_END(aes_ctr64_crypt_vaes_avx512) +SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512) _aes_ctr_crypt 1 -SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_512) +SYM_FUNC_END(aes_xctr_crypt_vaes_avx512) #endif // CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S index 93ba0ddbe009..db79cdf81588 100644 --- a/arch/x86/crypto/aes-xts-avx-x86_64.S +++ b/arch/x86/crypto/aes-xts-avx-x86_64.S @@ -52,32 +52,25 @@ * different code, it uses a macro to generate several implementations that * share similar source code but are targeted at different CPUs, listed below: * - * AES-NI + AVX + * AES-NI && AVX * - 128-bit vectors (1 AES block per vector) * - VEX-coded instructions * - xmm0-xmm15 * - This is for older CPUs that lack VAES but do have AVX. * - * VAES + VPCLMULQDQ + AVX2 + * VAES && VPCLMULQDQ && AVX2 * - 256-bit vectors (2 AES blocks per vector) * - VEX-coded instructions * - ymm0-ymm15 - * - This is for CPUs that have VAES but lack AVX512 or AVX10, - * e.g. Intel's Alder Lake and AMD's Zen 3. + * - This is for CPUs that have VAES but either lack AVX512 (e.g. Intel's + * Alder Lake and AMD's Zen 3) or downclock too eagerly when using zmm + * registers (e.g. Intel's Ice Lake). * - * VAES + VPCLMULQDQ + AVX10/256 + BMI2 - * - 256-bit vectors (2 AES blocks per vector) + * VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2 + * - 512-bit vectors (4 AES blocks per vector) * - EVEX-coded instructions - * - ymm0-ymm31 - * - This is for CPUs that have AVX512 but where using zmm registers causes - * downclocking, and for CPUs that have AVX10/256 but not AVX10/512. - * - By "AVX10/256" we really mean (AVX512BW + AVX512VL) || AVX10/256. - * To avoid confusion with 512-bit, we just write AVX10/256. - * - * VAES + VPCLMULQDQ + AVX10/512 + BMI2 - * - Same as the previous one, but upgrades to 512-bit vectors - * (4 AES blocks per vector) in zmm0-zmm31. - * - This is for CPUs that have good AVX512 or AVX10/512 support. 
+ * - zmm0-zmm31 + * - This is for CPUs that have good AVX512 support. * * This file doesn't have an implementation for AES-NI alone (without AVX), as * the lack of VEX would make all the assembly code different. @@ -107,9 +100,20 @@ // exists when there's a carry out of the low 64 bits of the tweak. .quad 0x87, 1 + // These are the shift amounts that are needed when multiplying by [x^0, + // x^1, x^2, x^3] to compute the first vector of tweaks when VL=64. + // + // The right shifts by 64 are expected to zeroize the destination. + // 'vpsrlvq' is indeed defined to do that; i.e. it doesn't truncate the + // amount to 64 & 63 = 0 like the 'shr' scalar shift instruction would. +.Lrshift_amounts: + .byte 64, 64, 63, 63, 62, 62, 61, 61 +.Llshift_amounts: + .byte 0, 0, 1, 1, 2, 2, 3, 3 + // This table contains constants for vpshufb and vpblendvb, used to // handle variable byte shifts and blending during ciphertext stealing - // on CPUs that don't support AVX10-style masking. + // on CPUs that don't support AVX512-style masking. .Lcts_permute_table: .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 @@ -138,7 +142,7 @@ .irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 _define_Vi \i .endr -.if USE_AVX10 +.if USE_AVX512 .irp i, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 _define_Vi \i .endr @@ -193,7 +197,7 @@ // keys to the *end* of this register range. I.e., AES-128 uses // KEY5-KEY14, AES-192 uses KEY3-KEY14, and AES-256 uses KEY1-KEY14. // (All also use KEY0 for the XOR-only "round" at the beginning.) -.if USE_AVX10 +.if USE_AVX512 .set KEY1_XMM, %xmm16 .set KEY1, V16 .set KEY2_XMM, %xmm17 @@ -227,7 +231,6 @@ .endm // Move a vector between memory and a register. -// The register operand must be in the first 16 vector registers. .macro _vmovdqu src, dst .if VL < 64 vmovdqu \src, \dst @@ -238,9 +241,9 @@ // Broadcast a 128-bit value into a vector. .macro _vbroadcast128 src, dst -.if VL == 16 && !USE_AVX10 +.if VL == 16 vmovdqu \src, \dst -.elseif VL == 32 && !USE_AVX10 +.elseif VL == 32 vbroadcasti128 \src, \dst .else vbroadcasti32x4 \src, \dst @@ -248,7 +251,6 @@ .endm // XOR two vectors together. -// Any register operands must be in the first 16 vector registers. .macro _vpxor src1, src2, dst .if VL < 64 vpxor \src1, \src2, \dst @@ -259,7 +261,7 @@ // XOR three vectors together. .macro _xor3 src1, src2, src3_and_dst -.if USE_AVX10 +.if USE_AVX512 // vpternlogd with immediate 0x96 is a three-argument XOR. vpternlogd $0x96, \src1, \src2, \src3_and_dst .else @@ -274,7 +276,7 @@ vpshufd $0x13, \src, \tmp vpaddq \src, \src, \dst vpsrad $31, \tmp, \tmp -.if USE_AVX10 +.if USE_AVX512 vpternlogd $0x78, GF_POLY_XMM, \tmp, \dst .else vpand GF_POLY_XMM, \tmp, \tmp @@ -303,52 +305,75 @@ // Given the first XTS tweak at (TWEAK), compute the first set of tweaks and // store them in the vector registers TWEAK0-TWEAK3. Clobbers V0-V5. .macro _compute_first_set_of_tweaks - vmovdqu (TWEAK), TWEAK0_XMM - _vbroadcast128 .Lgf_poly(%rip), GF_POLY .if VL == 16 - // With VL=16, multiplying by x serially is fastest. + vmovdqu (TWEAK), TWEAK0_XMM + vmovdqu .Lgf_poly(%rip), GF_POLY _next_tweak TWEAK0, %xmm0, TWEAK1 _next_tweak TWEAK1, %xmm0, TWEAK2 _next_tweak TWEAK2, %xmm0, TWEAK3 -.else -.if VL == 32 - // Compute the second block of TWEAK0. +.elseif VL == 32 + vmovdqu (TWEAK), TWEAK0_XMM + vbroadcasti128 .Lgf_poly(%rip), GF_POLY + + // Compute the first vector of tweaks. 
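For reference, the serial tweak update that _next_tweak performs with SIMD shuffles is the standard XTS multiply-by-x in GF(2^128): shift the 128-bit tweak left by one and, if a bit carries out of bit 127, fold it back in as 0x87, matching the .Lgf_poly constant above. A scalar userspace sketch (not the kernel implementation; the tweak is held little-endian as two 64-bit words, and the function name is illustrative):

```c
/*
 * Scalar model of the GF(2^128) multiply-by-x used for XTS tweaks.
 * A carry out of bit 127 is reduced with the polynomial constant 0x87
 * (x^128 + x^7 + x^2 + x + 1).
 */
#include <stdint.h>
#include <stdio.h>

static void xts_next_tweak(uint64_t t[2])
{
	uint64_t carry = t[1] >> 63;        /* bit 127 about to shift out  */

	t[1] = (t[1] << 1) | (t[0] >> 63);  /* 128-bit left shift by one   */
	t[0] <<= 1;
	if (carry)
		t[0] ^= 0x87;               /* fold the carry back in      */
}

int main(void)
{
	uint64_t t[2] = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };

	/* TWEAK * x^2, i.e. two serial steps, as the wider vector paths
	 * compute in parallel with shifts and vpclmulqdq */
	xts_next_tweak(t);
	xts_next_tweak(t);
	printf("%016llx%016llx\n",
	       (unsigned long long)t[1], (unsigned long long)t[0]);
	return 0;
}
```

The VL=32 and VL=64 paths that follow compute the same values, but derive TWEAK1..TWEAK3 from TWEAK0 in one shot with shifts and a carry-less multiply against the polynomial instead of repeating this serial step.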
_next_tweak TWEAK0_XMM, %xmm0, %xmm1 vinserti128 $1, %xmm1, TWEAK0, TWEAK0 -.elseif VL == 64 - // Compute the remaining blocks of TWEAK0. - _next_tweak TWEAK0_XMM, %xmm0, %xmm1 - _next_tweak %xmm1, %xmm0, %xmm2 - _next_tweak %xmm2, %xmm0, %xmm3 - vinserti32x4 $1, %xmm1, TWEAK0, TWEAK0 - vinserti32x4 $2, %xmm2, TWEAK0, TWEAK0 - vinserti32x4 $3, %xmm3, TWEAK0, TWEAK0 -.endif - // Compute TWEAK[1-3] from TWEAK0. - vpsrlq $64 - 1*VL/16, TWEAK0, V0 - vpsrlq $64 - 2*VL/16, TWEAK0, V2 - vpsrlq $64 - 3*VL/16, TWEAK0, V4 + + // Compute the next three vectors of tweaks: + // TWEAK1 = TWEAK0 * [x^2, x^2] + // TWEAK2 = TWEAK0 * [x^4, x^4] + // TWEAK3 = TWEAK0 * [x^6, x^6] + vpsrlq $64 - 2, TWEAK0, V0 + vpsrlq $64 - 4, TWEAK0, V2 + vpsrlq $64 - 6, TWEAK0, V4 vpclmulqdq $0x01, GF_POLY, V0, V1 vpclmulqdq $0x01, GF_POLY, V2, V3 vpclmulqdq $0x01, GF_POLY, V4, V5 vpslldq $8, V0, V0 vpslldq $8, V2, V2 vpslldq $8, V4, V4 - vpsllq $1*VL/16, TWEAK0, TWEAK1 - vpsllq $2*VL/16, TWEAK0, TWEAK2 - vpsllq $3*VL/16, TWEAK0, TWEAK3 -.if USE_AVX10 - vpternlogd $0x96, V0, V1, TWEAK1 - vpternlogd $0x96, V2, V3, TWEAK2 - vpternlogd $0x96, V4, V5, TWEAK3 -.else + vpsllq $2, TWEAK0, TWEAK1 + vpsllq $4, TWEAK0, TWEAK2 + vpsllq $6, TWEAK0, TWEAK3 vpxor V0, TWEAK1, TWEAK1 vpxor V2, TWEAK2, TWEAK2 vpxor V4, TWEAK3, TWEAK3 vpxor V1, TWEAK1, TWEAK1 vpxor V3, TWEAK2, TWEAK2 vpxor V5, TWEAK3, TWEAK3 -.endif +.else + vbroadcasti32x4 (TWEAK), TWEAK0 + vbroadcasti32x4 .Lgf_poly(%rip), GF_POLY + + // Compute the first vector of tweaks: + // TWEAK0 = broadcast128(TWEAK) * [x^0, x^1, x^2, x^3] + vpmovzxbq .Lrshift_amounts(%rip), V4 + vpsrlvq V4, TWEAK0, V0 + vpclmulqdq $0x01, GF_POLY, V0, V1 + vpmovzxbq .Llshift_amounts(%rip), V4 + vpslldq $8, V0, V0 + vpsllvq V4, TWEAK0, TWEAK0 + vpternlogd $0x96, V0, V1, TWEAK0 + + // Compute the next three vectors of tweaks: + // TWEAK1 = TWEAK0 * [x^4, x^4, x^4, x^4] + // TWEAK2 = TWEAK0 * [x^8, x^8, x^8, x^8] + // TWEAK3 = TWEAK0 * [x^12, x^12, x^12, x^12] + // x^8 only needs byte-aligned shifts, so optimize accordingly. + vpsrlq $64 - 4, TWEAK0, V0 + vpsrldq $(64 - 8) / 8, TWEAK0, V2 + vpsrlq $64 - 12, TWEAK0, V4 + vpclmulqdq $0x01, GF_POLY, V0, V1 + vpclmulqdq $0x01, GF_POLY, V2, V3 + vpclmulqdq $0x01, GF_POLY, V4, V5 + vpslldq $8, V0, V0 + vpslldq $8, V4, V4 + vpsllq $4, TWEAK0, TWEAK1 + vpslldq $8 / 8, TWEAK0, TWEAK2 + vpsllq $12, TWEAK0, TWEAK3 + vpternlogd $0x96, V0, V1, TWEAK1 + vpxord V3, TWEAK2, TWEAK2 + vpternlogd $0x96, V4, V5, TWEAK3 .endif .endm @@ -474,26 +499,26 @@ lea OFFS-16(KEY, KEYLEN64, 4), KEY // If all 32 SIMD registers are available, cache all the round keys. 
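A short note on the vpternlogd constants used above: each result bit of vpternlogd is looked up in the 8-bit immediate, using the three corresponding input bits as the index, so 0x96 encodes the three-way XOR that _xor3 and the tweak computation rely on. A small userspace C check (ternlog_imm and xor3 are illustrative names, not kernel functions):

```c
/*
 * Userspace sketch: derive a vpternlogd immediate from a 3-input Boolean
 * function.  Bit i of the immediate is the function value for the input
 * combination whose bits form the index i; XOR of all three inputs gives
 * 0x96, the constant used for the three-argument XORs above.
 */
#include <stdio.h>

static unsigned int ternlog_imm(int (*f)(int, int, int))
{
	unsigned int imm = 0;

	for (int i = 0; i < 8; i++)
		imm |= (unsigned int)f((i >> 2) & 1, (i >> 1) & 1, i & 1) << i;
	return imm;
}

static int xor3(int a, int b, int c)
{
	return a ^ b ^ c;
}

int main(void)
{
	printf("three-way XOR immediate: 0x%02x\n", ternlog_imm(xor3));
	return 0;
}
```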
-.if USE_AVX10 +.if USE_AVX512 cmp $24, KEYLEN jl .Laes128\@ je .Laes192\@ - _vbroadcast128 -6*16(KEY), KEY1 - _vbroadcast128 -5*16(KEY), KEY2 + vbroadcasti32x4 -6*16(KEY), KEY1 + vbroadcasti32x4 -5*16(KEY), KEY2 .Laes192\@: - _vbroadcast128 -4*16(KEY), KEY3 - _vbroadcast128 -3*16(KEY), KEY4 + vbroadcasti32x4 -4*16(KEY), KEY3 + vbroadcasti32x4 -3*16(KEY), KEY4 .Laes128\@: - _vbroadcast128 -2*16(KEY), KEY5 - _vbroadcast128 -1*16(KEY), KEY6 - _vbroadcast128 0*16(KEY), KEY7 - _vbroadcast128 1*16(KEY), KEY8 - _vbroadcast128 2*16(KEY), KEY9 - _vbroadcast128 3*16(KEY), KEY10 - _vbroadcast128 4*16(KEY), KEY11 - _vbroadcast128 5*16(KEY), KEY12 - _vbroadcast128 6*16(KEY), KEY13 - _vbroadcast128 7*16(KEY), KEY14 + vbroadcasti32x4 -2*16(KEY), KEY5 + vbroadcasti32x4 -1*16(KEY), KEY6 + vbroadcasti32x4 0*16(KEY), KEY7 + vbroadcasti32x4 1*16(KEY), KEY8 + vbroadcasti32x4 2*16(KEY), KEY9 + vbroadcasti32x4 3*16(KEY), KEY10 + vbroadcasti32x4 4*16(KEY), KEY11 + vbroadcasti32x4 5*16(KEY), KEY12 + vbroadcasti32x4 6*16(KEY), KEY13 + vbroadcasti32x4 7*16(KEY), KEY14 .endif .endm @@ -521,7 +546,7 @@ // using the same key for all block(s). The round key is loaded from the // appropriate register or memory location for round \i. May clobber \tmp. .macro _vaes_1x enc, i, xmm_suffix, data, tmp -.if USE_AVX10 +.if USE_AVX512 _vaes \enc, KEY\i\xmm_suffix, \data .else .ifnb \xmm_suffix @@ -538,7 +563,7 @@ // appropriate register or memory location for round \i. In addition, does two // steps of the computation of the next set of tweaks. May clobber V4 and V5. .macro _vaes_4x enc, i -.if USE_AVX10 +.if USE_AVX512 _tweak_step (2*(\i-5)) _vaes \enc, KEY\i, V0 _vaes \enc, KEY\i, V1 @@ -574,7 +599,7 @@ .irp i, 5,6,7,8,9,10,11,12,13 _vaes_1x \enc, \i, \xmm_suffix, \data, tmp=\tmp .endr -.if USE_AVX10 +.if USE_AVX512 vpxord KEY14\xmm_suffix, \tweak, \tmp .else .ifnb \xmm_suffix @@ -617,11 +642,11 @@ // This is the main loop, en/decrypting 4*VL bytes per iteration. // XOR each source block with its tweak and the zero-th round key. -.if USE_AVX10 - _vmovdqu 0*VL(SRC), V0 - _vmovdqu 1*VL(SRC), V1 - _vmovdqu 2*VL(SRC), V2 - _vmovdqu 3*VL(SRC), V3 +.if USE_AVX512 + vmovdqu8 0*VL(SRC), V0 + vmovdqu8 1*VL(SRC), V1 + vmovdqu8 2*VL(SRC), V2 + vmovdqu8 3*VL(SRC), V3 vpternlogd $0x96, TWEAK0, KEY0, V0 vpternlogd $0x96, TWEAK1, KEY0, V1 vpternlogd $0x96, TWEAK2, KEY0, V2 @@ -654,7 +679,7 @@ // Reduce latency by doing the XOR before the vaesenclast, utilizing the // property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a) // (and likewise for vaesdeclast). -.if USE_AVX10 +.if USE_AVX512 _tweak_step 18 _tweak_step 19 vpxord TWEAK0, KEY14, V4 @@ -762,7 +787,7 @@ _aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0, tmp=%xmm1 .endif -.if USE_AVX10 +.if USE_AVX512 // Create a mask that has the first LEN bits set. mov $-1, %r9d bzhi LEN, %r9d, %r9d @@ -811,7 +836,7 @@ // u8 iv[AES_BLOCK_SIZE]); // // Encrypt |iv| using the AES key |tweak_key| to get the first tweak. Assumes -// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX10. +// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX512. SYM_TYPED_FUNC_START(aes_xts_encrypt_iv) .set TWEAK_KEY, %rdi .set IV, %rsi @@ -853,7 +878,7 @@ SYM_FUNC_END(aes_xts_encrypt_iv) // multiple of 16, then this function updates |tweak| to contain the next tweak. 
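The "reduce latency" trick mentioned above rests on a simple algebraic identity: the last AES round ends with an XOR against the round key, so an extra XOR operand can be absorbed into the key instead of being applied to the output. The identity does not depend on the key-independent part of the round, so a toy stand-in is enough to demonstrate it; the sketch below is a userspace illustration, not kernel code, and sub_shift is a deliberately fake SubBytes+ShiftRows:

```c
/*
 * Demonstrates:  enclast(key, a) ^ b == enclast(key ^ b, a)
 * which holds for any function of the form  f(state) ^ key.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the key-independent part of the last round. */
static uint8_t sub_shift(uint8_t state)
{
	return (uint8_t)((state << 3) | (state >> 5)) ^ 0x63;
}

static uint8_t enclast(uint8_t key, uint8_t state)
{
	return sub_shift(state) ^ key;
}

int main(void)
{
	for (unsigned int s = 0; s < 256; s++)
		for (unsigned int k = 0; k < 256; k += 17)
			for (unsigned int b = 0; b < 256; b += 29)
				assert((enclast(k, s) ^ b) ==
				       enclast(k ^ b, s));
	printf("identity holds for the toy model\n");
	return 0;
}
```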
.set VL, 16 -.set USE_AVX10, 0 +.set USE_AVX512, 0 SYM_TYPED_FUNC_START(aes_xts_encrypt_aesni_avx) _aes_xts_crypt 1 SYM_FUNC_END(aes_xts_encrypt_aesni_avx) @@ -863,7 +888,7 @@ SYM_FUNC_END(aes_xts_decrypt_aesni_avx) #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) .set VL, 32 -.set USE_AVX10, 0 +.set USE_AVX512, 0 SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2) _aes_xts_crypt 1 SYM_FUNC_END(aes_xts_encrypt_vaes_avx2) @@ -871,21 +896,12 @@ SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx2) _aes_xts_crypt 0 SYM_FUNC_END(aes_xts_decrypt_vaes_avx2) -.set VL, 32 -.set USE_AVX10, 1 -SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_256) - _aes_xts_crypt 1 -SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_256) -SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_256) - _aes_xts_crypt 0 -SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_256) - .set VL, 64 -.set USE_AVX10, 1 -SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_512) +.set USE_AVX512, 1 +SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx512) _aes_xts_crypt 1 -SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_512) -SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_512) +SYM_FUNC_END(aes_xts_encrypt_vaes_avx512) +SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512) _aes_xts_crypt 0 -SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_512) +SYM_FUNC_END(aes_xts_decrypt_vaes_avx512) #endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */ diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index bc655d794a95..061b1ced93c5 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -566,10 +566,9 @@ static struct crypto_alg aesni_cipher_alg = { static struct skcipher_alg aesni_skciphers[] = { { .base = { - .cra_name = "__ecb(aes)", - .cra_driver_name = "__ecb-aes-aesni", + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-aesni", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = CRYPTO_AES_CTX_SIZE, .cra_module = THIS_MODULE, @@ -581,10 +580,9 @@ static struct skcipher_alg aesni_skciphers[] = { .decrypt = ecb_decrypt, }, { .base = { - .cra_name = "__cbc(aes)", - .cra_driver_name = "__cbc-aes-aesni", + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-aesni", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = CRYPTO_AES_CTX_SIZE, .cra_module = THIS_MODULE, @@ -597,10 +595,9 @@ static struct skcipher_alg aesni_skciphers[] = { .decrypt = cbc_decrypt, }, { .base = { - .cra_name = "__cts(cbc(aes))", - .cra_driver_name = "__cts-cbc-aes-aesni", + .cra_name = "cts(cbc(aes))", + .cra_driver_name = "cts-cbc-aes-aesni", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = CRYPTO_AES_CTX_SIZE, .cra_module = THIS_MODULE, @@ -615,10 +612,9 @@ static struct skcipher_alg aesni_skciphers[] = { #ifdef CONFIG_X86_64 }, { .base = { - .cra_name = "__ctr(aes)", - .cra_driver_name = "__ctr-aes-aesni", + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-aesni", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = 1, .cra_ctxsize = CRYPTO_AES_CTX_SIZE, .cra_module = THIS_MODULE, @@ -633,10 +629,9 @@ static struct skcipher_alg aesni_skciphers[] = { #endif }, { .base = { - .cra_name = "__xts(aes)", - .cra_driver_name = "__xts-aes-aesni", + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-aesni", .cra_priority = 401, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = XTS_AES_CTX_SIZE, .cra_module = THIS_MODULE, @@ -651,9 +646,6 @@ 
static struct skcipher_alg aesni_skciphers[] = { } }; -static -struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)]; - #ifdef CONFIG_X86_64 asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key, u8 iv[AES_BLOCK_SIZE]); @@ -792,10 +784,9 @@ static int xctr_crypt_##suffix(struct skcipher_request *req) \ } \ \ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \ - .base.cra_name = "__xts(aes)", \ - .base.cra_driver_name = "__xts-aes-" driver_name_suffix, \ + .base.cra_name = "xts(aes)", \ + .base.cra_driver_name = "xts-aes-" driver_name_suffix, \ .base.cra_priority = priority, \ - .base.cra_flags = CRYPTO_ALG_INTERNAL, \ .base.cra_blocksize = AES_BLOCK_SIZE, \ .base.cra_ctxsize = XTS_AES_CTX_SIZE, \ .base.cra_module = THIS_MODULE, \ @@ -807,10 +798,9 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \ .encrypt = xts_encrypt_##suffix, \ .decrypt = xts_decrypt_##suffix, \ }, { \ - .base.cra_name = "__ctr(aes)", \ - .base.cra_driver_name = "__ctr-aes-" driver_name_suffix, \ + .base.cra_name = "ctr(aes)", \ + .base.cra_driver_name = "ctr-aes-" driver_name_suffix, \ .base.cra_priority = priority, \ - .base.cra_flags = CRYPTO_ALG_INTERNAL, \ .base.cra_blocksize = 1, \ .base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \ .base.cra_module = THIS_MODULE, \ @@ -822,10 +812,9 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \ .encrypt = ctr_crypt_##suffix, \ .decrypt = ctr_crypt_##suffix, \ }, { \ - .base.cra_name = "__xctr(aes)", \ - .base.cra_driver_name = "__xctr-aes-" driver_name_suffix, \ + .base.cra_name = "xctr(aes)", \ + .base.cra_driver_name = "xctr-aes-" driver_name_suffix, \ .base.cra_priority = priority, \ - .base.cra_flags = CRYPTO_ALG_INTERNAL, \ .base.cra_blocksize = 1, \ .base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \ .base.cra_module = THIS_MODULE, \ @@ -836,16 +825,12 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \ .setkey = aesni_skcipher_setkey, \ .encrypt = xctr_crypt_##suffix, \ .decrypt = xctr_crypt_##suffix, \ -}}; \ - \ -static struct simd_skcipher_alg * \ -simd_skcipher_algs_##suffix[ARRAY_SIZE(skcipher_algs_##suffix)] +}} DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500); #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600); -DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_256, "vaes-avx10_256", 700); -DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_512, "vaes-avx10_512", 800); +DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800); #endif /* The common part of the x86_64 AES-GCM key struct */ @@ -1499,10 +1484,9 @@ static struct aead_alg aes_gcm_algs_##suffix[] = { { \ .chunksize = AES_BLOCK_SIZE, \ .maxauthsize = 16, \ .base = { \ - .cra_name = "__gcm(aes)", \ - .cra_driver_name = "__" generic_driver_name, \ + .cra_name = "gcm(aes)", \ + .cra_driver_name = generic_driver_name, \ .cra_priority = (priority), \ - .cra_flags = CRYPTO_ALG_INTERNAL, \ .cra_blocksize = 1, \ .cra_ctxsize = (ctxsize), \ .cra_module = THIS_MODULE, \ @@ -1516,17 +1500,14 @@ static struct aead_alg aes_gcm_algs_##suffix[] = { { \ .chunksize = AES_BLOCK_SIZE, \ .maxauthsize = 16, \ .base = { \ - .cra_name = "__rfc4106(gcm(aes))", \ - .cra_driver_name = "__" rfc_driver_name, \ + .cra_name = "rfc4106(gcm(aes))", \ + .cra_driver_name = rfc_driver_name, \ .cra_priority = (priority), \ - .cra_flags = CRYPTO_ALG_INTERNAL, \ .cra_blocksize = 1, \ .cra_ctxsize = (ctxsize), \ .cra_module = THIS_MODULE, \ }, \ -} }; \ - \ -static struct simd_aead_alg *aes_gcm_simdalgs_##suffix[2] \ +} } /* 
aes_gcm_algs_aesni */ DEFINE_GCM_ALGS(aesni, /* no flags */ 0, @@ -1556,14 +1537,12 @@ static int __init register_avx_algs(void) if (!boot_cpu_has(X86_FEATURE_AVX)) return 0; - err = simd_register_skciphers_compat(skcipher_algs_aesni_avx, - ARRAY_SIZE(skcipher_algs_aesni_avx), - simd_skcipher_algs_aesni_avx); + err = crypto_register_skciphers(skcipher_algs_aesni_avx, + ARRAY_SIZE(skcipher_algs_aesni_avx)); if (err) return err; - err = simd_register_aeads_compat(aes_gcm_algs_aesni_avx, - ARRAY_SIZE(aes_gcm_algs_aesni_avx), - aes_gcm_simdalgs_aesni_avx); + err = crypto_register_aeads(aes_gcm_algs_aesni_avx, + ARRAY_SIZE(aes_gcm_algs_aesni_avx)); if (err) return err; /* @@ -1579,9 +1558,8 @@ static int __init register_avx_algs(void) !boot_cpu_has(X86_FEATURE_PCLMULQDQ) || !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) return 0; - err = simd_register_skciphers_compat(skcipher_algs_vaes_avx2, - ARRAY_SIZE(skcipher_algs_vaes_avx2), - simd_skcipher_algs_vaes_avx2); + err = crypto_register_skciphers(skcipher_algs_vaes_avx2, + ARRAY_SIZE(skcipher_algs_vaes_avx2)); if (err) return err; @@ -1592,76 +1570,52 @@ static int __init register_avx_algs(void) XFEATURE_MASK_AVX512, NULL)) return 0; - err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_256, - ARRAY_SIZE(skcipher_algs_vaes_avx10_256), - simd_skcipher_algs_vaes_avx10_256); - if (err) - return err; - err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_256, - ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256), - aes_gcm_simdalgs_vaes_avx10_256); + err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_256, + ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256)); if (err) return err; if (boot_cpu_has(X86_FEATURE_PREFER_YMM)) { int i; - for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx10_512); i++) - skcipher_algs_vaes_avx10_512[i].base.cra_priority = 1; + for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx512); i++) + skcipher_algs_vaes_avx512[i].base.cra_priority = 1; for (i = 0; i < ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512); i++) aes_gcm_algs_vaes_avx10_512[i].base.cra_priority = 1; } - err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_512, - ARRAY_SIZE(skcipher_algs_vaes_avx10_512), - simd_skcipher_algs_vaes_avx10_512); + err = crypto_register_skciphers(skcipher_algs_vaes_avx512, + ARRAY_SIZE(skcipher_algs_vaes_avx512)); if (err) return err; - err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_512, - ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512), - aes_gcm_simdalgs_vaes_avx10_512); + err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_512, + ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512)); if (err) return err; #endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */ return 0; } +#define unregister_skciphers(A) \ + if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \ + crypto_unregister_skciphers((A), ARRAY_SIZE(A)) +#define unregister_aeads(A) \ + if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \ + crypto_unregister_aeads((A), ARRAY_SIZE(A)) + static void unregister_avx_algs(void) { - if (simd_skcipher_algs_aesni_avx[0]) - simd_unregister_skciphers(skcipher_algs_aesni_avx, - ARRAY_SIZE(skcipher_algs_aesni_avx), - simd_skcipher_algs_aesni_avx); - if (aes_gcm_simdalgs_aesni_avx[0]) - simd_unregister_aeads(aes_gcm_algs_aesni_avx, - ARRAY_SIZE(aes_gcm_algs_aesni_avx), - aes_gcm_simdalgs_aesni_avx); + unregister_skciphers(skcipher_algs_aesni_avx); + unregister_aeads(aes_gcm_algs_aesni_avx); #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) - if (simd_skcipher_algs_vaes_avx2[0]) - simd_unregister_skciphers(skcipher_algs_vaes_avx2, - 
ARRAY_SIZE(skcipher_algs_vaes_avx2), - simd_skcipher_algs_vaes_avx2); - if (simd_skcipher_algs_vaes_avx10_256[0]) - simd_unregister_skciphers(skcipher_algs_vaes_avx10_256, - ARRAY_SIZE(skcipher_algs_vaes_avx10_256), - simd_skcipher_algs_vaes_avx10_256); - if (aes_gcm_simdalgs_vaes_avx10_256[0]) - simd_unregister_aeads(aes_gcm_algs_vaes_avx10_256, - ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256), - aes_gcm_simdalgs_vaes_avx10_256); - if (simd_skcipher_algs_vaes_avx10_512[0]) - simd_unregister_skciphers(skcipher_algs_vaes_avx10_512, - ARRAY_SIZE(skcipher_algs_vaes_avx10_512), - simd_skcipher_algs_vaes_avx10_512); - if (aes_gcm_simdalgs_vaes_avx10_512[0]) - simd_unregister_aeads(aes_gcm_algs_vaes_avx10_512, - ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512), - aes_gcm_simdalgs_vaes_avx10_512); + unregister_skciphers(skcipher_algs_vaes_avx2); + unregister_skciphers(skcipher_algs_vaes_avx512); + unregister_aeads(aes_gcm_algs_vaes_avx10_256); + unregister_aeads(aes_gcm_algs_vaes_avx10_512); #endif } #else /* CONFIG_X86_64 */ static struct aead_alg aes_gcm_algs_aesni[0]; -static struct simd_aead_alg *aes_gcm_simdalgs_aesni[0]; static int __init register_avx_algs(void) { @@ -1690,15 +1644,13 @@ static int __init aesni_init(void) if (err) return err; - err = simd_register_skciphers_compat(aesni_skciphers, - ARRAY_SIZE(aesni_skciphers), - aesni_simd_skciphers); + err = crypto_register_skciphers(aesni_skciphers, + ARRAY_SIZE(aesni_skciphers)); if (err) goto unregister_cipher; - err = simd_register_aeads_compat(aes_gcm_algs_aesni, - ARRAY_SIZE(aes_gcm_algs_aesni), - aes_gcm_simdalgs_aesni); + err = crypto_register_aeads(aes_gcm_algs_aesni, + ARRAY_SIZE(aes_gcm_algs_aesni)); if (err) goto unregister_skciphers; @@ -1710,12 +1662,11 @@ static int __init aesni_init(void) unregister_avx: unregister_avx_algs(); - simd_unregister_aeads(aes_gcm_algs_aesni, - ARRAY_SIZE(aes_gcm_algs_aesni), - aes_gcm_simdalgs_aesni); + crypto_unregister_aeads(aes_gcm_algs_aesni, + ARRAY_SIZE(aes_gcm_algs_aesni)); unregister_skciphers: - simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), - aesni_simd_skciphers); + crypto_unregister_skciphers(aesni_skciphers, + ARRAY_SIZE(aesni_skciphers)); unregister_cipher: crypto_unregister_alg(&aesni_cipher_alg); return err; @@ -1723,11 +1674,10 @@ unregister_cipher: static void __exit aesni_exit(void) { - simd_unregister_aeads(aes_gcm_algs_aesni, - ARRAY_SIZE(aes_gcm_algs_aesni), - aes_gcm_simdalgs_aesni); - simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), - aesni_simd_skciphers); + crypto_unregister_aeads(aes_gcm_algs_aesni, + ARRAY_SIZE(aes_gcm_algs_aesni)); + crypto_unregister_skciphers(aesni_skciphers, + ARRAY_SIZE(aesni_skciphers)); crypto_unregister_alg(&aesni_cipher_alg); unregister_avx_algs(); } diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c index 87a11804fc77..b4bddcd58457 100644 --- a/arch/x86/crypto/aria_aesni_avx2_glue.c +++ b/arch/x86/crypto/aria_aesni_avx2_glue.c @@ -6,7 +6,6 @@ */ #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <crypto/aria.h> #include <linux/crypto.h> #include <linux/err.h> @@ -165,10 +164,9 @@ static int aria_avx2_init_tfm(struct crypto_skcipher *tfm) static struct skcipher_alg aria_algs[] = { { - .base.cra_name = "__ecb(aria)", - .base.cra_driver_name = "__ecb-aria-avx2", + .base.cra_name = "ecb(aria)", + .base.cra_driver_name = "ecb-aria-avx2", .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = ARIA_BLOCK_SIZE, 
.base.cra_ctxsize = sizeof(struct aria_ctx), .base.cra_module = THIS_MODULE, @@ -178,11 +176,10 @@ static struct skcipher_alg aria_algs[] = { .encrypt = aria_avx2_ecb_encrypt, .decrypt = aria_avx2_ecb_decrypt, }, { - .base.cra_name = "__ctr(aria)", - .base.cra_driver_name = "__ctr-aria-avx2", + .base.cra_name = "ctr(aria)", + .base.cra_driver_name = "ctr-aria-avx2", .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL | - CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE, + .base.cra_flags = CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aria_ctx), .base.cra_module = THIS_MODULE, @@ -197,8 +194,6 @@ static struct skcipher_alg aria_algs[] = { } }; -static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)]; - static int __init aria_avx2_init(void) { const char *feature_name; @@ -233,15 +228,12 @@ static int __init aria_avx2_init(void) aria_ops.aria_ctr_crypt_32way = aria_aesni_avx2_ctr_crypt_32way; } - return simd_register_skciphers_compat(aria_algs, - ARRAY_SIZE(aria_algs), - aria_simd_algs); + return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs)); } static void __exit aria_avx2_exit(void) { - simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs), - aria_simd_algs); + crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs)); } module_init(aria_avx2_init); diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c index 4e1516b76669..ab9b38d05332 100644 --- a/arch/x86/crypto/aria_aesni_avx_glue.c +++ b/arch/x86/crypto/aria_aesni_avx_glue.c @@ -6,7 +6,6 @@ */ #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <crypto/aria.h> #include <linux/crypto.h> #include <linux/err.h> @@ -152,10 +151,9 @@ static int aria_avx_init_tfm(struct crypto_skcipher *tfm) static struct skcipher_alg aria_algs[] = { { - .base.cra_name = "__ecb(aria)", - .base.cra_driver_name = "__ecb-aria-avx", + .base.cra_name = "ecb(aria)", + .base.cra_driver_name = "ecb-aria-avx", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = ARIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aria_ctx), .base.cra_module = THIS_MODULE, @@ -165,10 +163,9 @@ static struct skcipher_alg aria_algs[] = { .encrypt = aria_avx_ecb_encrypt, .decrypt = aria_avx_ecb_decrypt, }, { - .base.cra_name = "__ctr(aria)", - .base.cra_driver_name = "__ctr-aria-avx", + .base.cra_name = "ctr(aria)", + .base.cra_driver_name = "ctr-aria-avx", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aria_ctx), .base.cra_module = THIS_MODULE, @@ -184,8 +181,6 @@ static struct skcipher_alg aria_algs[] = { } }; -static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)]; - static int __init aria_avx_init(void) { const char *feature_name; @@ -213,15 +208,12 @@ static int __init aria_avx_init(void) aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way; } - return simd_register_skciphers_compat(aria_algs, - ARRAY_SIZE(aria_algs), - aria_simd_algs); + return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs)); } static void __exit aria_avx_exit(void) { - simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs), - aria_simd_algs); + crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs)); } module_init(aria_avx_init); diff --git a/arch/x86/crypto/aria_gfni_avx512_glue.c b/arch/x86/crypto/aria_gfni_avx512_glue.c index f4a2208d2638..363cbf4399cc 100644 --- a/arch/x86/crypto/aria_gfni_avx512_glue.c +++ 
b/arch/x86/crypto/aria_gfni_avx512_glue.c @@ -6,7 +6,6 @@ */ #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <crypto/aria.h> #include <linux/crypto.h> #include <linux/err.h> @@ -165,10 +164,9 @@ static int aria_avx512_init_tfm(struct crypto_skcipher *tfm) static struct skcipher_alg aria_algs[] = { { - .base.cra_name = "__ecb(aria)", - .base.cra_driver_name = "__ecb-aria-avx512", + .base.cra_name = "ecb(aria)", + .base.cra_driver_name = "ecb-aria-avx512", .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = ARIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct aria_ctx), .base.cra_module = THIS_MODULE, @@ -178,11 +176,10 @@ static struct skcipher_alg aria_algs[] = { .encrypt = aria_avx512_ecb_encrypt, .decrypt = aria_avx512_ecb_decrypt, }, { - .base.cra_name = "__ctr(aria)", - .base.cra_driver_name = "__ctr-aria-avx512", + .base.cra_name = "ctr(aria)", + .base.cra_driver_name = "ctr-aria-avx512", .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL | - CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE, + .base.cra_flags = CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aria_ctx), .base.cra_module = THIS_MODULE, @@ -197,8 +194,6 @@ static struct skcipher_alg aria_algs[] = { } }; -static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)]; - static int __init aria_avx512_init(void) { const char *feature_name; @@ -229,15 +224,12 @@ static int __init aria_avx512_init(void) aria_ops.aria_decrypt_64way = aria_gfni_avx512_decrypt_64way; aria_ops.aria_ctr_crypt_64way = aria_gfni_avx512_ctr_crypt_64way; - return simd_register_skciphers_compat(aria_algs, - ARRAY_SIZE(aria_algs), - aria_simd_algs); + return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs)); } static void __exit aria_avx512_exit(void) { - simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs), - aria_simd_algs); + crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs)); } module_init(aria_avx512_init); diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index e7e4d64e9577..2d2f4e16537c 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -6,7 +6,6 @@ */ #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/module.h> @@ -69,10 +68,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg camellia_algs[] = { { - .base.cra_name = "__ecb(camellia)", - .base.cra_driver_name = "__ecb-camellia-aesni-avx2", + .base.cra_name = "ecb(camellia)", + .base.cra_driver_name = "ecb-camellia-aesni-avx2", .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_ctx), .base.cra_module = THIS_MODULE, @@ -82,10 +80,9 @@ static struct skcipher_alg camellia_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(camellia)", - .base.cra_driver_name = "__cbc-camellia-aesni-avx2", + .base.cra_name = "cbc(camellia)", + .base.cra_driver_name = "cbc-camellia-aesni-avx2", .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_ctx), .base.cra_module = THIS_MODULE, @@ -98,8 +95,6 @@ static struct skcipher_alg camellia_algs[] = { }, }; -static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)]; - 
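The aria, camellia, cast5/cast6 and aegis conversions above all follow the same shape: the skcipher table is registered directly with crypto_register_skciphers() after the CPU feature check, and the shadow table of simd_skcipher_alg pointers plus simd_register_skciphers_compat()/simd_unregister_skciphers() disappears. A condensed kernel-module-style sketch of that pattern, using only calls that appear in this series; the algorithm table, names and the include list are placeholders and abbreviated, not a real driver:

```c
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <asm/fpu/api.h>

/* Placeholder table; a real driver fills in keysize/setkey/encrypt/decrypt. */
static struct skcipher_alg example_algs[] = {
	{
		.base.cra_name		= "ecb(example)",
		.base.cra_driver_name	= "ecb-example-avx",
		.base.cra_priority	= 400,
		.base.cra_blocksize	= 16,
		.base.cra_module	= THIS_MODULE,
	},
};

static int __init example_glue_init(void)
{
	const char *feature_name;

	/* Same gating as the converted init functions above. */
	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
			       &feature_name)) {
		pr_info("CPU feature '%s' is not supported.\n", feature_name);
		return -ENODEV;
	}

	/* Register the algorithms directly; no simd_* wrapper layer. */
	return crypto_register_skciphers(example_algs,
					 ARRAY_SIZE(example_algs));
}

static void __exit example_glue_exit(void)
{
	crypto_unregister_skciphers(example_algs, ARRAY_SIZE(example_algs));
}

module_init(example_glue_init);
module_exit(example_glue_exit);
MODULE_LICENSE("GPL");
```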
static int __init camellia_aesni_init(void) { const char *feature_name; @@ -118,15 +113,13 @@ static int __init camellia_aesni_init(void) return -ENODEV; } - return simd_register_skciphers_compat(camellia_algs, - ARRAY_SIZE(camellia_algs), - camellia_simd_algs); + return crypto_register_skciphers(camellia_algs, + ARRAY_SIZE(camellia_algs)); } static void __exit camellia_aesni_fini(void) { - simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs), - camellia_simd_algs); + crypto_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs)); } module_init(camellia_aesni_init); diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index c7ccf63e741e..a7d162388142 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -6,7 +6,6 @@ */ #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/module.h> @@ -69,10 +68,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg camellia_algs[] = { { - .base.cra_name = "__ecb(camellia)", - .base.cra_driver_name = "__ecb-camellia-aesni", + .base.cra_name = "ecb(camellia)", + .base.cra_driver_name = "ecb-camellia-aesni", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_ctx), .base.cra_module = THIS_MODULE, @@ -82,10 +80,9 @@ static struct skcipher_alg camellia_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(camellia)", - .base.cra_driver_name = "__cbc-camellia-aesni", + .base.cra_name = "cbc(camellia)", + .base.cra_driver_name = "cbc-camellia-aesni", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_ctx), .base.cra_module = THIS_MODULE, @@ -98,8 +95,6 @@ static struct skcipher_alg camellia_algs[] = { } }; -static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)]; - static int __init camellia_aesni_init(void) { const char *feature_name; @@ -117,15 +112,13 @@ static int __init camellia_aesni_init(void) return -ENODEV; } - return simd_register_skciphers_compat(camellia_algs, - ARRAY_SIZE(camellia_algs), - camellia_simd_algs); + return crypto_register_skciphers(camellia_algs, + ARRAY_SIZE(camellia_algs)); } static void __exit camellia_aesni_fini(void) { - simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs), - camellia_simd_algs); + crypto_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs)); } module_init(camellia_aesni_init); diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 3976a87f92ad..3aca04d43b34 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -8,7 +8,6 @@ #include <crypto/algapi.h> #include <crypto/cast5.h> -#include <crypto/internal/simd.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/module.h> @@ -64,10 +63,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg cast5_algs[] = { { - .base.cra_name = "__ecb(cast5)", - .base.cra_driver_name = "__ecb-cast5-avx", + .base.cra_name = "ecb(cast5)", + .base.cra_driver_name = "ecb-cast5-avx", .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAST5_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cast5_ctx), .base.cra_module = THIS_MODULE, @@ -77,10 +75,9 @@ 
static struct skcipher_alg cast5_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(cast5)", - .base.cra_driver_name = "__cbc-cast5-avx", + .base.cra_name = "cbc(cast5)", + .base.cra_driver_name = "cbc-cast5-avx", .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAST5_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cast5_ctx), .base.cra_module = THIS_MODULE, @@ -93,8 +90,6 @@ static struct skcipher_alg cast5_algs[] = { } }; -static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)]; - static int __init cast5_init(void) { const char *feature_name; @@ -105,15 +100,13 @@ static int __init cast5_init(void) return -ENODEV; } - return simd_register_skciphers_compat(cast5_algs, - ARRAY_SIZE(cast5_algs), - cast5_simd_algs); + return crypto_register_skciphers(cast5_algs, + ARRAY_SIZE(cast5_algs)); } static void __exit cast5_exit(void) { - simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs), - cast5_simd_algs); + crypto_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs)); } module_init(cast5_init); diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 7e2aea372349..c4dd28c30303 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -14,7 +14,6 @@ #include <linux/err.h> #include <crypto/algapi.h> #include <crypto/cast6.h> -#include <crypto/internal/simd.h> #include "ecb_cbc_helpers.h" @@ -64,10 +63,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg cast6_algs[] = { { - .base.cra_name = "__ecb(cast6)", - .base.cra_driver_name = "__ecb-cast6-avx", + .base.cra_name = "ecb(cast6)", + .base.cra_driver_name = "ecb-cast6-avx", .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAST6_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cast6_ctx), .base.cra_module = THIS_MODULE, @@ -77,10 +75,9 @@ static struct skcipher_alg cast6_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(cast6)", - .base.cra_driver_name = "__cbc-cast6-avx", + .base.cra_name = "cbc(cast6)", + .base.cra_driver_name = "cbc-cast6-avx", .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAST6_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cast6_ctx), .base.cra_module = THIS_MODULE, @@ -93,8 +90,6 @@ static struct skcipher_alg cast6_algs[] = { }, }; -static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)]; - static int __init cast6_init(void) { const char *feature_name; @@ -105,15 +100,12 @@ static int __init cast6_init(void) return -ENODEV; } - return simd_register_skciphers_compat(cast6_algs, - ARRAY_SIZE(cast6_algs), - cast6_simd_algs); + return crypto_register_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs)); } static void __exit cast6_exit(void) { - simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs), - cast6_simd_algs); + crypto_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs)); } module_init(cast6_init); diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c deleted file mode 100644 index 8bb74a272879..000000000000 --- a/arch/x86/crypto/chacha_glue.c +++ /dev/null @@ -1,311 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * x64 SIMD accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright (C) 2015 Martin Willi - */ - -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/simd.h> -#include 
<crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sizes.h> -#include <asm/simd.h> - -asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); -asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); -asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds); - -asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); -asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); -asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - -asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); -asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); -asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd); -static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2); -static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl); - -static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks) -{ - len = min(len, maxblocks * CHACHA_BLOCK_SIZE); - return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE; -} - -static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds) -{ - if (IS_ENABLED(CONFIG_AS_AVX512) && - static_branch_likely(&chacha_use_avx512vl)) { - while (bytes >= CHACHA_BLOCK_SIZE * 8) { - chacha_8block_xor_avx512vl(state, dst, src, bytes, - nrounds); - bytes -= CHACHA_BLOCK_SIZE * 8; - src += CHACHA_BLOCK_SIZE * 8; - dst += CHACHA_BLOCK_SIZE * 8; - state[12] += 8; - } - if (bytes > CHACHA_BLOCK_SIZE * 4) { - chacha_8block_xor_avx512vl(state, dst, src, bytes, - nrounds); - state[12] += chacha_advance(bytes, 8); - return; - } - if (bytes > CHACHA_BLOCK_SIZE * 2) { - chacha_4block_xor_avx512vl(state, dst, src, bytes, - nrounds); - state[12] += chacha_advance(bytes, 4); - return; - } - if (bytes) { - chacha_2block_xor_avx512vl(state, dst, src, bytes, - nrounds); - state[12] += chacha_advance(bytes, 2); - return; - } - } - - if (static_branch_likely(&chacha_use_avx2)) { - while (bytes >= CHACHA_BLOCK_SIZE * 8) { - chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); - bytes -= CHACHA_BLOCK_SIZE * 8; - src += CHACHA_BLOCK_SIZE * 8; - dst += CHACHA_BLOCK_SIZE * 8; - state[12] += 8; - } - if (bytes > CHACHA_BLOCK_SIZE * 4) { - chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); - state[12] += chacha_advance(bytes, 8); - return; - } - if (bytes > CHACHA_BLOCK_SIZE * 2) { - chacha_4block_xor_avx2(state, dst, src, bytes, nrounds); - state[12] += chacha_advance(bytes, 4); - return; - } - if (bytes > CHACHA_BLOCK_SIZE) { - chacha_2block_xor_avx2(state, dst, src, bytes, nrounds); - state[12] += chacha_advance(bytes, 2); - return; - } - } - - while (bytes >= CHACHA_BLOCK_SIZE * 4) { - chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); - bytes -= CHACHA_BLOCK_SIZE * 4; - src += CHACHA_BLOCK_SIZE * 4; - dst += CHACHA_BLOCK_SIZE * 4; - state[12] += 4; - } - if (bytes > CHACHA_BLOCK_SIZE) { - chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); - state[12] += chacha_advance(bytes, 4); - return; - } - if (bytes) { - chacha_block_xor_ssse3(state, dst, src, 
bytes, nrounds); - state[12]++; - } -} - -void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -{ - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { - hchacha_block_generic(state, stream, nrounds); - } else { - kernel_fpu_begin(); - hchacha_block_ssse3(state, stream, nrounds); - kernel_fpu_end(); - } -} -EXPORT_SYMBOL(hchacha_block_arch); - -void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, - int nrounds) -{ - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() || - bytes <= CHACHA_BLOCK_SIZE) - return chacha_crypt_generic(state, dst, src, bytes, nrounds); - - do { - unsigned int todo = min_t(unsigned int, bytes, SZ_4K); - - kernel_fpu_begin(); - chacha_dosimd(state, dst, src, todo, nrounds); - kernel_fpu_end(); - - bytes -= todo; - src += todo; - dst += todo; - } while (bytes); -} -EXPORT_SYMBOL(chacha_crypt_arch); - -static int chacha_simd_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - u32 state[CHACHA_STATE_WORDS] __aligned(8); - struct skcipher_walk walk; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - chacha_init(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - if (!static_branch_likely(&chacha_use_simd) || - !crypto_simd_usable()) { - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); - } else { - kernel_fpu_begin(); - chacha_dosimd(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); - kernel_fpu_end(); - } - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static int chacha_simd(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_simd_stream_xor(req, ctx, req->iv); -} - -static int xchacha_simd(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 state[CHACHA_STATE_WORDS] __aligned(8); - struct chacha_ctx subctx; - u8 real_iv[16]; - - chacha_init(state, ctx->key, req->iv); - - if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { - kernel_fpu_begin(); - hchacha_block_ssse3(state, subctx.key, ctx->nrounds); - kernel_fpu_end(); - } else { - hchacha_block_generic(state, subctx.key, ctx->nrounds); - } - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); - return chacha_simd_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-simd", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_simd, - .decrypt = chacha_simd, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-simd", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - 
.encrypt = xchacha_simd, - .decrypt = xchacha_simd, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-simd", - .base.cra_priority = 300, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_simd, - .decrypt = xchacha_simd, - }, -}; - -static int __init chacha_simd_mod_init(void) -{ - if (!boot_cpu_has(X86_FEATURE_SSSE3)) - return 0; - - static_branch_enable(&chacha_use_simd); - - if (boot_cpu_has(X86_FEATURE_AVX) && - boot_cpu_has(X86_FEATURE_AVX2) && - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { - static_branch_enable(&chacha_use_avx2); - - if (IS_ENABLED(CONFIG_AS_AVX512) && - boot_cpu_has(X86_FEATURE_AVX512VL) && - boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */ - static_branch_enable(&chacha_use_avx512vl); - } - return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ? - crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; -} - -static void __exit chacha_simd_mod_fini(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3)) - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -module_init(chacha_simd_mod_init); -module_exit(chacha_simd_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-simd"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-simd"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-simd"); diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index 99cb983ded9e..c4fbaa82ed7a 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -103,8 +103,8 @@ SYM_FUNC_START(clmul_ghash_mul) SYM_FUNC_END(clmul_ghash_mul) /* - * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, - * const le128 *shash); + * int clmul_ghash_update(char *dst, const char *src, unsigned int srclen, + * const le128 *shash); */ SYM_FUNC_START(clmul_ghash_update) FRAME_BEGIN @@ -127,6 +127,7 @@ SYM_FUNC_START(clmul_ghash_update) pshufb BSWAP, DATA movups DATA, (%rdi) .Lupdate_just_ret: + mov %rdx, %rax FRAME_END RET SYM_FUNC_END(clmul_ghash_update) diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index c759ec808bf1..aea5d4d06be7 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -7,41 +7,27 @@ * Author: Huang Ying <ying.huang@intel.com> */ -#include <linux/err.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> -#include <crypto/cryptd.h> -#include <crypto/gf128mul.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <asm/cpu_device_id.h> #include <asm/simd.h> +#include <crypto/b128ops.h> +#include <crypto/ghash.h> +#include <crypto/internal/hash.h> +#include <crypto/utils.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> #include <linux/unaligned.h> -#define GHASH_BLOCK_SIZE 16 -#define GHASH_DIGEST_SIZE 16 +asmlinkage void clmul_ghash_mul(char *dst, const le128 *shash); -void 
clmul_ghash_mul(char *dst, const le128 *shash); +asmlinkage int clmul_ghash_update(char *dst, const char *src, + unsigned int srclen, const le128 *shash); -void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, - const le128 *shash); - -struct ghash_async_ctx { - struct cryptd_ahash *cryptd_tfm; -}; - -struct ghash_ctx { +struct x86_ghash_ctx { le128 shash; }; -struct ghash_desc_ctx { - u8 buffer[GHASH_BLOCK_SIZE]; - u32 bytes; -}; - static int ghash_init(struct shash_desc *desc) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); @@ -54,7 +40,7 @@ static int ghash_init(struct shash_desc *desc) static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { - struct ghash_ctx *ctx = crypto_shash_ctx(tfm); + struct x86_ghash_ctx *ctx = crypto_shash_ctx(tfm); u64 a, b; if (keylen != GHASH_BLOCK_SIZE) @@ -95,64 +81,38 @@ static int ghash_setkey(struct crypto_shash *tfm, static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { + struct x86_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; + int remain; kernel_fpu_begin(); - if (dctx->bytes) { - int n = min(srclen, dctx->bytes); - u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - dctx->bytes -= n; - srclen -= n; - - while (n--) - *pos++ ^= *src++; - - if (!dctx->bytes) - clmul_ghash_mul(dst, &ctx->shash); - } - - clmul_ghash_update(dst, src, srclen, &ctx->shash); + remain = clmul_ghash_update(dst, src, srclen, &ctx->shash); kernel_fpu_end(); - - if (srclen & 0xf) { - src += srclen - (srclen & 0xf); - srclen &= 0xf; - dctx->bytes = GHASH_BLOCK_SIZE - srclen; - while (srclen--) - *dst++ ^= *src++; - } - - return 0; + return remain; } -static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) +static void ghash_flush(struct x86_ghash_ctx *ctx, struct ghash_desc_ctx *dctx, + const u8 *src, unsigned int len) { u8 *dst = dctx->buffer; - if (dctx->bytes) { - u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - while (dctx->bytes--) - *tmp++ ^= 0; - - kernel_fpu_begin(); + kernel_fpu_begin(); + if (len) { + crypto_xor(dst, src, len); clmul_ghash_mul(dst, &ctx->shash); - kernel_fpu_end(); } - - dctx->bytes = 0; + kernel_fpu_end(); } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { + struct x86_ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; - ghash_flush(ctx, dctx); + ghash_flush(ctx, dctx, src, len); memcpy(dst, buf, GHASH_BLOCK_SIZE); return 0; @@ -162,186 +122,20 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, - .final = ghash_final, + .finup = ghash_finup, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { - .cra_name = "__ghash", - .cra_driver_name = "__ghash-pclmulqdqni", - .cra_priority = 0, - .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_name = "ghash", + .cra_driver_name = "ghash-pclmulqdqni", + .cra_priority = 400, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = GHASH_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct ghash_ctx), + .cra_ctxsize = sizeof(struct x86_ghash_ctx), .cra_module = THIS_MODULE, }, }; -static int ghash_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = 
crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); - - desc->tfm = child; - return crypto_shash_init(desc); -} - -static void ghash_init_cryptd_req(struct ahash_request *req) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); - ahash_request_set_callback(cryptd_req, req->base.flags, - req->base.complete, req->base.data); - ahash_request_set_crypt(cryptd_req, req->src, req->result, - req->nbytes); -} - -static int ghash_async_update(struct ahash_request *req) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - ghash_init_cryptd_req(req); - return crypto_ahash_update(cryptd_req); - } else { - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - return shash_ahash_update(req, desc); - } -} - -static int ghash_async_final(struct ahash_request *req) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - ghash_init_cryptd_req(req); - return crypto_ahash_final(cryptd_req); - } else { - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - return crypto_shash_final(desc, req->result); - } -} - -static int ghash_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - ghash_async_init(req); - memcpy(dctx, in, sizeof(*dctx)); - return 0; - -} - -static int ghash_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - memcpy(out, dctx, sizeof(*dctx)); - return 0; - -} - -static int ghash_async_digest(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - ghash_init_cryptd_req(req); - return crypto_ahash_digest(cryptd_req); - } else { - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); - - desc->tfm = child; - return shash_ahash_digest(req, desc); - } -} - -static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct crypto_ahash *child = &ctx->cryptd_tfm->base; - - 
crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) - & CRYPTO_TFM_REQ_MASK); - return crypto_ahash_setkey(child, key, keylen); -} - -static int ghash_async_init_tfm(struct crypto_tfm *tfm) -{ - struct cryptd_ahash *cryptd_tfm; - struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); - - cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni", - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - ctx->cryptd_tfm = cryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&cryptd_tfm->base)); - - return 0; -} - -static void ghash_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); - - cryptd_free_ahash(ctx->cryptd_tfm); -} - -static struct ahash_alg ghash_async_alg = { - .init = ghash_async_init, - .update = ghash_async_update, - .final = ghash_async_final, - .setkey = ghash_async_setkey, - .digest = ghash_async_digest, - .export = ghash_async_export, - .import = ghash_async_import, - .halg = { - .digestsize = GHASH_DIGEST_SIZE, - .statesize = sizeof(struct ghash_desc_ctx), - .base = { - .cra_name = "ghash", - .cra_driver_name = "ghash-clmulni", - .cra_priority = 400, - .cra_ctxsize = sizeof(struct ghash_async_ctx), - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = GHASH_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_init = ghash_async_init_tfm, - .cra_exit = ghash_async_exit_tfm, - }, - }, -}; - static const struct x86_cpu_id pcmul_cpu_id[] = { X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), /* Pickle-Mickle-Duck */ {} @@ -350,29 +144,14 @@ MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id); static int __init ghash_pclmulqdqni_mod_init(void) { - int err; - if (!x86_match_cpu(pcmul_cpu_id)) return -ENODEV; - err = crypto_register_shash(&ghash_alg); - if (err) - goto err_out; - err = crypto_register_ahash(&ghash_async_alg); - if (err) - goto err_shash; - - return 0; - -err_shash: - crypto_unregister_shash(&ghash_alg); -err_out: - return err; + return crypto_register_shash(&ghash_alg); } static void __exit ghash_pclmulqdqni_mod_exit(void) { - crypto_unregister_ahash(&ghash_async_alg); crypto_unregister_shash(&ghash_alg); } diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c deleted file mode 100644 index 08ff4b489f7e..000000000000 --- a/arch/x86/crypto/poly1305_glue.c +++ /dev/null @@ -1,290 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* - * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
- */ - -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <crypto/internal/simd.h> -#include <linux/crypto.h> -#include <linux/jump_label.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sizes.h> -#include <asm/cpu_device_id.h> -#include <asm/simd.h> - -asmlinkage void poly1305_init_x86_64(void *ctx, - const u8 key[POLY1305_BLOCK_SIZE]); -asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, - const size_t len, const u32 padbit); -asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], - const u32 nonce[4]); -asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], - const u32 nonce[4]); -asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len, - const u32 padbit); -asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len, - const u32 padbit); -asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp, - const size_t len, const u32 padbit); - -static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx); -static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2); -static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512); - -struct poly1305_arch_internal { - union { - struct { - u32 h[5]; - u32 is_base2_26; - }; - u64 hs[3]; - }; - u64 r[2]; - u64 pad; - struct { u32 r2, r1, r4, r3; } rn[9]; -}; - -/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit - * the unfortunate situation of using AVX and then having to go back to scalar - * -- because the user is silly and has called the update function from two - * separate contexts -- then we need to convert back to the original base before - * proceeding. It is possible to reason that the initial reduction below is - * sufficient given the implementation invariants. However, for an avoidance of - * doubt and because this is not performance critical, we do the full reduction - * anyway. Z3 proof of below function: https://xn--4db.cc/ltPtHCKN/py - */ -static void convert_to_base2_64(void *ctx) -{ - struct poly1305_arch_internal *state = ctx; - u32 cy; - - if (!state->is_base2_26) - return; - - cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy; - cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy; - cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy; - cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy; - state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0]; - state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12); - state->hs[2] = state->h[4] >> 24; -#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1)) - cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL); - state->hs[2] &= 3; - state->hs[0] += cy; - state->hs[1] += (cy = ULT(state->hs[0], cy)); - state->hs[2] += ULT(state->hs[1], cy); -#undef ULT - state->is_base2_26 = 0; -} - -static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE]) -{ - poly1305_init_x86_64(ctx, key); -} - -static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, - const u32 padbit) -{ - struct poly1305_arch_internal *state = ctx; - - /* SIMD disables preemption, so relax after processing each page. 
*/ - BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE || - SZ_4K % POLY1305_BLOCK_SIZE); - - if (!static_branch_likely(&poly1305_use_avx) || - (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || - !crypto_simd_usable()) { - convert_to_base2_64(ctx); - poly1305_blocks_x86_64(ctx, inp, len, padbit); - return; - } - - do { - const size_t bytes = min_t(size_t, len, SZ_4K); - - kernel_fpu_begin(); - if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512)) - poly1305_blocks_avx512(ctx, inp, bytes, padbit); - else if (static_branch_likely(&poly1305_use_avx2)) - poly1305_blocks_avx2(ctx, inp, bytes, padbit); - else - poly1305_blocks_avx(ctx, inp, bytes, padbit); - kernel_fpu_end(); - - len -= bytes; - inp += bytes; - } while (len); -} - -static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], - const u32 nonce[4]) -{ - if (!static_branch_likely(&poly1305_use_avx)) - poly1305_emit_x86_64(ctx, mac, nonce); - else - poly1305_emit_avx(ctx, mac, nonce); -} - -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) -{ - poly1305_simd_init(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(&key[16]); - dctx->s[1] = get_unaligned_le32(&key[20]); - dctx->s[2] = get_unaligned_le32(&key[24]); - dctx->s[3] = get_unaligned_le32(&key[28]); - dctx->buflen = 0; - dctx->sset = true; -} -EXPORT_SYMBOL(poly1305_init_arch); - -static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx, - const u8 *inp, unsigned int len) -{ - unsigned int acc = 0; - if (unlikely(!dctx->sset)) { - if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) { - poly1305_simd_init(&dctx->h, inp); - inp += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - acc += POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (len >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(&inp[0]); - dctx->s[1] = get_unaligned_le32(&inp[4]); - dctx->s[2] = get_unaligned_le32(&inp[8]); - dctx->s[3] = get_unaligned_le32(&inp[12]); - acc += POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } - return acc; -} - -void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) -{ - unsigned int bytes, used; - - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - srclen -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, POLY1305_BLOCK_SIZE))) - poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { - bytes = round_down(srclen, POLY1305_BLOCK_SIZE); - srclen -= bytes; - used = crypto_poly1305_setdctxkey(dctx, src, bytes); - if (likely(bytes - used)) - poly1305_simd_blocks(&dctx->h, src + used, bytes - used, 1); - src += bytes; - } - - if (unlikely(srclen)) { - dctx->buflen = srclen; - memcpy(dctx->buf, src, srclen); - } -} -EXPORT_SYMBOL(poly1305_update_arch); - -void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -{ - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); - poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - - poly1305_simd_emit(&dctx->h, dst, dctx->s); - memzero_explicit(dctx, sizeof(*dctx)); -} -EXPORT_SYMBOL(poly1305_final_arch); - -static int crypto_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = 
shash_desc_ctx(desc); - - *dctx = (struct poly1305_desc_ctx){}; - return 0; -} - -static int crypto_poly1305_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - poly1305_update_arch(dctx, src, srclen); - return 0; -} - -static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_arch(dctx, dst); - return 0; -} - -static struct shash_alg alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_init, - .update = crypto_poly1305_update, - .final = crypto_poly1305_final, - .descsize = sizeof(struct poly1305_desc_ctx), - .base = { - .cra_name = "poly1305", - .cra_driver_name = "poly1305-simd", - .cra_priority = 300, - .cra_blocksize = POLY1305_BLOCK_SIZE, - .cra_module = THIS_MODULE, - }, -}; - -static int __init poly1305_simd_mod_init(void) -{ - if (boot_cpu_has(X86_FEATURE_AVX) && - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) - static_branch_enable(&poly1305_use_avx); - if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) - static_branch_enable(&poly1305_use_avx2); - if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) && - boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) && - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) && - /* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */ - boot_cpu_data.x86_vfm != INTEL_SKYLAKE_X) - static_branch_enable(&poly1305_use_avx512); - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0; -} - -static void __exit poly1305_simd_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) - crypto_unregister_shash(&alg); -} - -module_init(poly1305_simd_mod_init); -module_exit(poly1305_simd_mod_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); -MODULE_DESCRIPTION("Poly1305 authenticator"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-simd"); diff --git a/arch/x86/crypto/polyval-clmulni_glue.c b/arch/x86/crypto/polyval-clmulni_glue.c index 8fa58b0f3cb3..6b466867f91a 100644 --- a/arch/x86/crypto/polyval-clmulni_glue.c +++ b/arch/x86/crypto/polyval-clmulni_glue.c @@ -16,16 +16,15 @@ * operations. 
*/ -#include <crypto/algapi.h> +#include <asm/cpu_device_id.h> +#include <asm/fpu/api.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> #include <crypto/polyval.h> -#include <linux/crypto.h> -#include <linux/init.h> +#include <crypto/utils.h> +#include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> -#include <asm/cpu_device_id.h> -#include <asm/simd.h> +#include <linux/string.h> #define POLYVAL_ALIGN 16 #define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN) @@ -42,7 +41,6 @@ struct polyval_tfm_ctx { struct polyval_desc_ctx { u8 buffer[POLYVAL_BLOCK_SIZE]; - u32 bytes; }; asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys, @@ -57,25 +55,16 @@ static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm) static void internal_polyval_update(const struct polyval_tfm_ctx *keys, const u8 *in, size_t nblocks, u8 *accumulator) { - if (likely(crypto_simd_usable())) { - kernel_fpu_begin(); - clmul_polyval_update(keys, in, nblocks, accumulator); - kernel_fpu_end(); - } else { - polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in, - nblocks, accumulator); - } + kernel_fpu_begin(); + clmul_polyval_update(keys, in, nblocks, accumulator); + kernel_fpu_end(); } static void internal_polyval_mul(u8 *op1, const u8 *op2) { - if (likely(crypto_simd_usable())) { - kernel_fpu_begin(); - clmul_polyval_mul(op1, op2); - kernel_fpu_end(); - } else { - polyval_mul_non4k(op1, op2); - } + kernel_fpu_begin(); + clmul_polyval_mul(op1, op2); + kernel_fpu_end(); } static int polyval_x86_setkey(struct crypto_shash *tfm, @@ -112,49 +101,27 @@ static int polyval_x86_update(struct shash_desc *desc, { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm); - u8 *pos; unsigned int nblocks; - unsigned int n; - - if (dctx->bytes) { - n = min(srclen, dctx->bytes); - pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes; - - dctx->bytes -= n; - srclen -= n; - - while (n--) - *pos++ ^= *src++; - if (!dctx->bytes) - internal_polyval_mul(dctx->buffer, - tctx->key_powers[NUM_KEY_POWERS-1]); - } - - while (srclen >= POLYVAL_BLOCK_SIZE) { + do { /* Allow rescheduling every 4K bytes. 
*/ nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; internal_polyval_update(tctx, src, nblocks, dctx->buffer); srclen -= nblocks * POLYVAL_BLOCK_SIZE; src += nblocks * POLYVAL_BLOCK_SIZE; - } + } while (srclen >= POLYVAL_BLOCK_SIZE); - if (srclen) { - dctx->bytes = POLYVAL_BLOCK_SIZE - srclen; - pos = dctx->buffer; - while (srclen--) - *pos++ ^= *src++; - } - - return 0; + return srclen; } -static int polyval_x86_final(struct shash_desc *desc, u8 *dst) +static int polyval_x86_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm); - if (dctx->bytes) { + if (len) { + crypto_xor(dctx->buffer, src, len); internal_polyval_mul(dctx->buffer, tctx->key_powers[NUM_KEY_POWERS-1]); } @@ -168,13 +135,14 @@ static struct shash_alg polyval_alg = { .digestsize = POLYVAL_DIGEST_SIZE, .init = polyval_x86_init, .update = polyval_x86_update, - .final = polyval_x86_final, + .finup = polyval_x86_finup, .setkey = polyval_x86_setkey, .descsize = sizeof(struct polyval_desc_ctx), .base = { .cra_name = "polyval", .cra_driver_name = "polyval-clmulni", .cra_priority = 200, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = POLYVAL_BLOCK_SIZE, .cra_ctxsize = POLYVAL_CTX_SIZE, .cra_module = THIS_MODULE, diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 347e97f4b713..f5f2121b7956 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c @@ -10,7 +10,6 @@ #include <linux/crypto.h> #include <linux/err.h> #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <crypto/serpent.h> #include "serpent-avx.h" @@ -65,10 +64,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg serpent_algs[] = { { - .base.cra_name = "__ecb(serpent)", - .base.cra_driver_name = "__ecb-serpent-avx2", + .base.cra_name = "ecb(serpent)", + .base.cra_driver_name = "ecb-serpent-avx2", .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, @@ -78,10 +76,9 @@ static struct skcipher_alg serpent_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(serpent)", - .base.cra_driver_name = "__cbc-serpent-avx2", + .base.cra_name = "cbc(serpent)", + .base.cra_driver_name = "cbc-serpent-avx2", .base.cra_priority = 600, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, @@ -94,8 +91,6 @@ static struct skcipher_alg serpent_algs[] = { }, }; -static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)]; - static int __init serpent_avx2_init(void) { const char *feature_name; @@ -110,15 +105,13 @@ static int __init serpent_avx2_init(void) return -ENODEV; } - return simd_register_skciphers_compat(serpent_algs, - ARRAY_SIZE(serpent_algs), - serpent_simd_algs); + return crypto_register_skciphers(serpent_algs, + ARRAY_SIZE(serpent_algs)); } static void __exit serpent_avx2_fini(void) { - simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs), - serpent_simd_algs); + crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs)); } module_init(serpent_avx2_init); diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 6c248e1ea4ef..e640abc1cb8a 100644 --- 
a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c @@ -13,7 +13,6 @@ #include <linux/crypto.h> #include <linux/err.h> #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <crypto/serpent.h> #include "serpent-avx.h" @@ -71,10 +70,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg serpent_algs[] = { { - .base.cra_name = "__ecb(serpent)", - .base.cra_driver_name = "__ecb-serpent-avx", + .base.cra_name = "ecb(serpent)", + .base.cra_driver_name = "ecb-serpent-avx", .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, @@ -84,10 +82,9 @@ static struct skcipher_alg serpent_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(serpent)", - .base.cra_driver_name = "__cbc-serpent-avx", + .base.cra_name = "cbc(serpent)", + .base.cra_driver_name = "cbc-serpent-avx", .base.cra_priority = 500, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, @@ -100,8 +97,6 @@ static struct skcipher_alg serpent_algs[] = { }, }; -static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)]; - static int __init serpent_init(void) { const char *feature_name; @@ -112,15 +107,13 @@ static int __init serpent_init(void) return -ENODEV; } - return simd_register_skciphers_compat(serpent_algs, - ARRAY_SIZE(serpent_algs), - serpent_simd_algs); + return crypto_register_skciphers(serpent_algs, + ARRAY_SIZE(serpent_algs)); } static void __exit serpent_exit(void) { - simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs), - serpent_simd_algs); + crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs)); } module_init(serpent_init); diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index d78f37e9b2cf..80ee17ec21b4 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c @@ -18,7 +18,6 @@ #include <linux/err.h> #include <crypto/algapi.h> #include <crypto/b128ops.h> -#include <crypto/internal/simd.h> #include <crypto/serpent.h> #include "serpent-sse2.h" @@ -74,10 +73,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg serpent_algs[] = { { - .base.cra_name = "__ecb(serpent)", - .base.cra_driver_name = "__ecb-serpent-sse2", + .base.cra_name = "ecb(serpent)", + .base.cra_driver_name = "ecb-serpent-sse2", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, @@ -87,10 +85,9 @@ static struct skcipher_alg serpent_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(serpent)", - .base.cra_driver_name = "__cbc-serpent-sse2", + .base.cra_name = "cbc(serpent)", + .base.cra_driver_name = "cbc-serpent-sse2", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = SERPENT_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct serpent_ctx), .base.cra_module = THIS_MODULE, @@ -103,8 +100,6 @@ static struct skcipher_alg serpent_algs[] = { }, }; -static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)]; - static int __init serpent_sse2_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2)) { @@ -112,15 +107,13 @@ static int __init 
serpent_sse2_init(void) return -ENODEV; } - return simd_register_skciphers_compat(serpent_algs, - ARRAY_SIZE(serpent_algs), - serpent_simd_algs); + return crypto_register_skciphers(serpent_algs, + ARRAY_SIZE(serpent_algs)); } static void __exit serpent_sse2_exit(void) { - simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs), - serpent_simd_algs); + crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs)); } module_init(serpent_sse2_init); diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index ab8bc54f254d..0a912bfc86c5 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -16,21 +16,17 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/cpu_device_id.h> +#include <asm/simd.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/cpu_device_id.h> -#include <asm/simd.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> static const struct x86_cpu_id module_cpu_ids[] = { -#ifdef CONFIG_AS_SHA1_NI X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL), -#endif X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), @@ -38,14 +34,10 @@ static const struct x86_cpu_id module_cpu_ids[] = { }; MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids); -static int sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len, sha1_block_fn *sha1_xform) +static inline int sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len, sha1_block_fn *sha1_xform) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) - return crypto_sha1_update(desc, data, len); + int remain; /* * Make sure struct sha1_state begins directly with the SHA1 @@ -54,22 +46,18 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0); kernel_fpu_begin(); - sha1_base_do_update(desc, data, len, sha1_xform); + remain = sha1_base_do_update_blocks(desc, data, len, sha1_xform); kernel_fpu_end(); - return 0; + return remain; } -static int sha1_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out, sha1_block_fn *sha1_xform) +static inline int sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out, + sha1_block_fn *sha1_xform) { - if (!crypto_simd_usable()) - return crypto_sha1_finup(desc, data, len, out); - kernel_fpu_begin(); - if (len) - sha1_base_do_update(desc, data, len, sha1_xform); - sha1_base_do_finalize(desc, sha1_xform); + sha1_base_do_finup(desc, data, len, sha1_xform); kernel_fpu_end(); return sha1_base_finish(desc, out); @@ -90,23 +78,17 @@ static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data, return sha1_finup(desc, data, len, out, sha1_transform_ssse3); } -/* Add padding and return the message digest. 
*/ -static int sha1_ssse3_final(struct shash_desc *desc, u8 *out) -{ - return sha1_ssse3_finup(desc, NULL, 0, out); -} - static struct shash_alg sha1_ssse3_alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_ssse3_update, - .final = sha1_ssse3_final, .finup = sha1_ssse3_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-ssse3", .cra_priority = 150, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -140,22 +122,17 @@ static int sha1_avx_finup(struct shash_desc *desc, const u8 *data, return sha1_finup(desc, data, len, out, sha1_transform_avx); } -static int sha1_avx_final(struct shash_desc *desc, u8 *out) -{ - return sha1_avx_finup(desc, NULL, 0, out); -} - static struct shash_alg sha1_avx_alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_avx_update, - .final = sha1_avx_final, .finup = sha1_avx_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-avx", .cra_priority = 160, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -200,8 +177,8 @@ static bool avx2_usable(void) return false; } -static void sha1_apply_transform_avx2(struct sha1_state *state, - const u8 *data, int blocks) +static inline void sha1_apply_transform_avx2(struct sha1_state *state, + const u8 *data, int blocks) { /* Select the optimal transform based on data block size */ if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE) @@ -222,22 +199,17 @@ static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data, return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2); } -static int sha1_avx2_final(struct shash_desc *desc, u8 *out) -{ - return sha1_avx2_finup(desc, NULL, 0, out); -} - static struct shash_alg sha1_avx2_alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_avx2_update, - .final = sha1_avx2_final, .finup = sha1_avx2_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-avx2", .cra_priority = 170, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -256,7 +228,6 @@ static void unregister_sha1_avx2(void) crypto_unregister_shash(&sha1_avx2_alg); } -#ifdef CONFIG_AS_SHA1_NI asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data, int rounds); @@ -272,22 +243,17 @@ static int sha1_ni_finup(struct shash_desc *desc, const u8 *data, return sha1_finup(desc, data, len, out, sha1_ni_transform); } -static int sha1_ni_final(struct shash_desc *desc, u8 *out) -{ - return sha1_ni_finup(desc, NULL, 0, out); -} - static struct shash_alg sha1_ni_alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_ni_update, - .final = sha1_ni_final, .finup = sha1_ni_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-ni", .cra_priority = 250, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -306,11 +272,6 @@ static void unregister_sha1_ni(void) crypto_unregister_shash(&sha1_ni_alg); } -#else -static inline int register_sha1_ni(void) { return 0; } -static inline void unregister_sha1_ni(void) { } -#endif - static int __init sha1_ssse3_mod_init(void) { if 
(!x86_match_cpu(module_cpu_ids)) @@ -360,6 +321,4 @@ MODULE_ALIAS_CRYPTO("sha1"); MODULE_ALIAS_CRYPTO("sha1-ssse3"); MODULE_ALIAS_CRYPTO("sha1-avx"); MODULE_ALIAS_CRYPTO("sha1-avx2"); -#ifdef CONFIG_AS_SHA1_NI MODULE_ALIAS_CRYPTO("sha1-ni"); -#endif diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c deleted file mode 100644 index e04a43d9f7d5..000000000000 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ /dev/null @@ -1,467 +0,0 @@ -/* - * Cryptographic API. - * - * Glue code for the SHA256 Secure Hash Algorithm assembler implementations - * using SSSE3, AVX, AVX2, and SHA-NI instructions. - * - * This file is based on sha256_generic.c - * - * Copyright (C) 2013 Intel Corporation. - * - * Author: - * Tim Chen <tim.c.chen@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <linux/string.h> -#include <asm/cpu_device_id.h> -#include <asm/simd.h> - -asmlinkage void sha256_transform_ssse3(struct sha256_state *state, - const u8 *data, int blocks); - -static const struct x86_cpu_id module_cpu_ids[] = { -#ifdef CONFIG_AS_SHA256_NI - X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL), -#endif - X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), - X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), - X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids); - -static int _sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len, sha256_block_fn *sha256_xform) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) - return crypto_sha256_update(desc, data, len); - - /* - * Make sure struct sha256_state begins directly with the SHA256 - * 256-bit internal state, as this is what the asm functions expect. 
- */ - BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); - - kernel_fpu_begin(); - sha256_base_do_update(desc, data, len, sha256_xform); - kernel_fpu_end(); - - return 0; -} - -static int sha256_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out, sha256_block_fn *sha256_xform) -{ - if (!crypto_simd_usable()) - return crypto_sha256_finup(desc, data, len, out); - - kernel_fpu_begin(); - if (len) - sha256_base_do_update(desc, data, len, sha256_xform); - sha256_base_do_finalize(desc, sha256_xform); - kernel_fpu_end(); - - return sha256_base_finish(desc, out); -} - -static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return _sha256_update(desc, data, len, sha256_transform_ssse3); -} - -static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_finup(desc, data, len, out, sha256_transform_ssse3); -} - -/* Add padding and return the message digest. */ -static int sha256_ssse3_final(struct shash_desc *desc, u8 *out) -{ - return sha256_ssse3_finup(desc, NULL, 0, out); -} - -static int sha256_ssse3_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_base_init(desc) ?: - sha256_ssse3_finup(desc, data, len, out); -} - -static struct shash_alg sha256_ssse3_algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = sha256_ssse3_update, - .final = sha256_ssse3_final, - .finup = sha256_ssse3_finup, - .digest = sha256_ssse3_digest, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-ssse3", - .cra_priority = 150, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = sha256_ssse3_update, - .final = sha256_ssse3_final, - .finup = sha256_ssse3_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-ssse3", - .cra_priority = 150, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int register_sha256_ssse3(void) -{ - if (boot_cpu_has(X86_FEATURE_SSSE3)) - return crypto_register_shashes(sha256_ssse3_algs, - ARRAY_SIZE(sha256_ssse3_algs)); - return 0; -} - -static void unregister_sha256_ssse3(void) -{ - if (boot_cpu_has(X86_FEATURE_SSSE3)) - crypto_unregister_shashes(sha256_ssse3_algs, - ARRAY_SIZE(sha256_ssse3_algs)); -} - -asmlinkage void sha256_transform_avx(struct sha256_state *state, - const u8 *data, int blocks); - -static int sha256_avx_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return _sha256_update(desc, data, len, sha256_transform_avx); -} - -static int sha256_avx_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_finup(desc, data, len, out, sha256_transform_avx); -} - -static int sha256_avx_final(struct shash_desc *desc, u8 *out) -{ - return sha256_avx_finup(desc, NULL, 0, out); -} - -static int sha256_avx_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_base_init(desc) ?: - sha256_avx_finup(desc, data, len, out); -} - -static struct shash_alg sha256_avx_algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = sha256_avx_update, - .final = sha256_avx_final, - .finup = sha256_avx_finup, - .digest = sha256_avx_digest, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - 
.cra_driver_name = "sha256-avx", - .cra_priority = 160, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = sha256_avx_update, - .final = sha256_avx_final, - .finup = sha256_avx_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-avx", - .cra_priority = 160, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static bool avx_usable(void) -{ - if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { - if (boot_cpu_has(X86_FEATURE_AVX)) - pr_info("AVX detected but unusable.\n"); - return false; - } - - return true; -} - -static int register_sha256_avx(void) -{ - if (avx_usable()) - return crypto_register_shashes(sha256_avx_algs, - ARRAY_SIZE(sha256_avx_algs)); - return 0; -} - -static void unregister_sha256_avx(void) -{ - if (avx_usable()) - crypto_unregister_shashes(sha256_avx_algs, - ARRAY_SIZE(sha256_avx_algs)); -} - -asmlinkage void sha256_transform_rorx(struct sha256_state *state, - const u8 *data, int blocks); - -static int sha256_avx2_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return _sha256_update(desc, data, len, sha256_transform_rorx); -} - -static int sha256_avx2_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_finup(desc, data, len, out, sha256_transform_rorx); -} - -static int sha256_avx2_final(struct shash_desc *desc, u8 *out) -{ - return sha256_avx2_finup(desc, NULL, 0, out); -} - -static int sha256_avx2_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_base_init(desc) ?: - sha256_avx2_finup(desc, data, len, out); -} - -static struct shash_alg sha256_avx2_algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = sha256_avx2_update, - .final = sha256_avx2_final, - .finup = sha256_avx2_finup, - .digest = sha256_avx2_digest, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-avx2", - .cra_priority = 170, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = sha256_avx2_update, - .final = sha256_avx2_final, - .finup = sha256_avx2_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-avx2", - .cra_priority = 170, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static bool avx2_usable(void) -{ - if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) && - boot_cpu_has(X86_FEATURE_BMI2)) - return true; - - return false; -} - -static int register_sha256_avx2(void) -{ - if (avx2_usable()) - return crypto_register_shashes(sha256_avx2_algs, - ARRAY_SIZE(sha256_avx2_algs)); - return 0; -} - -static void unregister_sha256_avx2(void) -{ - if (avx2_usable()) - crypto_unregister_shashes(sha256_avx2_algs, - ARRAY_SIZE(sha256_avx2_algs)); -} - -#ifdef CONFIG_AS_SHA256_NI -asmlinkage void sha256_ni_transform(struct sha256_state *digest, - const u8 *data, int rounds); - -static int sha256_ni_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - return _sha256_update(desc, data, len, sha256_ni_transform); -} - -static int sha256_ni_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_finup(desc, data, len, out, sha256_ni_transform); -} - -static 
int sha256_ni_final(struct shash_desc *desc, u8 *out) -{ - return sha256_ni_finup(desc, NULL, 0, out); -} - -static int sha256_ni_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return sha256_base_init(desc) ?: - sha256_ni_finup(desc, data, len, out); -} - -static struct shash_alg sha256_ni_algs[] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = sha256_ni_update, - .final = sha256_ni_final, - .finup = sha256_ni_finup, - .digest = sha256_ni_digest, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256-ni", - .cra_priority = 250, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = sha256_ni_update, - .final = sha256_ni_final, - .finup = sha256_ni_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name = "sha224-ni", - .cra_priority = 250, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int register_sha256_ni(void) -{ - if (boot_cpu_has(X86_FEATURE_SHA_NI)) - return crypto_register_shashes(sha256_ni_algs, - ARRAY_SIZE(sha256_ni_algs)); - return 0; -} - -static void unregister_sha256_ni(void) -{ - if (boot_cpu_has(X86_FEATURE_SHA_NI)) - crypto_unregister_shashes(sha256_ni_algs, - ARRAY_SIZE(sha256_ni_algs)); -} - -#else -static inline int register_sha256_ni(void) { return 0; } -static inline void unregister_sha256_ni(void) { } -#endif - -static int __init sha256_ssse3_mod_init(void) -{ - if (!x86_match_cpu(module_cpu_ids)) - return -ENODEV; - - if (register_sha256_ssse3()) - goto fail; - - if (register_sha256_avx()) { - unregister_sha256_ssse3(); - goto fail; - } - - if (register_sha256_avx2()) { - unregister_sha256_avx(); - unregister_sha256_ssse3(); - goto fail; - } - - if (register_sha256_ni()) { - unregister_sha256_avx2(); - unregister_sha256_avx(); - unregister_sha256_ssse3(); - goto fail; - } - - return 0; -fail: - return -ENODEV; -} - -static void __exit sha256_ssse3_mod_fini(void) -{ - unregister_sha256_ni(); - unregister_sha256_avx2(); - unregister_sha256_avx(); - unregister_sha256_ssse3(); -} - -module_init(sha256_ssse3_mod_init); -module_exit(sha256_ssse3_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); - -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha256-ssse3"); -MODULE_ALIAS_CRYPTO("sha256-avx"); -MODULE_ALIAS_CRYPTO("sha256-avx2"); -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha224-ssse3"); -MODULE_ALIAS_CRYPTO("sha224-avx"); -MODULE_ALIAS_CRYPTO("sha224-avx2"); -#ifdef CONFIG_AS_SHA256_NI -MODULE_ALIAS_CRYPTO("sha256-ni"); -MODULE_ALIAS_CRYPTO("sha224-ni"); -#endif diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 6d3b85e53d0e..067684c54395 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -27,17 +27,13 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/cpu_device_id.h> +#include <asm/simd.h> #include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/mm.h> -#include <linux/string.h> -#include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <asm/cpu_device_id.h> -#include <asm/simd.h> asmlinkage void sha512_transform_ssse3(struct sha512_state *state, 
const u8 *data, int blocks); @@ -45,11 +41,7 @@ asmlinkage void sha512_transform_ssse3(struct sha512_state *state, static int sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len, sha512_block_fn *sha512_xform) { - struct sha512_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) - return crypto_sha512_update(desc, data, len); + int remain; /* * Make sure struct sha512_state begins directly with the SHA512 @@ -58,22 +50,17 @@ static int sha512_update(struct shash_desc *desc, const u8 *data, BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0); kernel_fpu_begin(); - sha512_base_do_update(desc, data, len, sha512_xform); + remain = sha512_base_do_update_blocks(desc, data, len, sha512_xform); kernel_fpu_end(); - return 0; + return remain; } static int sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out, sha512_block_fn *sha512_xform) { - if (!crypto_simd_usable()) - return crypto_sha512_finup(desc, data, len, out); - kernel_fpu_begin(); - if (len) - sha512_base_do_update(desc, data, len, sha512_xform); - sha512_base_do_finalize(desc, sha512_xform); + sha512_base_do_finup(desc, data, len, sha512_xform); kernel_fpu_end(); return sha512_base_finish(desc, out); @@ -91,23 +78,18 @@ static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data, return sha512_finup(desc, data, len, out, sha512_transform_ssse3); } -/* Add padding and return the message digest. */ -static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) -{ - return sha512_ssse3_finup(desc, NULL, 0, out); -} - static struct shash_alg sha512_ssse3_algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_ssse3_update, - .final = sha512_ssse3_final, .finup = sha512_ssse3_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name = "sha512-ssse3", .cra_priority = 150, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -115,13 +97,14 @@ static struct shash_alg sha512_ssse3_algs[] = { { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_ssse3_update, - .final = sha512_ssse3_final, .finup = sha512_ssse3_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name = "sha384-ssse3", .cra_priority = 150, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -167,23 +150,18 @@ static int sha512_avx_finup(struct shash_desc *desc, const u8 *data, return sha512_finup(desc, data, len, out, sha512_transform_avx); } -/* Add padding and return the message digest. 
*/ -static int sha512_avx_final(struct shash_desc *desc, u8 *out) -{ - return sha512_avx_finup(desc, NULL, 0, out); -} - static struct shash_alg sha512_avx_algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_avx_update, - .final = sha512_avx_final, .finup = sha512_avx_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name = "sha512-avx", .cra_priority = 160, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -191,13 +169,14 @@ static struct shash_alg sha512_avx_algs[] = { { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_avx_update, - .final = sha512_avx_final, .finup = sha512_avx_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name = "sha384-avx", .cra_priority = 160, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -233,23 +212,18 @@ static int sha512_avx2_finup(struct shash_desc *desc, const u8 *data, return sha512_finup(desc, data, len, out, sha512_transform_rorx); } -/* Add padding and return the message digest. */ -static int sha512_avx2_final(struct shash_desc *desc, u8 *out) -{ - return sha512_avx2_finup(desc, NULL, 0, out); -} - static struct shash_alg sha512_avx2_algs[] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_avx2_update, - .final = sha512_avx2_final, .finup = sha512_avx2_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name = "sha512-avx2", .cra_priority = 170, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -257,13 +231,14 @@ static struct shash_alg sha512_avx2_algs[] = { { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_avx2_update, - .final = sha512_avx2_final, .finup = sha512_avx2_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name = "sha384-avx2", .cra_priority = 170, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/x86/crypto/sm3_avx_glue.c b/arch/x86/crypto/sm3_avx_glue.c index 661b6f22ffcd..6e8c42b9dc8e 100644 --- a/arch/x86/crypto/sm3_avx_glue.c +++ b/arch/x86/crypto/sm3_avx_glue.c @@ -10,12 +10,11 @@ #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/types.h> #include <crypto/sm3.h> #include <crypto/sm3_base.h> -#include <asm/simd.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> asmlinkage void sm3_transform_avx(struct sm3_state *state, const u8 *data, int nblocks); @@ -23,13 +22,7 @@ asmlinkage void sm3_transform_avx(struct sm3_state *state, static int sm3_avx_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sm3_state *sctx = shash_desc_ctx(desc); - - if (!crypto_simd_usable() || - (sctx->count % SM3_BLOCK_SIZE) + len < SM3_BLOCK_SIZE) { - sm3_update(sctx, data, len); - return 0; - } + int remain; /* * Make sure struct sm3_state begins directly with the SM3 @@ -38,45 +31,17 @@ static int 
sm3_avx_update(struct shash_desc *desc, const u8 *data, BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0); kernel_fpu_begin(); - sm3_base_do_update(desc, data, len, sm3_transform_avx); + remain = sm3_base_do_update_blocks(desc, data, len, sm3_transform_avx); kernel_fpu_end(); - - return 0; + return remain; } static int sm3_avx_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) { - struct sm3_state *sctx = shash_desc_ctx(desc); - - if (len) - sm3_update(sctx, data, len); - - sm3_final(sctx, out); - return 0; - } - kernel_fpu_begin(); - if (len) - sm3_base_do_update(desc, data, len, sm3_transform_avx); - sm3_base_do_finalize(desc, sm3_transform_avx); + sm3_base_do_finup(desc, data, len, sm3_transform_avx); kernel_fpu_end(); - - return sm3_base_finish(desc, out); -} - -static int sm3_avx_final(struct shash_desc *desc, u8 *out) -{ - if (!crypto_simd_usable()) { - sm3_final(shash_desc_ctx(desc), out); - return 0; - } - - kernel_fpu_begin(); - sm3_base_do_finalize(desc, sm3_transform_avx); - kernel_fpu_end(); - return sm3_base_finish(desc, out); } @@ -84,13 +49,14 @@ static struct shash_alg sm3_avx_alg = { .digestsize = SM3_DIGEST_SIZE, .init = sm3_base_init, .update = sm3_avx_update, - .final = sm3_avx_final, .finup = sm3_avx_finup, - .descsize = sizeof(struct sm3_state), + .descsize = SM3_STATE_SIZE, .base = { .cra_name = "sm3", .cra_driver_name = "sm3-avx", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SM3_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c index 1148fd4cd57f..fec0ab7a63dd 100644 --- a/arch/x86/crypto/sm4_aesni_avx2_glue.c +++ b/arch/x86/crypto/sm4_aesni_avx2_glue.c @@ -8,11 +8,10 @@ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ +#include <asm/fpu/api.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/kernel.h> -#include <asm/simd.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/sm4.h> #include "sm4-avx.h" @@ -48,10 +47,9 @@ static int ctr_crypt(struct skcipher_request *req) static struct skcipher_alg sm4_aesni_avx2_skciphers[] = { { .base = { - .cra_name = "__ecb(sm4)", - .cra_driver_name = "__ecb-sm4-aesni-avx2", + .cra_name = "ecb(sm4)", + .cra_driver_name = "ecb-sm4-aesni-avx2", .cra_priority = 500, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, @@ -64,10 +62,9 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = { .decrypt = sm4_avx_ecb_decrypt, }, { .base = { - .cra_name = "__cbc(sm4)", - .cra_driver_name = "__cbc-sm4-aesni-avx2", + .cra_name = "cbc(sm4)", + .cra_driver_name = "cbc-sm4-aesni-avx2", .cra_priority = 500, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, @@ -81,10 +78,9 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = { .decrypt = cbc_decrypt, }, { .base = { - .cra_name = "__ctr(sm4)", - .cra_driver_name = "__ctr-sm4-aesni-avx2", + .cra_name = "ctr(sm4)", + .cra_driver_name = "ctr-sm4-aesni-avx2", .cra_priority = 500, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, @@ -100,9 +96,6 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = { } }; -static struct simd_skcipher_alg * 
-simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)]; - static int __init sm4_init(void) { const char *feature_name; @@ -121,16 +114,14 @@ static int __init sm4_init(void) return -ENODEV; } - return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers, - ARRAY_SIZE(sm4_aesni_avx2_skciphers), - simd_sm4_aesni_avx2_skciphers); + return crypto_register_skciphers(sm4_aesni_avx2_skciphers, + ARRAY_SIZE(sm4_aesni_avx2_skciphers)); } static void __exit sm4_exit(void) { - simd_unregister_skciphers(sm4_aesni_avx2_skciphers, - ARRAY_SIZE(sm4_aesni_avx2_skciphers), - simd_sm4_aesni_avx2_skciphers); + crypto_unregister_skciphers(sm4_aesni_avx2_skciphers, + ARRAY_SIZE(sm4_aesni_avx2_skciphers)); } module_init(sm4_init); diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c index 85b4ca78b47b..72867fc49ce8 100644 --- a/arch/x86/crypto/sm4_aesni_avx_glue.c +++ b/arch/x86/crypto/sm4_aesni_avx_glue.c @@ -8,11 +8,10 @@ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ +#include <asm/fpu/api.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/kernel.h> -#include <asm/simd.h> -#include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/sm4.h> #include "sm4-avx.h" @@ -263,10 +262,9 @@ static int ctr_crypt(struct skcipher_request *req) static struct skcipher_alg sm4_aesni_avx_skciphers[] = { { .base = { - .cra_name = "__ecb(sm4)", - .cra_driver_name = "__ecb-sm4-aesni-avx", + .cra_name = "ecb(sm4)", + .cra_driver_name = "ecb-sm4-aesni-avx", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, @@ -279,10 +277,9 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = { .decrypt = sm4_avx_ecb_decrypt, }, { .base = { - .cra_name = "__cbc(sm4)", - .cra_driver_name = "__cbc-sm4-aesni-avx", + .cra_name = "cbc(sm4)", + .cra_driver_name = "cbc-sm4-aesni-avx", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = SM4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, @@ -296,10 +293,9 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = { .decrypt = cbc_decrypt, }, { .base = { - .cra_name = "__ctr(sm4)", - .cra_driver_name = "__ctr-sm4-aesni-avx", + .cra_name = "ctr(sm4)", + .cra_driver_name = "ctr-sm4-aesni-avx", .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct sm4_ctx), .cra_module = THIS_MODULE, @@ -315,9 +311,6 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = { } }; -static struct simd_skcipher_alg * -simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)]; - static int __init sm4_init(void) { const char *feature_name; @@ -335,16 +328,14 @@ static int __init sm4_init(void) return -ENODEV; } - return simd_register_skciphers_compat(sm4_aesni_avx_skciphers, - ARRAY_SIZE(sm4_aesni_avx_skciphers), - simd_sm4_aesni_avx_skciphers); + return crypto_register_skciphers(sm4_aesni_avx_skciphers, + ARRAY_SIZE(sm4_aesni_avx_skciphers)); } static void __exit sm4_exit(void) { - simd_unregister_skciphers(sm4_aesni_avx_skciphers, - ARRAY_SIZE(sm4_aesni_avx_skciphers), - simd_sm4_aesni_avx_skciphers); + crypto_unregister_skciphers(sm4_aesni_avx_skciphers, + ARRAY_SIZE(sm4_aesni_avx_skciphers)); } module_init(sm4_init); diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index 3eb3440b477a..9e20db013750 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ 
b/arch/x86/crypto/twofish_avx_glue.c @@ -13,7 +13,6 @@ #include <linux/crypto.h> #include <linux/err.h> #include <crypto/algapi.h> -#include <crypto/internal/simd.h> #include <crypto/twofish.h> #include "twofish.h" @@ -74,10 +73,9 @@ static int cbc_decrypt(struct skcipher_request *req) static struct skcipher_alg twofish_algs[] = { { - .base.cra_name = "__ecb(twofish)", - .base.cra_driver_name = "__ecb-twofish-avx", + .base.cra_name = "ecb(twofish)", + .base.cra_driver_name = "ecb-twofish-avx", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = TF_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct twofish_ctx), .base.cra_module = THIS_MODULE, @@ -87,10 +85,9 @@ static struct skcipher_alg twofish_algs[] = { .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { - .base.cra_name = "__cbc(twofish)", - .base.cra_driver_name = "__cbc-twofish-avx", + .base.cra_name = "cbc(twofish)", + .base.cra_driver_name = "cbc-twofish-avx", .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = TF_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct twofish_ctx), .base.cra_module = THIS_MODULE, @@ -103,8 +100,6 @@ static struct skcipher_alg twofish_algs[] = { }, }; -static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)]; - static int __init twofish_init(void) { const char *feature_name; @@ -114,15 +109,13 @@ static int __init twofish_init(void) return -ENODEV; } - return simd_register_skciphers_compat(twofish_algs, - ARRAY_SIZE(twofish_algs), - twofish_simd_algs); + return crypto_register_skciphers(twofish_algs, + ARRAY_SIZE(twofish_algs)); } static void __exit twofish_exit(void) { - simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs), - twofish_simd_algs); + crypto_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs)); } module_init(twofish_init); diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index adb299d3b6a1..afe105b2f907 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -65,7 +65,6 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, static void vdso_fix_landing(const struct vdso_image *image, struct vm_area_struct *new_vma) { -#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION if (in_ia32_syscall() && image == &vdso_image_32) { struct pt_regs *regs = current_pt_regs(); unsigned long vdso_land = image->sym_int80_landing_pad; @@ -76,7 +75,6 @@ static void vdso_fix_landing(const struct vdso_image *image, if (regs->ip == old_land_addr) regs->ip = new_vma->vm_start + vdso_land; } -#endif } static int vdso_mremap(const struct vm_special_mapping *sm, @@ -227,7 +225,6 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr) return map_vdso(image, addr); } -#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static int load_vdso32(void) { if (vdso32_enabled != 1) /* Other values all mean "disabled" */ @@ -235,45 +232,38 @@ static int load_vdso32(void) return map_vdso(&vdso_image_32, 0); } -#endif -#ifdef CONFIG_X86_64 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - if (!vdso64_enabled) - return 0; + if (IS_ENABLED(CONFIG_X86_64)) { + if (!vdso64_enabled) + return 0; + + return map_vdso(&vdso_image_64, 0); + } - return map_vdso(&vdso_image_64, 0); + return load_vdso32(); } #ifdef CONFIG_COMPAT int compat_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp, bool x32) { -#ifdef CONFIG_X86_X32_ABI - if (x32) { + if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) { if (!vdso64_enabled) 
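The vdso hunks here and just below replace #ifdef CONFIG_X86_64 / CONFIG_X86_X32_ABI / CONFIG_IA32_EMULATION blocks with IS_ENABLED() checks, so every branch stays visible to the compiler and the disabled ones are discarded during optimization. A generic sketch of the idiom; CONFIG_FOO and the two helpers are placeholders, not symbols from the patch.

#include <linux/kconfig.h>

/* Placeholder names, for illustration only. Unlike #ifdef, both branches
 * are always parsed and type-checked; the disabled one is simply dropped
 * as dead code. */
static int foo_setup(void)     { return 1; }
static int default_setup(void) { return 0; }

static int setup_pages(void)
{
        if (IS_ENABLED(CONFIG_FOO))
                return foo_setup();

        return default_setup();
}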
return 0; return map_vdso(&vdso_image_x32, 0); } -#endif -#ifdef CONFIG_IA32_EMULATION - return load_vdso32(); -#else + + if (IS_ENABLED(CONFIG_IA32_EMULATION)) + return load_vdso32(); + return 0; -#endif -} -#endif -#else -int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) -{ - return load_vdso32(); } #endif bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) { -#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) const struct vdso_image *image = current->mm->context.vdso_image; unsigned long vdso = (unsigned long) current->mm->context.vdso; @@ -282,7 +272,6 @@ bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad) return true; } -#endif return false; } diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 2fb7d53cf333..c9103a6fa06e 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -341,9 +341,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root) pgd = pgd_offset_pgd(root, VSYSCALL_ADDR); set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); p4d = p4d_offset(pgd, VSYSCALL_ADDR); -#if CONFIG_PGTABLE_LEVELS >= 5 set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER)); -#endif pud = pud_offset(p4d, VSYSCALL_ADDR); set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); pmd = pmd_offset(pud, VSYSCALL_ADDR); diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c index ec3427463382..06f35a6b58a5 100644 --- a/arch/x86/events/amd/brs.c +++ b/arch/x86/events/amd/brs.c @@ -44,12 +44,12 @@ static inline unsigned int brs_to(int idx) static __always_inline void set_debug_extn_cfg(u64 val) { /* bits[4:3] must always be set to 11b */ - __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32); + native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3); } static __always_inline u64 get_debug_extn_cfg(void) { - return __rdmsr(MSR_AMD_DBG_EXTN_CFG); + return native_rdmsrq(MSR_AMD_DBG_EXTN_CFG); } static bool __init amd_brs_detect(void) @@ -187,7 +187,7 @@ void amd_brs_reset(void) /* * Mark first entry as poisoned */ - wrmsrl(brs_to(0), BRS_POISON); + wrmsrq(brs_to(0), BRS_POISON); } int __init amd_brs_init(void) @@ -325,7 +325,7 @@ void amd_brs_drain(void) u32 brs_idx = tos - i; u64 from, to; - rdmsrl(brs_to(brs_idx), to); + rdmsrq(brs_to(brs_idx), to); /* Entry does not belong to us (as marked by kernel) */ if (to == BRS_POISON) @@ -341,7 +341,7 @@ void amd_brs_drain(void) if (!amd_brs_match_plm(event, to)) continue; - rdmsrl(brs_from(brs_idx), from); + rdmsrq(brs_from(brs_idx), from); perf_clear_branch_entry_bitfields(br+nr); @@ -371,7 +371,7 @@ static void amd_brs_poison_buffer(void) idx = amd_brs_get_tos(&cfg); /* Poison target of entry */ - wrmsrl(brs_to(idx), BRS_POISON); + wrmsrq(brs_to(idx), BRS_POISON); } /* diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 30d6ceb4c8ad..b20661b8621d 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -9,6 +9,7 @@ #include <linux/jiffies.h> #include <asm/apicdef.h> #include <asm/apic.h> +#include <asm/msr.h> #include <asm/nmi.h> #include "../perf_event.h" @@ -563,13 +564,13 @@ static void amd_pmu_cpu_reset(int cpu) return; /* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */ - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0); + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0); /* * Clear freeze and overflow bits i.e. 
PerfCntrGLobalStatus.LbrFreeze * and PerfCntrGLobalStatus.PerfCntrOvfl */ - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask); } @@ -651,7 +652,7 @@ static void amd_pmu_cpu_dead(int cpu) static __always_inline void amd_pmu_set_global_ctl(u64 ctl) { - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl); + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl); } static inline u64 amd_pmu_get_global_status(void) @@ -659,7 +660,7 @@ static inline u64 amd_pmu_get_global_status(void) u64 status; /* PerfCntrGlobalStatus is read-only */ - rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status); + rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status); return status; } @@ -672,14 +673,14 @@ static inline void amd_pmu_ack_global_status(u64 status) * clears the same bit in PerfCntrGlobalStatus */ - wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status); + wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status); } static bool amd_pmu_test_overflow_topbit(int idx) { u64 counter; - rdmsrl(x86_pmu_event_addr(idx), counter); + rdmsrq(x86_pmu_event_addr(idx), counter); return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1)); } @@ -1003,8 +1004,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); - if (perf_event_overflow(event, &data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } /* diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 0252b7ea8bca..112f43b23ebf 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -15,6 +15,7 @@ #include <linux/sched/clock.h> #include <asm/apic.h> +#include <asm/msr.h> #include "../perf_event.h" @@ -26,7 +27,7 @@ static u32 ibs_caps; #include <linux/hardirq.h> #include <asm/nmi.h> -#include <asm/amd-ibs.h> +#include <asm/amd/ibs.h> /* attr.config2 */ #define IBS_SW_FILTER_MASK 1 @@ -424,7 +425,7 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, * prev count manually on overflow. 
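Most hunks in this range are a mechanical rename of the MSR helpers: rdmsrl()/wrmsrl() become rdmsrq()/wrmsrq(), the _safe and native_ variants follow suit (native_wrmsrq() now takes one u64 instead of a low/high pair), rdpmcl(x, v) becomes v = rdpmc(x) in later hunks, and the files gain an explicit #include <asm/msr.h>. A small sketch of the renamed accessors, using an MSR and flag that do appear in this diff; the helper itself is made up.

#include <asm/msr.h>

/* Illustrative helper, not from the patch: rdmsrq() still fills a u64
 * variable by name, wrmsrq() takes the full 64-bit value. */
static u64 read_and_enable_lbrv2(void)
{
        u64 cfg;

        rdmsrq(MSR_AMD_DBG_EXTN_CFG, cfg);
        wrmsrq(MSR_AMD_DBG_EXTN_CFG, cfg | DBG_EXTN_CFG_LBRV2EN);

        return cfg;
}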
*/ while (!perf_event_try_update(event, count, 64)) { - rdmsrl(event->hw.config_base, *config); + rdmsrq(event->hw.config_base, *config); count = perf_ibs->get_count(*config); } } @@ -435,9 +436,9 @@ static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs, u64 tmp = hwc->config | config; if (perf_ibs->fetch_count_reset_broken) - wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask); + wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask); - wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask); + wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask); } /* @@ -452,9 +453,9 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs, { config &= ~perf_ibs->cnt_mask; if (boot_cpu_data.x86 == 0x10) - wrmsrl(hwc->config_base, config); + wrmsrq(hwc->config_base, config); config &= ~perf_ibs->enable_mask; - wrmsrl(hwc->config_base, config); + wrmsrq(hwc->config_base, config); } /* @@ -513,7 +514,7 @@ static void perf_ibs_stop(struct perf_event *event, int flags) if (!stopping && (hwc->state & PERF_HES_UPTODATE)) return; - rdmsrl(hwc->config_base, config); + rdmsrq(hwc->config_base, config); if (stopping) { /* @@ -1256,7 +1257,7 @@ fail: hwc = &event->hw; msr = hwc->config_base; buf = ibs_data.regs; - rdmsrl(msr, *buf); + rdmsrq(msr, *buf); if (!(*buf++ & perf_ibs->valid_mask)) goto fail; @@ -1274,7 +1275,7 @@ fail: offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip); do { - rdmsrl(msr + offset, *buf++); + rdmsrq(msr + offset, *buf++); size++; offset = find_next_bit(perf_ibs->offset_mask, perf_ibs->offset_max, @@ -1304,17 +1305,17 @@ fail: if (event->attr.sample_type & PERF_SAMPLE_RAW) { if (perf_ibs == &perf_ibs_op) { if (ibs_caps & IBS_CAPS_BRNTRGT) { - rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); + rdmsrq(MSR_AMD64_IBSBRTARGET, *buf++); br_target_idx = size; size++; } if (ibs_caps & IBS_CAPS_OPDATA4) { - rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); + rdmsrq(MSR_AMD64_IBSOPDATA4, *buf++); size++; } } if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) { - rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++); + rdmsrq(MSR_AMD64_ICIBSEXTDCTL, *buf++); size++; } } @@ -1373,9 +1374,7 @@ fail: hwc->sample_period = perf_ibs->min_period; out: - if (throttle) { - perf_ibs_stop(event, 0); - } else { + if (!throttle) { if (perf_ibs == &perf_ibs_op) { if (ibs_caps & IBS_CAPS_OPCNTEXT) { new_config = period & IBS_OP_MAX_CNT_EXT_MASK; @@ -1565,7 +1564,7 @@ static inline int ibs_eilvt_valid(void) preempt_disable(); - rdmsrl(MSR_AMD64_IBSCTL, val); + rdmsrq(MSR_AMD64_IBSCTL, val); offset = val & IBSCTL_LVT_OFFSET_MASK; if (!(val & IBSCTL_LVT_OFFSET_VALID)) { @@ -1680,7 +1679,7 @@ static inline int get_ibs_lvt_offset(void) { u64 val; - rdmsrl(MSR_AMD64_IBSCTL, val); + rdmsrq(MSR_AMD64_IBSCTL, val); if (!(val & IBSCTL_LVT_OFFSET_VALID)) return -EINVAL; diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index f8228d8243f7..a721da9987dd 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -16,6 +16,8 @@ #include <linux/slab.h> #include <linux/amd-iommu.h> +#include <asm/msr.h> + #include "../perf_event.h" #include "iommu.h" diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c index c06ccca96851..d24da377df77 100644 --- a/arch/x86/events/amd/lbr.c +++ b/arch/x86/events/amd/lbr.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/perf_event.h> +#include <asm/msr.h> #include <asm/perf_event.h> #include "../perf_event.h" @@ -61,19 +62,19 @@ struct branch_entry { static __always_inline void 
amd_pmu_lbr_set_from(unsigned int idx, u64 val) { - wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val); + wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val); } static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val) { - wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); + wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); } static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx) { u64 val; - rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val); + rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val); return val; } @@ -82,7 +83,7 @@ static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx) { u64 val; - rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); + rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val); return val; } @@ -333,7 +334,7 @@ void amd_pmu_lbr_reset(void) cpuc->last_task_ctx = NULL; cpuc->last_log_id = 0; - wrmsrl(MSR_AMD64_LBR_SELECT, 0); + wrmsrq(MSR_AMD64_LBR_SELECT, 0); } void amd_pmu_lbr_add(struct perf_event *event) @@ -396,16 +397,16 @@ void amd_pmu_lbr_enable_all(void) /* Set hardware branch filter */ if (cpuc->lbr_select) { lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK; - wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select); + wrmsrq(MSR_AMD64_LBR_SELECT, lbr_select); } if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { - rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); - wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); + rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl); + wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); } - rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); - wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN); + rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); + wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN); } void amd_pmu_lbr_disable_all(void) diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c index 37d5b380516e..dad42790cf7d 100644 --- a/arch/x86/events/amd/power.c +++ b/arch/x86/events/amd/power.c @@ -11,6 +11,7 @@ #include <linux/slab.h> #include <linux/perf_event.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "../perf_event.h" /* Event code: LSB 8 bits, passed in attr->config any other bit is reserved. 
*/ @@ -48,8 +49,8 @@ static void event_update(struct perf_event *event) prev_pwr_acc = hwc->pwr_acc; prev_ptsc = hwc->ptsc; - rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc); - rdmsrl(MSR_F15H_PTSC, new_ptsc); + rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc); + rdmsrq(MSR_F15H_PTSC, new_ptsc); /* * Calculate the CU power consumption over a time period, the unit of @@ -75,8 +76,8 @@ static void __pmu_event_start(struct perf_event *event) event->hw.state = 0; - rdmsrl(MSR_F15H_PTSC, event->hw.ptsc); - rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc); + rdmsrq(MSR_F15H_PTSC, event->hw.ptsc); + rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc); } static void pmu_event_start(struct perf_event *event, int mode) @@ -272,7 +273,7 @@ static int __init amd_power_pmu_init(void) cpu_pwr_sample_ratio = cpuid_ecx(0x80000007); - if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) { + if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) { pr_err("Failed to read max compute unit power accumulator MSR\n"); return -ENODEV; } diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 49c26ce2b115..e8b6af199c73 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -21,6 +21,7 @@ #define NUM_COUNTERS_NB 4 #define NUM_COUNTERS_L2 4 #define NUM_COUNTERS_L3 6 +#define NUM_COUNTERS_MAX 64 #define RDPMC_BASE_NB 6 #define RDPMC_BASE_LLC 10 @@ -38,7 +39,10 @@ struct amd_uncore_ctx { int refcnt; int cpu; struct perf_event **events; - struct hlist_node node; + unsigned long active_mask[BITS_TO_LONGS(NUM_COUNTERS_MAX)]; + int nr_active; + struct hrtimer hrtimer; + u64 hrtimer_duration; }; struct amd_uncore_pmu { @@ -83,11 +87,51 @@ struct amd_uncore { static struct amd_uncore uncores[UNCORE_TYPE_MAX]; +/* Interval for hrtimer, defaults to 60000 milliseconds */ +static unsigned int update_interval = 60 * MSEC_PER_SEC; +module_param(update_interval, uint, 0444); + static struct amd_uncore_pmu *event_to_amd_uncore_pmu(struct perf_event *event) { return container_of(event->pmu, struct amd_uncore_pmu, pmu); } +static enum hrtimer_restart amd_uncore_hrtimer(struct hrtimer *hrtimer) +{ + struct amd_uncore_ctx *ctx; + struct perf_event *event; + int bit; + + ctx = container_of(hrtimer, struct amd_uncore_ctx, hrtimer); + + if (!ctx->nr_active || ctx->cpu != smp_processor_id()) + return HRTIMER_NORESTART; + + for_each_set_bit(bit, ctx->active_mask, NUM_COUNTERS_MAX) { + event = ctx->events[bit]; + event->pmu->read(event); + } + + hrtimer_forward_now(hrtimer, ns_to_ktime(ctx->hrtimer_duration)); + return HRTIMER_RESTART; +} + +static void amd_uncore_start_hrtimer(struct amd_uncore_ctx *ctx) +{ + hrtimer_start(&ctx->hrtimer, ns_to_ktime(ctx->hrtimer_duration), + HRTIMER_MODE_REL_PINNED_HARD); +} + +static void amd_uncore_cancel_hrtimer(struct amd_uncore_ctx *ctx) +{ + hrtimer_cancel(&ctx->hrtimer); +} + +static void amd_uncore_init_hrtimer(struct amd_uncore_ctx *ctx) +{ + hrtimer_setup(&ctx->hrtimer, amd_uncore_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); +} + static void amd_uncore_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -106,9 +150,9 @@ static void amd_uncore_read(struct perf_event *event) * read counts directly from the corresponding PERF_CTR. 
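The uncore context above gains an hrtimer because these PMUs advertise PERF_PMU_CAP_NO_INTERRUPT: each active context is now polled periodically so the 48-bit counters are read back into the 64-bit event count before they can wrap unnoticed. A condensed sketch of the timer life cycle used above; struct poll_ctx and its fields are stand-ins for the real amd_uncore_ctx.

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/container_of.h>

struct poll_ctx {
        struct hrtimer  timer;
        u64             period_ns;
        int             nr_active;
};

/* Re-arms itself only while events are active, mirroring how the uncore
 * callback bails out once ctx->nr_active drops to zero. */
static enum hrtimer_restart poll_fn(struct hrtimer *t)
{
        struct poll_ctx *ctx = container_of(t, struct poll_ctx, timer);

        if (!ctx->nr_active)
                return HRTIMER_NORESTART;

        /* ... read the hardware counters here ... */

        hrtimer_forward_now(t, ns_to_ktime(ctx->period_ns));
        return HRTIMER_RESTART;
}

static void poll_start(struct poll_ctx *ctx)
{
        hrtimer_setup(&ctx->timer, poll_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        hrtimer_start(&ctx->timer, ns_to_ktime(ctx->period_ns),
                      HRTIMER_MODE_REL_PINNED_HARD);
}

The polling period comes from the new update_interval module parameter (milliseconds, default 60000); its mode is 0444, so it can only be chosen at load time.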
*/ if (hwc->event_base_rdpmc < 0) - rdmsrl(hwc->event_base, new); + rdmsrq(hwc->event_base, new); else - rdpmcl(hwc->event_base_rdpmc, new); + new = rdpmc(hwc->event_base_rdpmc); local64_set(&hwc->prev_count, new); delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT); @@ -118,27 +162,40 @@ static void amd_uncore_read(struct perf_event *event) static void amd_uncore_start(struct perf_event *event, int flags) { + struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event); + struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu); struct hw_perf_event *hwc = &event->hw; + if (!ctx->nr_active++) + amd_uncore_start_hrtimer(ctx); + if (flags & PERF_EF_RELOAD) - wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count)); + wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count)); hwc->state = 0; - wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE)); + __set_bit(hwc->idx, ctx->active_mask); + wrmsrq(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE)); perf_event_update_userpage(event); } static void amd_uncore_stop(struct perf_event *event, int flags) { + struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event); + struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu); struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, hwc->config); + wrmsrq(hwc->config_base, hwc->config); hwc->state |= PERF_HES_STOPPED; if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { event->pmu->read(event); hwc->state |= PERF_HES_UPTODATE; } + + if (!--ctx->nr_active) + amd_uncore_cancel_hrtimer(ctx); + + __clear_bit(hwc->idx, ctx->active_mask); } static int amd_uncore_add(struct perf_event *event, int flags) @@ -491,6 +548,9 @@ static int amd_uncore_ctx_init(struct amd_uncore *uncore, unsigned int cpu) goto fail; } + amd_uncore_init_hrtimer(curr); + curr->hrtimer_duration = (u64)update_interval * NSEC_PER_MSEC; + cpumask_set_cpu(cpu, &pmu->active_mask); } @@ -880,16 +940,55 @@ static int amd_uncore_umc_event_init(struct perf_event *event) static void amd_uncore_umc_start(struct perf_event *event, int flags) { + struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event); + struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu); struct hw_perf_event *hwc = &event->hw; + if (!ctx->nr_active++) + amd_uncore_start_hrtimer(ctx); + if (flags & PERF_EF_RELOAD) - wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count)); + wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count)); hwc->state = 0; - wrmsrl(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC)); + __set_bit(hwc->idx, ctx->active_mask); + wrmsrq(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC)); perf_event_update_userpage(event); } +static void amd_uncore_umc_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev, new, shift; + s64 delta; + + shift = COUNTER_SHIFT + 1; + prev = local64_read(&hwc->prev_count); + + /* + * UMC counters do not have RDPMC assignments. Read counts directly + * from the corresponding PERF_CTR. + */ + rdmsrl(hwc->event_base, new); + + /* + * Unlike the other uncore counters, UMC counters saturate and set the + * Overflow bit (bit 48) on overflow. Since they do not roll over, + * proactively reset the corresponding PERF_CTR when bit 47 is set so + * that the counter never gets a chance to saturate. 
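The read path above widens both samples by COUNTER_SHIFT before subtracting, so a counter that rolled over its 48-bit range still produces a small positive delta after the arithmetic shift back down. A worked sketch of that trick, assuming COUNTER_SHIFT is 16 (48-bit counters), which is what the bit-47 test in the UMC hunk just below implies.

/* Shifting into the top 48 bits makes the subtraction wrap modulo 2^48;
 * the arithmetic right shift then restores the signed delta. */
static s64 delta48(u64 prev, u64 cur)
{
        s64 delta = (cur << 16) - (prev << 16);

        return delta >> 16;
}

/* Example: prev = 0xFFFFFFFFFFFF (top of the 48-bit range), cur = 0x5 after
 * the wrap; delta48() returns 6 rather than a huge negative value. */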
+ */ + if (new & BIT_ULL(63 - COUNTER_SHIFT)) { + wrmsrl(hwc->event_base, 0); + local64_set(&hwc->prev_count, 0); + } else { + local64_set(&hwc->prev_count, new); + } + + delta = (new << shift) - (prev << shift); + delta >>= shift; + local64_add(delta, &event->count); +} + static void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu) { @@ -968,7 +1067,7 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu) .del = amd_uncore_del, .start = amd_uncore_umc_start, .stop = amd_uncore_stop, - .read = amd_uncore_read, + .read = amd_uncore_umc_read, .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, .module = THIS_MODULE, }; diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 139ad80d1df3..7610f26dfbd9 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -32,6 +32,7 @@ #include <asm/apic.h> #include <asm/stacktrace.h> +#include <asm/msr.h> #include <asm/nmi.h> #include <asm/smp.h> #include <asm/alternative.h> @@ -95,6 +96,11 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter); DEFINE_STATIC_CALL_NULL(x86_pmu_late_setup, *x86_pmu.late_setup); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all); + /* * This one is magic, it will get called even when PMU init fails (because * there is no PMU), in which case it should simply return NULL. @@ -134,7 +140,7 @@ u64 x86_perf_event_update(struct perf_event *event) */ prev_raw_count = local64_read(&hwc->prev_count); do { - rdpmcl(hwc->event_base_rdpmc, new_raw_count); + new_raw_count = rdpmc(hwc->event_base_rdpmc); } while (!local64_try_cmpxchg(&hwc->prev_count, &prev_raw_count, new_raw_count)); @@ -269,7 +275,7 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask, */ for_each_set_bit(i, cntr_mask, X86_PMC_IDX_MAX) { reg = x86_pmu_config_addr(i); - ret = rdmsrl_safe(reg, &val); + ret = rdmsrq_safe(reg, &val); if (ret) goto msr_fail; if (val & ARCH_PERFMON_EVENTSEL_ENABLE) { @@ -283,7 +289,7 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask, if (*(u64 *)fixed_cntr_mask) { reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; - ret = rdmsrl_safe(reg, &val); + ret = rdmsrq_safe(reg, &val); if (ret) goto msr_fail; for_each_set_bit(i, fixed_cntr_mask, X86_PMC_IDX_MAX) { @@ -314,11 +320,11 @@ bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask, * (qemu/kvm) that don't trap on the MSR access and always return 0s. 
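The four DEFINE_STATIC_CALL_NULL() lines above add PEBS hooks to the set of PMU static calls; they are wired up in x86_pmu_static_call_update() later in this diff and invoked through static_call_cond(), so a hook a PMU does not provide costs a patched-out call rather than an indirect branch. A condensed sketch of that life cycle with made-up "demo" names.

#include <linux/static_call.h>
#include <linux/perf_event.h>

struct demo_pmu_ops {
        void (*pebs_enable)(struct perf_event *event);
};

static struct demo_pmu_ops demo_pmu;

/* Declared NULL; only the type of the second argument is used. */
DEFINE_STATIC_CALL_NULL(demo_pebs_enable, *demo_pmu.pebs_enable);

static void demo_static_call_update(void)
{
        /* Done once at init, as x86_pmu_static_call_update() does. */
        static_call_update(demo_pebs_enable, demo_pmu.pebs_enable);
}

static void demo_enable(struct perf_event *event)
{
        /* Becomes a direct call when set, a NOP while still NULL. */
        static_call_cond(demo_pebs_enable)(event);
}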
*/ reg = x86_pmu_event_addr(reg_safe); - if (rdmsrl_safe(reg, &val)) + if (rdmsrq_safe(reg, &val)) goto msr_fail; val ^= 0xffffUL; - ret = wrmsrl_safe(reg, val); - ret |= rdmsrl_safe(reg, &val_new); + ret = wrmsrq_safe(reg, val); + ret |= rdmsrq_safe(reg, &val_new); if (ret || val != val_new) goto msr_fail; @@ -674,6 +680,7 @@ static int __x86_pmu_event_init(struct perf_event *event) event->hw.idx = -1; event->hw.last_cpu = -1; event->hw.last_tag = ~0ULL; + event->hw.dyn_constraint = ~0ULL; /* mark unused */ event->hw.extra_reg.idx = EXTRA_REG_NONE; @@ -693,13 +700,13 @@ void x86_pmu_disable_all(void) if (!test_bit(idx, cpuc->active_mask)) continue; - rdmsrl(x86_pmu_config_addr(idx), val); + rdmsrq(x86_pmu_config_addr(idx), val); if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) continue; val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; - wrmsrl(x86_pmu_config_addr(idx), val); + wrmsrq(x86_pmu_config_addr(idx), val); if (is_counter_pair(hwc)) - wrmsrl(x86_pmu_config_addr(idx + 1), 0); + wrmsrq(x86_pmu_config_addr(idx + 1), 0); } } @@ -756,15 +763,16 @@ void x86_pmu_enable_all(int added) int is_x86_event(struct perf_event *event) { - int i; - - if (!is_hybrid()) - return event->pmu == &pmu; - - for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu) - return true; - } + /* + * For a non-hybrid platforms, the type of X86 pmu is + * always PERF_TYPE_RAW. + * For a hybrid platform, the PERF_PMU_CAP_EXTENDED_HW_TYPE + * is a unique capability for the X86 PMU. + * Use them to detect a X86 event. + */ + if (event->pmu->type == PERF_TYPE_RAW || + event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE) + return true; return false; } @@ -1420,14 +1428,14 @@ int x86_perf_event_set_period(struct perf_event *event) */ local64_set(&hwc->prev_count, (u64)-left); - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); + wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); /* * Sign extend the Merge event counter's upper 16 bits since * we currently declare a 48-bit counter width */ if (is_counter_pair(hwc)) - wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff); + wrmsrq(x86_pmu_event_addr(idx + 1), 0xffff); perf_event_update_userpage(event); @@ -1550,10 +1558,10 @@ void perf_event_print_debug(void) return; if (x86_pmu.version >= 2) { - rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); - rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); - rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); + rdmsrq(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); + rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); + rdmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); + rdmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); pr_info("\n"); pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); @@ -1561,19 +1569,19 @@ void perf_event_print_debug(void) pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); if (pebs_constraints) { - rdmsrl(MSR_IA32_PEBS_ENABLE, pebs); + rdmsrq(MSR_IA32_PEBS_ENABLE, pebs); pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs); } if (x86_pmu.lbr_nr) { - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl); } } pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX) { - rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl); - rdmsrl(x86_pmu_event_addr(idx), pmc_count); + rdmsrq(x86_pmu_config_addr(idx), pmc_ctrl); + rdmsrq(x86_pmu_event_addr(idx), pmc_count); prev_left = per_cpu(pmc_prev_left[idx], 
cpu); @@ -1587,7 +1595,7 @@ void perf_event_print_debug(void) for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) { if (fixed_counter_disabled(idx, cpuc->pmu)) continue; - rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count); + rdmsrq(x86_pmu_fixed_ctr_addr(idx), pmc_count); pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", cpu, idx, pmc_count); @@ -1683,6 +1691,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) struct cpu_hw_events *cpuc; struct perf_event *event; int idx, handled = 0; + u64 last_period; u64 val; cpuc = this_cpu_ptr(&cpu_hw_events); @@ -1702,6 +1711,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) continue; event = cpuc->events[idx]; + last_period = event->hw.last_period; val = static_call(x86_pmu_update)(event); if (val & (1ULL << (x86_pmu.cntval_bits - 1))) @@ -1715,12 +1725,11 @@ int x86_pmu_handle_irq(struct pt_regs *regs) if (!static_call(x86_pmu_set_period)(event)) continue; - perf_sample_data_init(&data, 0, event->hw.last_period); + perf_sample_data_init(&data, 0, last_period); perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); - if (perf_event_overflow(event, &data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } if (handled) @@ -2046,6 +2055,11 @@ static void x86_pmu_static_call_update(void) static_call_update(x86_pmu_filter, x86_pmu.filter); static_call_update(x86_pmu_late_setup, x86_pmu.late_setup); + + static_call_update(x86_pmu_pebs_enable, x86_pmu.pebs_enable); + static_call_update(x86_pmu_pebs_disable, x86_pmu.pebs_disable); + static_call_update(x86_pmu_pebs_enable_all, x86_pmu.pebs_enable_all); + static_call_update(x86_pmu_pebs_disable_all, x86_pmu.pebs_disable_all); } static void _x86_pmu_read(struct perf_event *event) @@ -2496,9 +2510,9 @@ void perf_clear_dirty_counters(void) if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask))) continue; - wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0); + wrmsrq(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0); } else { - wrmsrl(x86_pmu_event_addr(i), 0); + wrmsrq(x86_pmu_event_addr(i), 0); } } @@ -2803,8 +2817,15 @@ static unsigned long get_segment_base(unsigned int segment) #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; + /* + * If we're not in a valid context with a real (not just lazy) + * user mm, then don't even try. + */ + if (!nmi_uaccess_okay()) + return 0; + /* IRQs are off, so this synchronizes with smp_store_release */ - ldt = READ_ONCE(current->active_mm->context.ldt); + ldt = smp_load_acquire(&current->mm->context.ldt); if (!ldt || idx >= ldt->nr_entries) return 0; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index a95e6c91c4d7..61da6b8a3d51 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -17,6 +17,7 @@ #include <linux/sizes.h> #include <asm/perf_event.h> +#include <asm/msr.h> #include "../perf_event.h" @@ -80,54 +81,54 @@ static void * bts_buffer_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) { - struct bts_buffer *buf; + struct bts_buffer *bb; struct page *page; int cpu = event->cpu; int node = (cpu == -1) ?
cpu : cpu_to_node(cpu); unsigned long offset; size_t size = nr_pages << PAGE_SHIFT; - int pg, nbuf, pad; + int pg, nr_buf, pad; /* count all the high order buffers */ - for (pg = 0, nbuf = 0; pg < nr_pages;) { + for (pg = 0, nr_buf = 0; pg < nr_pages;) { page = virt_to_page(pages[pg]); pg += buf_nr_pages(page); - nbuf++; + nr_buf++; } /* * to avoid interrupts in overwrite mode, only allow one physical */ - if (overwrite && nbuf > 1) + if (overwrite && nr_buf > 1) return NULL; - buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node); - if (!buf) + bb = kzalloc_node(struct_size(bb, buf, nr_buf), GFP_KERNEL, node); + if (!bb) return NULL; - buf->nr_pages = nr_pages; - buf->nr_bufs = nbuf; - buf->snapshot = overwrite; - buf->data_pages = pages; - buf->real_size = size - size % BTS_RECORD_SIZE; + bb->nr_pages = nr_pages; + bb->nr_bufs = nr_buf; + bb->snapshot = overwrite; + bb->data_pages = pages; + bb->real_size = size - size % BTS_RECORD_SIZE; - for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) { + for (pg = 0, nr_buf = 0, offset = 0, pad = 0; nr_buf < bb->nr_bufs; nr_buf++) { unsigned int __nr_pages; page = virt_to_page(pages[pg]); __nr_pages = buf_nr_pages(page); - buf->buf[nbuf].page = page; - buf->buf[nbuf].offset = offset; - buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); - buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement; - pad = buf->buf[nbuf].size % BTS_RECORD_SIZE; - buf->buf[nbuf].size -= pad; + bb->buf[nr_buf].page = page; + bb->buf[nr_buf].offset = offset; + bb->buf[nr_buf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0); + bb->buf[nr_buf].size = buf_size(page) - bb->buf[nr_buf].displacement; + pad = bb->buf[nr_buf].size % BTS_RECORD_SIZE; + bb->buf[nr_buf].size -= pad; pg += __nr_pages; offset += __nr_pages << PAGE_SHIFT; } - return buf; + return bb; } static void bts_buffer_free_aux(void *data) @@ -135,25 +136,25 @@ static void bts_buffer_free_aux(void *data) kfree(data); } -static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx) +static unsigned long bts_buffer_offset(struct bts_buffer *bb, unsigned int idx) { - return buf->buf[idx].offset + buf->buf[idx].displacement; + return bb->buf[idx].offset + bb->buf[idx].displacement; } static void -bts_config_buffer(struct bts_buffer *buf) +bts_config_buffer(struct bts_buffer *bb) { int cpu = raw_smp_processor_id(); struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - struct bts_phys *phys = &buf->buf[buf->cur_buf]; + struct bts_phys *phys = &bb->buf[bb->cur_buf]; unsigned long index, thresh = 0, end = phys->size; struct page *page = phys->page; - index = local_read(&buf->head); + index = local_read(&bb->head); - if (!buf->snapshot) { - if (buf->end < phys->offset + buf_size(page)) - end = buf->end - phys->offset - phys->displacement; + if (!bb->snapshot) { + if (bb->end < phys->offset + buf_size(page)) + end = bb->end - phys->offset - phys->displacement; index -= phys->offset + phys->displacement; @@ -168,7 +169,7 @@ bts_config_buffer(struct bts_buffer *buf) ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement; ds->bts_index = ds->bts_buffer_base + index; ds->bts_absolute_maximum = ds->bts_buffer_base + end; - ds->bts_interrupt_threshold = !buf->snapshot + ds->bts_interrupt_threshold = !bb->snapshot ? 
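Besides the buf to bb rename, the setup path above switches the allocation from an open-coded offsetof() to struct_size(), which sizes a struct with a trailing flexible array and saturates on overflow instead of wrapping. A generic sketch; struct demo is invented for illustration.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo {
        int nr;
        u64 slot[];
};

static struct demo *demo_alloc(int nr, int node)
{
        struct demo *d;

        /* sizeof(*d) + nr * sizeof(d->slot[0]), saturated at SIZE_MAX */
        d = kzalloc_node(struct_size(d, slot, nr), GFP_KERNEL, node);
        if (d)
                d->nr = nr;
        return d;
}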
ds->bts_buffer_base + thresh : ds->bts_absolute_maximum + BTS_RECORD_SIZE; } @@ -184,16 +185,16 @@ static void bts_update(struct bts_ctx *bts) { int cpu = raw_smp_processor_id(); struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - struct bts_buffer *buf = perf_get_aux(&bts->handle); + struct bts_buffer *bb = perf_get_aux(&bts->handle); unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head; - if (!buf) + if (!bb) return; - head = index + bts_buffer_offset(buf, buf->cur_buf); - old = local_xchg(&buf->head, head); + head = index + bts_buffer_offset(bb, bb->cur_buf); + old = local_xchg(&bb->head, head); - if (!buf->snapshot) { + if (!bb->snapshot) { if (old == head) return; @@ -205,9 +206,9 @@ static void bts_update(struct bts_ctx *bts) * old and head are always in the same physical buffer, so we * can subtract them to get the data size. */ - local_add(head - old, &buf->data_size); + local_add(head - old, &bb->data_size); } else { - local_set(&buf->data_size, head); + local_set(&bb->data_size, head); } /* @@ -218,7 +219,7 @@ static void bts_update(struct bts_ctx *bts) } static int -bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle); +bts_buffer_reset(struct bts_buffer *bb, struct perf_output_handle *handle); /* * Ordering PMU callbacks wrt themselves and the PMI is done by means @@ -232,17 +233,17 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle); static void __bts_event_start(struct perf_event *event) { struct bts_ctx *bts = this_cpu_ptr(bts_ctx); - struct bts_buffer *buf = perf_get_aux(&bts->handle); + struct bts_buffer *bb = perf_get_aux(&bts->handle); u64 config = 0; - if (!buf->snapshot) + if (!bb->snapshot) config |= ARCH_PERFMON_EVENTSEL_INT; if (!event->attr.exclude_kernel) config |= ARCH_PERFMON_EVENTSEL_OS; if (!event->attr.exclude_user) config |= ARCH_PERFMON_EVENTSEL_USR; - bts_config_buffer(buf); + bts_config_buffer(bb); /* * local barrier to make sure that ds configuration made it @@ -261,13 +262,13 @@ static void bts_event_start(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct bts_ctx *bts = this_cpu_ptr(bts_ctx); - struct bts_buffer *buf; + struct bts_buffer *bb; - buf = perf_aux_output_begin(&bts->handle, event); - if (!buf) + bb = perf_aux_output_begin(&bts->handle, event); + if (!bb) goto fail_stop; - if (bts_buffer_reset(buf, &bts->handle)) + if (bts_buffer_reset(bb, &bts->handle)) goto fail_end_stop; bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base; @@ -306,27 +307,27 @@ static void bts_event_stop(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct bts_ctx *bts = this_cpu_ptr(bts_ctx); - struct bts_buffer *buf = NULL; + struct bts_buffer *bb = NULL; int state = READ_ONCE(bts->state); if (state == BTS_STATE_ACTIVE) __bts_event_stop(event, BTS_STATE_STOPPED); if (state != BTS_STATE_STOPPED) - buf = perf_get_aux(&bts->handle); + bb = perf_get_aux(&bts->handle); event->hw.state |= PERF_HES_STOPPED; if (flags & PERF_EF_UPDATE) { bts_update(bts); - if (buf) { - if (buf->snapshot) + if (bb) { + if (bb->snapshot) bts->handle.head = - local_xchg(&buf->data_size, - buf->nr_pages << PAGE_SHIFT); + local_xchg(&bb->data_size, + bb->nr_pages << PAGE_SHIFT); perf_aux_output_end(&bts->handle, - local_xchg(&buf->data_size, 0)); + local_xchg(&bb->data_size, 0)); } cpuc->ds->bts_index = bts->ds_back.bts_buffer_base; @@ -382,19 +383,19 @@ void intel_bts_disable_local(void) } static int 
-bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle) +bts_buffer_reset(struct bts_buffer *bb, struct perf_output_handle *handle) { unsigned long head, space, next_space, pad, gap, skip, wakeup; unsigned int next_buf; struct bts_phys *phys, *next_phys; int ret; - if (buf->snapshot) + if (bb->snapshot) return 0; - head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); + head = handle->head & ((bb->nr_pages << PAGE_SHIFT) - 1); - phys = &buf->buf[buf->cur_buf]; + phys = &bb->buf[bb->cur_buf]; space = phys->offset + phys->displacement + phys->size - head; pad = space; if (space > handle->size) { @@ -403,10 +404,10 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle) } if (space <= BTS_SAFETY_MARGIN) { /* See if next phys buffer has more space */ - next_buf = buf->cur_buf + 1; - if (next_buf >= buf->nr_bufs) + next_buf = bb->cur_buf + 1; + if (next_buf >= bb->nr_bufs) next_buf = 0; - next_phys = &buf->buf[next_buf]; + next_phys = &bb->buf[next_buf]; gap = buf_size(phys->page) - phys->displacement - phys->size + next_phys->displacement; skip = pad + gap; @@ -431,8 +432,8 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle) * anymore, so we must not be racing with * bts_update(). */ - buf->cur_buf = next_buf; - local_set(&buf->head, head); + bb->cur_buf = next_buf; + local_set(&bb->head, head); } } } @@ -445,7 +446,7 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle) space -= space % BTS_RECORD_SIZE; } - buf->end = head + space; + bb->end = head + space; /* * If we have no space, the lost notification would have been sent when @@ -462,7 +463,7 @@ int intel_bts_interrupt(void) struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds; struct bts_ctx *bts; struct perf_event *event; - struct bts_buffer *buf; + struct bts_buffer *bb; s64 old_head; int err = -ENOSPC, handled = 0; @@ -485,8 +486,8 @@ int intel_bts_interrupt(void) if (READ_ONCE(bts->state) == BTS_STATE_STOPPED) return handled; - buf = perf_get_aux(&bts->handle); - if (!buf) + bb = perf_get_aux(&bts->handle); + if (!bb) return handled; /* @@ -494,26 +495,26 @@ int intel_bts_interrupt(void) * there's no other way of telling, because the pointer will * keep moving */ - if (buf->snapshot) + if (bb->snapshot) return 0; - old_head = local_read(&buf->head); + old_head = local_read(&bb->head); bts_update(bts); /* no new data */ - if (old_head == local_read(&buf->head)) + if (old_head == local_read(&bb->head)) return handled; - perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0)); + perf_aux_output_end(&bts->handle, local_xchg(&bb->data_size, 0)); - buf = perf_aux_output_begin(&bts->handle, event); - if (buf) - err = bts_buffer_reset(buf, &bts->handle); + bb = perf_aux_output_begin(&bts->handle, event); + if (bb) + err = bts_buffer_reset(bb, &bts->handle); if (err) { WRITE_ONCE(bts->state, BTS_STATE_STOPPED); - if (buf) { + if (bb) { /* * BTS_STATE_STOPPED should be visible before * cleared handle::event @@ -599,7 +600,11 @@ static void bts_event_read(struct perf_event *event) static __init int bts_init(void) { - if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) + if (!boot_cpu_has(X86_FEATURE_DTES64)) + return -ENODEV; + + x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); + if (!x86_pmu.bts) return -ENODEV; if (boot_cpu_has(X86_FEATURE_PTI)) { diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index c5f385413392..466283326630 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ 
-23,6 +23,7 @@ #include <asm/intel_pt.h> #include <asm/apic.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "../perf_event.h" @@ -2224,6 +2225,18 @@ static struct extra_reg intel_cmt_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; +EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_skt, "event=0x9c,umask=0x01"); +EVENT_ATTR_STR(topdown-retiring, td_retiring_skt, "event=0xc2,umask=0x02"); +EVENT_ATTR_STR(topdown-be-bound, td_be_bound_skt, "event=0xa4,umask=0x02"); + +static struct attribute *skt_events_attrs[] = { + EVENT_PTR(td_fe_bound_skt), + EVENT_PTR(td_retiring_skt), + EVENT_PTR(td_bad_spec_cmt), + EVENT_PTR(td_be_bound_skt), + NULL, +}; + #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ #define KNL_MCDRAM_LOCAL BIT_ULL(21) @@ -2285,7 +2298,7 @@ static __always_inline void __intel_pmu_disable_all(bool bts) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) intel_pmu_disable_bts(); @@ -2294,7 +2307,7 @@ static __always_inline void __intel_pmu_disable_all(bool bts) static __always_inline void intel_pmu_disable_all(void) { __intel_pmu_disable_all(true); - intel_pmu_pebs_disable_all(); + static_call_cond(x86_pmu_pebs_disable_all)(); intel_pmu_lbr_disable_all(); } @@ -2306,11 +2319,11 @@ static void __intel_pmu_enable_all(int added, bool pmi) intel_pmu_lbr_enable_all(pmi); if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { - wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); + wrmsrq(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; } - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, intel_ctrl & ~cpuc->intel_ctrl_guest_mask); if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { @@ -2326,7 +2339,7 @@ static void __intel_pmu_enable_all(int added, bool pmi) static void intel_pmu_enable_all(int added) { - intel_pmu_pebs_enable_all(); + static_call_cond(x86_pmu_pebs_enable_all)(); __intel_pmu_enable_all(added, false); } @@ -2426,12 +2439,12 @@ static void intel_pmu_nhm_workaround(void) } for (i = 0; i < 4; i++) { - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); - wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); + wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); + wrmsrq(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); } - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); for (i = 0; i < 4; i++) { event = cpuc->events[i]; @@ -2441,7 +2454,7 @@ static void intel_pmu_nhm_workaround(void) __x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE); } else - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); + wrmsrq(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); } } @@ -2458,7 +2471,7 @@ static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) if (cpuc->tfa_shadow != val) { cpuc->tfa_shadow = val; - wrmsrl(MSR_TSX_FORCE_ABORT, val); + wrmsrq(MSR_TSX_FORCE_ABORT, val); } } @@ -2489,14 +2502,14 @@ static inline u64 intel_pmu_get_status(void) { u64 status; - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); return status; } static inline void intel_pmu_ack_status(u64 ack) { - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); + wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } static inline bool event_is_checkpointed(struct 
perf_event *event) @@ -2583,7 +2596,7 @@ static void intel_pmu_disable_event(struct perf_event *event) * so we don't trigger the event without PEBS bit set. */ if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_disable(event); + static_call(x86_pmu_pebs_disable)(event); } static void intel_pmu_assign_event(struct perf_event *event, int idx) @@ -2603,6 +2616,9 @@ static void intel_pmu_del_event(struct perf_event *event) intel_pmu_lbr_del(event); if (event->attr.precise_ip) intel_pmu_pebs_del(event); + if (is_pebs_counter_event_group(event) || + is_acr_event_group(event)) + this_cpu_ptr(&cpu_hw_events)->n_late_setup--; } static int icl_set_topdown_event_period(struct perf_event *event) @@ -2619,15 +2635,15 @@ static int icl_set_topdown_event_period(struct perf_event *event) * Don't need to clear them again. */ if (left == x86_pmu.max_period) { - wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); - wrmsrl(MSR_PERF_METRICS, 0); + wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrq(MSR_PERF_METRICS, 0); hwc->saved_slots = 0; hwc->saved_metric = 0; } if ((hwc->saved_slots) && is_slots_event(event)) { - wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); - wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); + wrmsrq(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); + wrmsrq(MSR_PERF_METRICS, hwc->saved_metric); } perf_event_update_userpage(event); @@ -2724,12 +2740,12 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, if (!val) { /* read Fixed counter 3 */ - rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots); + slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE); if (!slots) return 0; /* read PERF_METRICS */ - rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics); + metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS); } else { slots = val[0]; metrics = val[1]; @@ -2773,8 +2789,8 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, if (reset) { /* The fixed counter 3 has to be written before the PERF_METRICS. */ - wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); - wrmsrl(MSR_PERF_METRICS, 0); + wrmsrq(MSR_CORE_PERF_FIXED_CTR3, 0); + wrmsrq(MSR_PERF_METRICS, 0); if (event) update_saved_topdown_regs(event, 0, 0, metric_end); } @@ -2880,6 +2896,52 @@ static void intel_pmu_enable_fixed(struct perf_event *event) cpuc->fixed_ctrl_val |= bits; } +static void intel_pmu_config_acr(int idx, u64 mask, u32 reload) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int msr_b, msr_c; + + if (!mask && !cpuc->acr_cfg_b[idx]) + return; + + if (idx < INTEL_PMC_IDX_FIXED) { + msr_b = MSR_IA32_PMC_V6_GP0_CFG_B; + msr_c = MSR_IA32_PMC_V6_GP0_CFG_C; + } else { + msr_b = MSR_IA32_PMC_V6_FX0_CFG_B; + msr_c = MSR_IA32_PMC_V6_FX0_CFG_C; + idx -= INTEL_PMC_IDX_FIXED; + } + + if (cpuc->acr_cfg_b[idx] != mask) { + wrmsrl(msr_b + x86_pmu.addr_offset(idx, false), mask); + cpuc->acr_cfg_b[idx] = mask; + } + /* Only need to update the reload value when there is a valid config value. */ + if (mask && cpuc->acr_cfg_c[idx] != reload) { + wrmsrl(msr_c + x86_pmu.addr_offset(idx, false), reload); + cpuc->acr_cfg_c[idx] = reload; + } +} + +static void intel_pmu_enable_acr(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!is_acr_event_group(event) || !event->attr.config2) { + /* + * The disable doesn't clear the ACR CFG register. + * Check and clear the ACR CFG register. 
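intel_pmu_config_acr() above gives every reloadable counter a pair of config MSRs: CFG_B holds the mask of counters whose overflow should reload it, CFG_C holds the reload value, and both are cached in cpuc so unchanged values are not rewritten. A hedged sketch (numbers invented) of what the enable path ends up programming for one GP counter; it assumes config1 was already filled in by the ACR late-setup pass further down.

/* Sketch only, conceptually sitting alongside the code above. */
static void demo_program_acr(struct hw_perf_event *hwc)
{
        hwc->idx           = 2;        /* GP counter 2                       */
        hwc->config1       = 0x3;      /* reload when GP 0 or GP 1 overflows */
        hwc->sample_period = 100000;

        /* Writes CFG_B[2] = 0x3 and CFG_C[2] = (u32)-100000. */
        intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
}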
+ */ + intel_pmu_config_acr(hwc->idx, 0, 0); + return; + } + + intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period); +} + +DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr); + static void intel_pmu_enable_event(struct perf_event *event) { u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; @@ -2887,16 +2949,19 @@ static void intel_pmu_enable_event(struct perf_event *event) int idx = hwc->idx; if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_enable(event); + static_call(x86_pmu_pebs_enable)(event); switch (idx) { case 0 ... INTEL_PMC_IDX_FIXED - 1: if (branch_sample_counters(event)) enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; intel_set_masks(event, idx); + static_call_cond(intel_pmu_enable_acr_event)(event); __x86_pmu_enable_event(hwc, enable_mask); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + static_call_cond(intel_pmu_enable_acr_event)(event); + fallthrough; case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: intel_pmu_enable_fixed(event); break; @@ -2914,12 +2979,51 @@ static void intel_pmu_enable_event(struct perf_event *event) } } +static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc) +{ + struct perf_event *event, *leader; + int i, j, idx; + + for (i = 0; i < cpuc->n_events; i++) { + leader = cpuc->event_list[i]; + if (!is_acr_event_group(leader)) + continue; + + /* The ACR events must be contiguous. */ + for (j = i; j < cpuc->n_events; j++) { + event = cpuc->event_list[j]; + if (event->group_leader != leader->group_leader) + break; + for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) { + if (WARN_ON_ONCE(i + idx > cpuc->n_events)) + return; + __set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1); + } + } + i = j - 1; + } +} + +void intel_pmu_late_setup(void) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + if (!cpuc->n_late_setup) + return; + + intel_pmu_pebs_late_setup(cpuc); + intel_pmu_acr_late_setup(cpuc); +} + static void intel_pmu_add_event(struct perf_event *event) { if (event->attr.precise_ip) intel_pmu_pebs_add(event); if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_add(event); + if (is_pebs_counter_event_group(event) || + is_acr_event_group(event)) + this_cpu_ptr(&cpu_hw_events)->n_late_setup++; } /* @@ -2937,7 +3041,7 @@ int intel_pmu_save_and_restart(struct perf_event *event) */ if (unlikely(event_is_checkpointed(event))) { /* No race with NMIs because the counter should not be armed */ - wrmsrl(event->hw.event_base, 0); + wrmsrq(event->hw.event_base, 0); local64_set(&event->hw.prev_count, 0); } return static_call(x86_pmu_set_period)(event); @@ -2976,13 +3080,13 @@ static void intel_pmu_reset(void) pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); for_each_set_bit(idx, cntr_mask, INTEL_PMC_MAX_GENERIC) { - wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); - wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); + wrmsrq_safe(x86_pmu_config_addr(idx), 0ull); + wrmsrq_safe(x86_pmu_event_addr(idx), 0ull); } for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) { if (fixed_counter_disabled(idx, cpuc->pmu)) continue; - wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull); + wrmsrq_safe(x86_pmu_fixed_ctr_addr(idx), 0ull); } if (ds) @@ -2991,7 +3095,7 @@ static void intel_pmu_reset(void) /* Ack all overflows and disable fixed counters */ if (x86_pmu.version >= 2) { intel_pmu_ack_status(intel_pmu_get_status()); - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); } /* Reset LBRs and LBR freezing */ @@ 
-3035,8 +3139,7 @@ static void x86_pmu_handle_guest_pebs(struct pt_regs *regs, continue; perf_sample_data_init(data, 0, event->hw.last_period); - if (perf_event_overflow(event, data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, data, regs); /* Inject one fake event is enough. */ break; @@ -3101,7 +3204,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) * Update the MSR if pebs_enabled is changed. */ if (pebs_enabled != cpuc->pebs_enabled) - wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); + wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); /* * Above PEBS handler (PEBS counters snapshotting) has updated fixed @@ -3141,6 +3244,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; + u64 last_period; handled++; @@ -3168,16 +3272,17 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) if (is_pebs_counter_event_group(event)) x86_pmu.drain_pebs(regs, &data); + last_period = event->hw.last_period; + if (!intel_pmu_save_and_restart(event)) continue; - perf_sample_data_init(&data, 0, event->hw.last_period); + perf_sample_data_init(&data, 0, last_period); if (has_branch_stack(event)) intel_pmu_lbr_save_brstack(&data, cpuc, event); - if (perf_event_overflow(event, &data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } return handled; @@ -3739,10 +3844,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, if (cpuc->excl_cntrs) return intel_get_excl_constraints(cpuc, event, idx, c2); - /* Not all counters support the branch counter feature. */ - if (branch_sample_counters(event)) { + if (event->hw.dyn_constraint != ~0ULL) { c2 = dyn_constraint(cpuc, c2, idx); - c2->idxmsk64 &= x86_pmu.lbr_counters; + c2->idxmsk64 &= event->hw.dyn_constraint; c2->weight = hweight64(c2->idxmsk64); } @@ -4083,6 +4187,39 @@ end: return start; } +static inline bool intel_pmu_has_acr(struct pmu *pmu) +{ + return !!hybrid(pmu, acr_cause_mask64); +} + +static bool intel_pmu_is_acr_group(struct perf_event *event) +{ + /* The group leader has the ACR flag set */ + if (is_acr_event_group(event)) + return true; + + /* The acr_mask is set */ + if (event->attr.config2) + return true; + + return false; +} + +static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event, + u64 *cause_mask, int *num) +{ + event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64); + *cause_mask |= event->attr.config2; + *num += 1; +} + +static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event, + int idx, u64 cause_mask) +{ + if (test_bit(idx, (unsigned long *)&cause_mask)) + event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64); +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); @@ -4144,15 +4281,19 @@ static int intel_pmu_hw_config(struct perf_event *event) leader = event->group_leader; if (branch_sample_call_stack(leader)) return -EINVAL; - if (branch_sample_counters(leader)) + if (branch_sample_counters(leader)) { num++; + leader->hw.dyn_constraint &= x86_pmu.lbr_counters; + } leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; for_each_sibling_event(sibling, leader) { if (branch_sample_call_stack(sibling)) return -EINVAL; - if (branch_sample_counters(sibling)) + if (branch_sample_counters(sibling)) { num++; + sibling->hw.dyn_constraint &= x86_pmu.lbr_counters; + } } if (num > fls(x86_pmu.lbr_counters)) @@ -4207,6 +4348,94 @@ static int 
intel_pmu_hw_config(struct perf_event *event) event->attr.precise_ip) event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR; + if (intel_pmu_has_acr(event->pmu) && intel_pmu_is_acr_group(event)) { + struct perf_event *sibling, *leader = event->group_leader; + struct pmu *pmu = event->pmu; + bool has_sw_event = false; + int num = 0, idx = 0; + u64 cause_mask = 0; + + /* Not support perf metrics */ + if (is_metric_event(event)) + return -EINVAL; + + /* Not support freq mode */ + if (event->attr.freq) + return -EINVAL; + + /* PDist is not supported */ + if (event->attr.config2 && event->attr.precise_ip > 2) + return -EINVAL; + + /* The reload value cannot exceeds the max period */ + if (event->attr.sample_period > x86_pmu.max_period) + return -EINVAL; + /* + * The counter-constraints of each event cannot be finalized + * unless the whole group is scanned. However, it's hard + * to know whether the event is the last one of the group. + * Recalculate the counter-constraints for each event when + * adding a new event. + * + * The group is traversed twice, which may be optimized later. + * In the first round, + * - Find all events which do reload when other events + * overflow and set the corresponding counter-constraints + * - Add all events, which can cause other events reload, + * in the cause_mask + * - Error out if the number of events exceeds the HW limit + * - The ACR events must be contiguous. + * Error out if there are non-X86 events between ACR events. + * This is not a HW limit, but a SW limit. + * With the assumption, the intel_pmu_acr_late_setup() can + * easily convert the event idx to counter idx without + * traversing the whole event list. + */ + if (!is_x86_event(leader)) + return -EINVAL; + + if (leader->attr.config2) + intel_pmu_set_acr_cntr_constr(leader, &cause_mask, &num); + + if (leader->nr_siblings) { + for_each_sibling_event(sibling, leader) { + if (!is_x86_event(sibling)) { + has_sw_event = true; + continue; + } + if (!sibling->attr.config2) + continue; + if (has_sw_event) + return -EINVAL; + intel_pmu_set_acr_cntr_constr(sibling, &cause_mask, &num); + } + } + if (leader != event && event->attr.config2) { + if (has_sw_event) + return -EINVAL; + intel_pmu_set_acr_cntr_constr(event, &cause_mask, &num); + } + + if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) || + num > hweight64(hybrid(event->pmu, acr_cntr_mask64))) + return -EINVAL; + /* + * In the second round, apply the counter-constraints for + * the events which can cause other events reload. 
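The two-pass validation above is easier to follow with concrete numbers. A worked example (values invented) for a group of three hardware events, in creation order:

/*
 * ev0 (leader)  .config2 = 0x6   reload ev0 when ev1 or ev2 overflows
 * ev1           .config2 = 0x0   plain counting event
 * ev2           .config2 = 0x1   reload ev2 when ev0 overflows
 *
 * First pass: ev0 and ev2 have a non-zero acr_mask, so both are constrained
 * to acr_cntr_mask64 and counted (num = 2); cause_mask accumulates
 * 0x6 | 0x1 = 0x7, i.e. every group position can trigger a reload.
 * Second pass: each event whose position is set in cause_mask is further
 * constrained to acr_cause_mask64. The group is rejected if num or the
 * weight of cause_mask exceeds what the CPUID ACR sub-leaf advertised, or
 * if a non-x86 (software) event sits between the ACR events.
 */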
+ */ + intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask); + + if (leader->nr_siblings) { + for_each_sibling_event(sibling, leader) + intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask); + } + + if (leader != event) + intel_pmu_set_acr_caused_constr(event, idx, cause_mask); + + leader->hw.flags |= PERF_X86_EVENT_ACR; + } + if ((event->attr.type == PERF_TYPE_HARDWARE) || (event->attr.type == PERF_TYPE_HW_CACHE)) return 0; @@ -4354,7 +4583,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask, }; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) return arr; /* @@ -4952,7 +5181,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) goto err; } - if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) { + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) { size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); @@ -5041,7 +5270,7 @@ static inline bool intel_pmu_broken_perf_cap(void) return false; } -static void update_pmu_cap(struct x86_hybrid_pmu *pmu) +static void update_pmu_cap(struct pmu *pmu) { unsigned int cntr, fixed_cntr, ecx, edx; union cpuid35_eax eax; @@ -5050,20 +5279,30 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx); if (ebx.split.umask2) - pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2; + hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2; if (ebx.split.eq) - pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ; + hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ; if (eax.split.cntr_subleaf) { cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, &cntr, &fixed_cntr, &ecx, &edx); - pmu->cntr_mask64 = cntr; - pmu->fixed_cntr_mask64 = fixed_cntr; + hybrid(pmu, cntr_mask64) = cntr; + hybrid(pmu, fixed_cntr_mask64) = fixed_cntr; + } + + if (eax.split.acr_subleaf) { + cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF, + &cntr, &fixed_cntr, &ecx, &edx); + /* The mask of the counters which can be reloaded */ + hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED); + + /* The mask of the counters which can cause a reload of reloadable counters */ + hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED); } if (!intel_pmu_broken_perf_cap()) { /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ - rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); + rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities); } } @@ -5150,7 +5389,7 @@ static bool init_hybrid_pmu(int cpu) goto end; if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) - update_pmu_cap(pmu); + update_pmu_cap(&pmu->pmu); intel_pmu_check_hybrid_pmus(pmu); @@ -5211,7 +5450,7 @@ static void intel_pmu_cpu_starting(int cpu) if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { union perf_capabilities perf_cap; - rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); + rdmsrq(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); if (!perf_cap.perf_metrics) { x86_pmu.intel_cap.perf_metrics = 0; x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); @@ -5524,7 +5763,7 @@ static __init void intel_clovertown_quirk(void) * these chips. 
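
Editorial aside, not part of the diff: a large share of the remaining hunks in this section is a mechanical rename of the MSR and PMC accessors; rdmsrl/wrmsrl and their _safe and _on_cpu variants become rdmsrq/wrmsrq, and rdpmcl() is replaced by an rdpmc() that returns the count (visible in intel_pmu_save_and_restart_reload() further down). A side-by-side sketch, assuming kernel context and that the renamed helpers keep the old calling convention, which is what the hunks themselves show:

	#include <linux/types.h>
	#include <asm/msr.h>

	/* Illustrative kernel-context sketch of the accessor rename, not from the patch. */
	static void msr_accessor_rename_sketch(void)
	{
		u64 val;

		/* was: rdmsrl(MSR_IA32_PEBS_ENABLE, val); */
		rdmsrq(MSR_IA32_PEBS_ENABLE, val);

		/* was: wrmsrl(MSR_IA32_PEBS_ENABLE, val); */
		wrmsrq(MSR_IA32_PEBS_ENABLE, val);

		/* was: rdmsrl_safe(...); the _safe variants still return non-zero on a faulting access */
		if (rdmsrq_safe(MSR_IA32_PEBS_ENABLE, &val))
			val = 0;

		/* was: rdpmcl(0, val); the replacement returns the value instead */
		val = rdpmc(0);

		(void)val;
	}
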
*/ pr_warn("PEBS disabled due to CPU errata\n"); - x86_pmu.pebs = 0; + x86_pmu.ds_pebs = 0; x86_pmu.pebs_constraints = NULL; } @@ -5619,24 +5858,24 @@ static bool check_msr(unsigned long msr, u64 mask) * matches, this is needed to detect certain hardware emulators * (qemu/kvm) that don't trap on the MSR access and always return 0s. */ - if (rdmsrl_safe(msr, &val_old)) + if (rdmsrq_safe(msr, &val_old)) return false; /* - * Only change the bits which can be updated by wrmsrl. + * Only change the bits which can be updated by wrmsrq. */ val_tmp = val_old ^ mask; if (is_lbr_from(msr)) val_tmp = lbr_from_signext_quirk_wr(val_tmp); - if (wrmsrl_safe(msr, val_tmp) || - rdmsrl_safe(msr, &val_new)) + if (wrmsrq_safe(msr, val_tmp) || + rdmsrq_safe(msr, &val_new)) return false; /* - * Quirk only affects validation in wrmsr(), so wrmsrl()'s value - * should equal rdmsrl()'s even with the quirk. + * Quirk only affects validation in wrmsr(), so wrmsrq()'s value + * should equal rdmsrq()'s even with the quirk. */ if (val_new != val_tmp) return false; @@ -5647,7 +5886,7 @@ static bool check_msr(unsigned long msr, u64 mask) /* Here it's sure that the MSR can be safely accessed. * Restore the old value and return. */ - wrmsrl(msr, val_old); + wrmsrq(msr, val_old); return true; } @@ -6012,7 +6251,7 @@ tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) static umode_t pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) { - return x86_pmu.pebs ? attr->mode : 0; + return x86_pmu.ds_pebs ? attr->mode : 0; } static umode_t @@ -6043,6 +6282,21 @@ td_is_visible(struct kobject *kobj, struct attribute *attr, int i) return attr->mode; } +PMU_FORMAT_ATTR(acr_mask, "config2:0-63"); + +static struct attribute *format_acr_attrs[] = { + &format_attr_acr_mask.attr, + NULL +}; + +static umode_t +acr_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + + return intel_pmu_has_acr(dev_get_drvdata(dev)) ? 
attr->mode : 0; +} + static struct attribute_group group_events_td = { .name = "events", .is_visible = td_is_visible, @@ -6085,6 +6339,12 @@ static struct attribute_group group_format_evtsel_ext = { .is_visible = evtsel_ext_is_visible, }; +static struct attribute_group group_format_acr = { + .name = "format", + .attrs = format_acr_attrs, + .is_visible = acr_is_visible, +}; + static struct attribute_group group_default = { .attrs = intel_pmu_attrs, .is_visible = default_is_visible, @@ -6099,6 +6359,7 @@ static const struct attribute_group *attr_update[] = { &group_format_extra, &group_format_extra_skl, &group_format_evtsel_ext, + &group_format_acr, &group_default, NULL, }; @@ -6383,6 +6644,7 @@ static const struct attribute_group *hybrid_attr_update[] = { &group_caps_lbr, &hybrid_group_format_extra, &group_format_evtsel_ext, + &group_format_acr, &group_default, &hybrid_group_cpus, NULL, @@ -6575,6 +6837,7 @@ static __always_inline void intel_pmu_init_skt(struct pmu *pmu) intel_pmu_init_grt(pmu); hybrid(pmu, event_constraints) = intel_skt_event_constraints; hybrid(pmu, extra_regs) = intel_cmt_extra_regs; + static_call_update(intel_pmu_enable_acr_event, intel_pmu_enable_acr); } __init int intel_pmu_init(void) @@ -6635,6 +6898,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_events_mask = intel_pmu_pebs_mask(x86_pmu.cntr_mask64); x86_pmu.pebs_capable = PEBS_COUNTER_MASK; + x86_pmu.config_mask = X86_RAW_EVENT_MASK; /* * Quirk: v2 perfmon does not report fixed-purpose events, so @@ -6651,7 +6915,7 @@ __init int intel_pmu_init(void) if (boot_cpu_has(X86_FEATURE_PDCM)) { u64 capabilities; - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); + rdmsrq(MSR_IA32_PERF_CAPABILITIES, capabilities); x86_pmu.intel_cap.capabilities = capabilities; } @@ -6663,7 +6927,7 @@ __init int intel_pmu_init(void) if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) intel_pmu_arch_lbr_init(); - intel_ds_init(); + intel_pebs_init(); x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ @@ -6674,6 +6938,12 @@ __init int intel_pmu_init(void) } /* + * Many features on and after V6 require dynamic constraint, + * e.g., Arch PEBS, ACR. + */ + if (version >= 6) + x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT; + /* * Install the hw-cache-events table: */ switch (boot_cpu_data.x86_vfm) { @@ -6884,6 +7154,18 @@ __init int intel_pmu_init(void) name = "crestmont"; break; + case INTEL_ATOM_DARKMONT_X: + intel_pmu_init_skt(NULL); + intel_pmu_pebs_data_source_cmt(); + x86_pmu.pebs_latency_data = cmt_latency_data; + x86_pmu.get_event_constraints = cmt_get_event_constraints; + td_attr = skt_events_attrs; + mem_attr = grt_mem_attrs; + extra_attr = cmt_format_attr; + pr_cont("Darkmont events, "); + name = "darkmont"; + break; + case INTEL_WESTMERE: case INTEL_WESTMERE_EP: case INTEL_WESTMERE_EX: @@ -7433,6 +7715,18 @@ __init int intel_pmu_init(void) x86_pmu.attr_update = hybrid_attr_update; } + /* + * The archPerfmonExt (0x23) includes an enhanced enumeration of + * PMU architectural features with a per-core view. For non-hybrid, + * each core has the same PMU capabilities. It's good enough to + * update the x86_pmu from the booting CPU. For hybrid, the x86_pmu + * is used to keep the common capabilities. Still keep the values + * from the leaf 0xa. The core specific update will be done later + * when a new type is online. 
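
Editorial aside, not part of the diff: update_pmu_cap() now takes a struct pmu pointer and writes through hybrid(pmu, field), so the same code updates either the global x86_pmu or a hybrid PMU's private copy. The real kernel macro is more involved than this; the stand-alone sketch below only models the lvalue-accessor idea, and every name in it is invented for the illustration.

	#include <stdio.h>
	#include <stdint.h>

	struct pmu_caps {
		uint64_t cntr_mask64;
	};

	static struct pmu_caps global_caps;	/* stands in for the global x86_pmu copy */

	/*
	 * Simplified model of the hybrid(pmu, field) idea: resolve to the per-PMU
	 * copy when a PMU is given, otherwise to the global copy, as one lvalue.
	 */
	#define hybrid_field(pmu, field) \
		(*((pmu) ? &(pmu)->field : &global_caps.field))

	int main(void)
	{
		struct pmu_caps pcore = {0};	/* hypothetical hybrid PMU copy */
		struct pmu_caps *none = NULL;

		hybrid_field(none, cntr_mask64) = 0xff;		/* no PMU given: updates the global copy */
		hybrid_field(&pcore, cntr_mask64) = 0x3f;	/* hybrid PMU given: updates its own copy */

		printf("global=%#llx pcore=%#llx\n",
		       (unsigned long long)global_caps.cntr_mask64,
		       (unsigned long long)pcore.cntr_mask64);
		return 0;
	}

This is why the non-hybrid boot path right after this comment can call update_pmu_cap(NULL) while init_hybrid_pmu() above passes the per-type PMU: the NULL case falls through to the global copy.
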
+ */ + if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) + update_pmu_cap(NULL); + intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64, &x86_pmu.fixed_cntr_mask64, &x86_pmu.intel_ctrl); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index ae4ec16156bb..ec753e39b007 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -111,6 +111,7 @@ #include <linux/nospec.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "../perf_event.h" #include "../probe.h" @@ -320,7 +321,7 @@ static inline u64 cstate_pmu_read_counter(struct perf_event *event) { u64 val; - rdmsrl(event->hw.event_base, val); + rdmsrq(event->hw.event_base, val); return val; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8d86e91bd5e5..c0b7ac1c7594 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -10,6 +10,7 @@ #include <asm/tlbflush.h> #include <asm/insn.h> #include <asm/io.h> +#include <asm/msr.h> #include <asm/timer.h> #include "../perf_event.h" @@ -624,7 +625,7 @@ static int alloc_pebs_buffer(int cpu) int max, node = cpu_to_node(cpu); void *buffer, *insn_buff, *cea; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) return 0; buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); @@ -659,7 +660,7 @@ static void release_pebs_buffer(int cpu) struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); void *cea; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) return; kfree(per_cpu(insn_buffer, cpu)); @@ -734,7 +735,7 @@ void release_ds_buffers(void) { int cpu; - if (!x86_pmu.bts && !x86_pmu.pebs) + if (!x86_pmu.bts && !x86_pmu.ds_pebs) return; for_each_possible_cpu(cpu) @@ -750,7 +751,8 @@ void release_ds_buffers(void) } for_each_possible_cpu(cpu) { - release_pebs_buffer(cpu); + if (x86_pmu.ds_pebs) + release_pebs_buffer(cpu); release_bts_buffer(cpu); } } @@ -761,15 +763,17 @@ void reserve_ds_buffers(void) int cpu; x86_pmu.bts_active = 0; - x86_pmu.pebs_active = 0; - if (!x86_pmu.bts && !x86_pmu.pebs) + if (x86_pmu.ds_pebs) + x86_pmu.pebs_active = 0; + + if (!x86_pmu.bts && !x86_pmu.ds_pebs) return; if (!x86_pmu.bts) bts_err = 1; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) pebs_err = 1; for_each_possible_cpu(cpu) { @@ -781,7 +785,8 @@ void reserve_ds_buffers(void) if (!bts_err && alloc_bts_buffer(cpu)) bts_err = 1; - if (!pebs_err && alloc_pebs_buffer(cpu)) + if (x86_pmu.ds_pebs && !pebs_err && + alloc_pebs_buffer(cpu)) pebs_err = 1; if (bts_err && pebs_err) @@ -793,7 +798,7 @@ void reserve_ds_buffers(void) release_bts_buffer(cpu); } - if (pebs_err) { + if (x86_pmu.ds_pebs && pebs_err) { for_each_possible_cpu(cpu) release_pebs_buffer(cpu); } @@ -805,7 +810,7 @@ void reserve_ds_buffers(void) if (x86_pmu.bts && !bts_err) x86_pmu.bts_active = 1; - if (x86_pmu.pebs && !pebs_err) + if (x86_pmu.ds_pebs && !pebs_err) x86_pmu.pebs_active = 1; for_each_possible_cpu(cpu) { @@ -1355,9 +1360,8 @@ static void __intel_pmu_pebs_update_cfg(struct perf_event *event, } -static void intel_pmu_late_setup(void) +void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc) { - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct perf_event *event; u64 pebs_data_cfg = 0; int i; @@ -1517,7 +1521,7 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event) else value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx]; } - wrmsrl(base + idx, value); + wrmsrq(base + idx, value); } static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) @@ -1554,7 +1558,7 @@ void 
intel_pmu_pebs_enable(struct perf_event *event) */ intel_pmu_drain_pebs_buffer(); adaptive_pebs_record_size_update(); - wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg); + wrmsrq(MSR_PEBS_DATA_CFG, pebs_data_cfg); cpuc->active_pebs_data_cfg = pebs_data_cfg; } } @@ -1617,7 +1621,7 @@ void intel_pmu_pebs_disable(struct perf_event *event) intel_pmu_pebs_via_pt_disable(event); if (cpuc->enabled) - wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); + wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); hwc->config |= ARCH_PERFMON_EVENTSEL_INT; } @@ -1627,7 +1631,7 @@ void intel_pmu_pebs_enable_all(void) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->pebs_enabled) - wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); + wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); } void intel_pmu_pebs_disable_all(void) @@ -1828,8 +1832,6 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, perf_sample_data_init(data, 0, event->hw.last_period); - data->period = event->hw.last_period; - /* * Use latency for weight (only avail with PEBS-LL) */ @@ -2082,7 +2084,6 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, sample_type = event->attr.sample_type; format_group = basic->format_group; perf_sample_data_init(data, 0, event->hw.last_period); - data->period = event->hw.last_period; setup_pebs_time(event, data, basic->tsc); @@ -2276,7 +2277,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count) WARN_ON(this_cpu_read(cpu_hw_events.enabled)); prev_raw_count = local64_read(&hwc->prev_count); - rdpmcl(hwc->event_base_rdpmc, new_raw_count); + new_raw_count = rdpmc(hwc->event_base_rdpmc); local64_set(&hwc->prev_count, new_raw_count); /* @@ -2359,8 +2360,7 @@ __intel_pmu_pebs_last_event(struct perf_event *event, * All but the last records are processed. * The last one is left to be able to call the overflow handler. */ - if (perf_event_overflow(event, data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, data, regs); } if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { @@ -2589,8 +2589,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d if (error[bit]) { perf_log_lost_samples(event, error[bit]); - if (iregs && perf_event_account_interrupt(event)) - x86_pmu_stop(event, 0); + if (iregs) + perf_event_account_interrupt(event); } if (counts[bit]) { @@ -2670,10 +2670,10 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d } /* - * BTS, PEBS probe and setup + * PEBS probe and setup */ -void __init intel_ds_init(void) +void __init intel_pebs_init(void) { /* * No support for 32bit formats @@ -2681,13 +2681,12 @@ void __init intel_ds_init(void) if (!boot_cpu_has(X86_FEATURE_DTES64)) return; - x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); - x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); + x86_pmu.ds_pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; if (x86_pmu.version <= 4) x86_pmu.pebs_no_isolation = 1; - if (x86_pmu.pebs) { + if (x86_pmu.ds_pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? 
'+' : '-'; char *pebs_qual = ""; int format = x86_pmu.intel_cap.pebs_format; @@ -2695,6 +2694,11 @@ void __init intel_ds_init(void) if (format < 4) x86_pmu.intel_cap.pebs_baseline = 0; + x86_pmu.pebs_enable = intel_pmu_pebs_enable; + x86_pmu.pebs_disable = intel_pmu_pebs_disable; + x86_pmu.pebs_enable_all = intel_pmu_pebs_enable_all; + x86_pmu.pebs_disable_all = intel_pmu_pebs_disable_all; + switch (format) { case 0: pr_cont("PEBS fmt0%c, ", pebs_type); @@ -2779,7 +2783,7 @@ void __init intel_ds_init(void) default: pr_cont("no PEBS fmt%d%c, ", format, pebs_type); - x86_pmu.pebs = 0; + x86_pmu.ds_pebs = 0; } } } @@ -2788,8 +2792,8 @@ void perf_restore_debug_store(void) { struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); - if (!x86_pmu.bts && !x86_pmu.pebs) + if (!x86_pmu.bts && !x86_pmu.ds_pebs) return; - wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); + wrmsrq(MSR_IA32_DS_AREA, (unsigned long)ds); } diff --git a/arch/x86/events/intel/knc.c b/arch/x86/events/intel/knc.c index 034a1f6a457c..e614baf42926 100644 --- a/arch/x86/events/intel/knc.c +++ b/arch/x86/events/intel/knc.c @@ -5,6 +5,7 @@ #include <linux/types.h> #include <asm/hardirq.h> +#include <asm/msr.h> #include "../perf_event.h" @@ -159,18 +160,18 @@ static void knc_pmu_disable_all(void) { u64 val; - rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); + rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); val &= ~(KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1); - wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); + wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); } static void knc_pmu_enable_all(int added) { u64 val; - rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); + rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); val |= (KNC_ENABLE_COUNTER0|KNC_ENABLE_COUNTER1); - wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); + wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val); } static inline void @@ -182,7 +183,7 @@ knc_pmu_disable_event(struct perf_event *event) val = hwc->config; val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; - (void)wrmsrl_safe(hwc->config_base + hwc->idx, val); + (void)wrmsrq_safe(hwc->config_base + hwc->idx, val); } static void knc_pmu_enable_event(struct perf_event *event) @@ -193,21 +194,21 @@ static void knc_pmu_enable_event(struct perf_event *event) val = hwc->config; val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)wrmsrl_safe(hwc->config_base + hwc->idx, val); + (void)wrmsrq_safe(hwc->config_base + hwc->idx, val); } static inline u64 knc_pmu_get_status(void) { u64 status; - rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status); + rdmsrq(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status); return status; } static inline void knc_pmu_ack_status(u64 ack) { - wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack); + wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack); } static int knc_pmu_handle_irq(struct pt_regs *regs) @@ -241,19 +242,20 @@ again: for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; + u64 last_period; handled++; if (!test_bit(bit, cpuc->active_mask)) continue; + last_period = event->hw.last_period; if (!intel_pmu_save_and_restart(event)) continue; - perf_sample_data_init(&data, 0, event->hw.last_period); + perf_sample_data_init(&data, 0, last_period); - if (perf_event_overflow(event, &data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } /* diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index f44c3d866f24..7aa59966e7c3 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -137,9 +137,9 @@ static void __intel_pmu_lbr_enable(bool pmi) if 
(cpuc->lbr_sel) lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask; if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel) - wrmsrl(MSR_LBR_SELECT, lbr_select); + wrmsrq(MSR_LBR_SELECT, lbr_select); - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); orig_debugctl = debugctl; if (!static_cpu_has(X86_FEATURE_ARCH_LBR)) @@ -155,10 +155,10 @@ static void __intel_pmu_lbr_enable(bool pmi) debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI; if (orig_debugctl != debugctl) - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); if (static_cpu_has(X86_FEATURE_ARCH_LBR)) - wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN); + wrmsrq(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN); } void intel_pmu_lbr_reset_32(void) @@ -166,7 +166,7 @@ void intel_pmu_lbr_reset_32(void) int i; for (i = 0; i < x86_pmu.lbr_nr; i++) - wrmsrl(x86_pmu.lbr_from + i, 0); + wrmsrq(x86_pmu.lbr_from + i, 0); } void intel_pmu_lbr_reset_64(void) @@ -174,17 +174,17 @@ void intel_pmu_lbr_reset_64(void) int i; for (i = 0; i < x86_pmu.lbr_nr; i++) { - wrmsrl(x86_pmu.lbr_from + i, 0); - wrmsrl(x86_pmu.lbr_to + i, 0); + wrmsrq(x86_pmu.lbr_from + i, 0); + wrmsrq(x86_pmu.lbr_to + i, 0); if (x86_pmu.lbr_has_info) - wrmsrl(x86_pmu.lbr_info + i, 0); + wrmsrq(x86_pmu.lbr_info + i, 0); } } static void intel_pmu_arch_lbr_reset(void) { /* Write to ARCH_LBR_DEPTH MSR, all LBR entries are reset to 0 */ - wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr); + wrmsrq(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr); } void intel_pmu_lbr_reset(void) @@ -199,7 +199,7 @@ void intel_pmu_lbr_reset(void) cpuc->last_task_ctx = NULL; cpuc->last_log_id = 0; if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && cpuc->lbr_select) - wrmsrl(MSR_LBR_SELECT, 0); + wrmsrq(MSR_LBR_SELECT, 0); } /* @@ -209,7 +209,7 @@ static inline u64 intel_pmu_lbr_tos(void) { u64 tos; - rdmsrl(x86_pmu.lbr_tos, tos); + rdmsrq(x86_pmu.lbr_tos, tos); return tos; } @@ -282,17 +282,17 @@ static u64 lbr_from_signext_quirk_rd(u64 val) static __always_inline void wrlbr_from(unsigned int idx, u64 val) { val = lbr_from_signext_quirk_wr(val); - wrmsrl(x86_pmu.lbr_from + idx, val); + wrmsrq(x86_pmu.lbr_from + idx, val); } static __always_inline void wrlbr_to(unsigned int idx, u64 val) { - wrmsrl(x86_pmu.lbr_to + idx, val); + wrmsrq(x86_pmu.lbr_to + idx, val); } static __always_inline void wrlbr_info(unsigned int idx, u64 val) { - wrmsrl(x86_pmu.lbr_info + idx, val); + wrmsrq(x86_pmu.lbr_info + idx, val); } static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr) @@ -302,7 +302,7 @@ static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr) if (lbr) return lbr->from; - rdmsrl(x86_pmu.lbr_from + idx, val); + rdmsrq(x86_pmu.lbr_from + idx, val); return lbr_from_signext_quirk_rd(val); } @@ -314,7 +314,7 @@ static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr) if (lbr) return lbr->to; - rdmsrl(x86_pmu.lbr_to + idx, val); + rdmsrq(x86_pmu.lbr_to + idx, val); return val; } @@ -326,7 +326,7 @@ static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr) if (lbr) return lbr->info; - rdmsrl(x86_pmu.lbr_info + idx, val); + rdmsrq(x86_pmu.lbr_info + idx, val); return val; } @@ -380,10 +380,10 @@ void intel_pmu_lbr_restore(void *ctx) wrlbr_info(lbr_idx, 0); } - wrmsrl(x86_pmu.lbr_tos, tos); + wrmsrq(x86_pmu.lbr_tos, tos); if (cpuc->lbr_select) - wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); + wrmsrq(MSR_LBR_SELECT, task_ctx->lbr_sel); } static void 
intel_pmu_arch_lbr_restore(void *ctx) @@ -475,7 +475,7 @@ void intel_pmu_lbr_save(void *ctx) task_ctx->tos = tos; if (cpuc->lbr_select) - rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel); + rdmsrq(MSR_LBR_SELECT, task_ctx->lbr_sel); } static void intel_pmu_arch_lbr_save(void *ctx) @@ -752,7 +752,7 @@ void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) u64 lbr; } msr_lastbranch; - rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); + rdmsrq(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); perf_clear_branch_entry_bitfields(br); @@ -1602,7 +1602,7 @@ void __init intel_pmu_arch_lbr_init(void) goto clear_arch_lbr; /* Apply the max depth of Arch LBR */ - if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr)) + if (wrmsrq_safe(MSR_ARCH_LBR_DEPTH, lbr_nr)) goto clear_arch_lbr; x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask; @@ -1618,7 +1618,7 @@ void __init intel_pmu_arch_lbr_init(void) x86_pmu.lbr_nr = lbr_nr; if (!!x86_pmu.lbr_counters) - x86_pmu.flags |= PMU_FL_BR_CNTR; + x86_pmu.flags |= PMU_FL_BR_CNTR | PMU_FL_DYN_CONSTRAINT; if (x86_pmu.lbr_mispred) static_branch_enable(&x86_lbr_mispred); diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c index c85a9fc44355..e5fd7367e45d 100644 --- a/arch/x86/events/intel/p4.c +++ b/arch/x86/events/intel/p4.c @@ -13,6 +13,7 @@ #include <asm/cpu_device_id.h> #include <asm/hardirq.h> #include <asm/apic.h> +#include <asm/msr.h> #include "../perf_event.h" @@ -859,9 +860,9 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) u64 v; /* an official way for overflow indication */ - rdmsrl(hwc->config_base, v); + rdmsrq(hwc->config_base, v); if (v & P4_CCCR_OVF) { - wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF); + wrmsrq(hwc->config_base, v & ~P4_CCCR_OVF); return 1; } @@ -872,7 +873,7 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) * the counter has reached zero value and continued counting before * real NMI signal was received: */ - rdmsrl(hwc->event_base, v); + rdmsrq(hwc->event_base, v); if (!(v & ARCH_P4_UNFLAGGED_BIT)) return 1; @@ -897,8 +898,8 @@ static void p4_pmu_disable_pebs(void) * So at moment let leave metrics turned on forever -- it's * ok for now but need to be revisited! 
* - * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0); - * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0); + * (void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, 0); + * (void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT, 0); */ } @@ -911,7 +912,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) * state we need to clear P4_CCCR_OVF, otherwise interrupt get * asserted again and again */ - (void)wrmsrl_safe(hwc->config_base, + (void)wrmsrq_safe(hwc->config_base, p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); } @@ -944,8 +945,8 @@ static void p4_pmu_enable_pebs(u64 config) bind = &p4_pebs_bind_map[idx]; - (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs); - (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert); + (void)wrmsrq_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs); + (void)wrmsrq_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert); } static void __p4_pmu_enable_event(struct perf_event *event) @@ -979,8 +980,8 @@ static void __p4_pmu_enable_event(struct perf_event *event) */ p4_pmu_enable_pebs(hwc->config); - (void)wrmsrl_safe(escr_addr, escr_conf); - (void)wrmsrl_safe(hwc->config_base, + (void)wrmsrq_safe(escr_addr, escr_conf); + (void)wrmsrq_safe(hwc->config_base, (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); } @@ -1024,7 +1025,7 @@ static int p4_pmu_set_period(struct perf_event *event) * * the former idea is taken from OProfile code */ - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); + wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); } return ret; @@ -1072,8 +1073,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) continue; - if (perf_event_overflow(event, &data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } if (handled) @@ -1398,7 +1398,7 @@ __init int p4_pmu_init(void) */ for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) { reg = x86_pmu_config_addr(i); - wrmsrl_safe(reg, 0ULL); + wrmsrq_safe(reg, 0ULL); } return 0; diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c index 65b45e9d7016..6e41de355bd8 100644 --- a/arch/x86/events/intel/p6.c +++ b/arch/x86/events/intel/p6.c @@ -3,6 +3,7 @@ #include <linux/types.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "../perf_event.h" @@ -142,9 +143,9 @@ static void p6_pmu_disable_all(void) u64 val; /* p6 only has one enable register */ - rdmsrl(MSR_P6_EVNTSEL0, val); + rdmsrq(MSR_P6_EVNTSEL0, val); val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; - wrmsrl(MSR_P6_EVNTSEL0, val); + wrmsrq(MSR_P6_EVNTSEL0, val); } static void p6_pmu_enable_all(int added) @@ -152,9 +153,9 @@ static void p6_pmu_enable_all(int added) unsigned long val; /* p6 only has one enable register */ - rdmsrl(MSR_P6_EVNTSEL0, val); + rdmsrq(MSR_P6_EVNTSEL0, val); val |= ARCH_PERFMON_EVENTSEL_ENABLE; - wrmsrl(MSR_P6_EVNTSEL0, val); + wrmsrq(MSR_P6_EVNTSEL0, val); } static inline void @@ -163,7 +164,7 @@ p6_pmu_disable_event(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; u64 val = P6_NOP_EVENT; - (void)wrmsrl_safe(hwc->config_base, val); + (void)wrmsrq_safe(hwc->config_base, val); } static void p6_pmu_enable_event(struct perf_event *event) @@ -180,7 +181,7 @@ static void p6_pmu_enable_event(struct perf_event *event) * to actually enable the events. 
*/ - (void)wrmsrl_safe(hwc->config_base, val); + (void)wrmsrq_safe(hwc->config_base, val); } PMU_FORMAT_ATTR(event, "config:0-7" ); diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index fa37565f6418..e8cf29d2b10c 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -18,12 +18,13 @@ #include <linux/slab.h> #include <linux/device.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/perf_event.h> #include <asm/insn.h> #include <asm/io.h> #include <asm/intel_pt.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "../perf_event.h" #include "pt.h" @@ -194,7 +195,7 @@ static int __init pt_pmu_hw_init(void) int ret; long i; - rdmsrl(MSR_PLATFORM_INFO, reg); + rdmsrq(MSR_PLATFORM_INFO, reg); pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8; /* @@ -230,7 +231,7 @@ static int __init pt_pmu_hw_init(void) * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace * post-VMXON. */ - rdmsrl(MSR_IA32_VMX_MISC, reg); + rdmsrq(MSR_IA32_VMX_MISC, reg); if (reg & BIT(14)) pt_pmu.vmx = true; } @@ -426,7 +427,7 @@ static void pt_config_start(struct perf_event *event) if (READ_ONCE(pt->vmx_on)) perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL); else - wrmsrl(MSR_IA32_RTIT_CTL, ctl); + wrmsrq(MSR_IA32_RTIT_CTL, ctl); WRITE_ONCE(event->hw.aux_config, ctl); } @@ -485,12 +486,12 @@ static u64 pt_config_filters(struct perf_event *event) /* avoid redundant msr writes */ if (pt->filters.filter[range].msr_a != filter->msr_a) { - wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a); + wrmsrq(pt_address_ranges[range].msr_a, filter->msr_a); pt->filters.filter[range].msr_a = filter->msr_a; } if (pt->filters.filter[range].msr_b != filter->msr_b) { - wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b); + wrmsrq(pt_address_ranges[range].msr_b, filter->msr_b); pt->filters.filter[range].msr_b = filter->msr_b; } @@ -509,7 +510,7 @@ static void pt_config(struct perf_event *event) /* First round: clear STATUS, in particular the PSB byte counter. 
*/ if (!event->hw.aux_config) { perf_event_itrace_started(event); - wrmsrl(MSR_IA32_RTIT_STATUS, 0); + wrmsrq(MSR_IA32_RTIT_STATUS, 0); } reg = pt_config_filters(event); @@ -569,7 +570,7 @@ static void pt_config_stop(struct perf_event *event) ctl &= ~RTIT_CTL_TRACEEN; if (!READ_ONCE(pt->vmx_on)) - wrmsrl(MSR_IA32_RTIT_CTL, ctl); + wrmsrq(MSR_IA32_RTIT_CTL, ctl); WRITE_ONCE(event->hw.aux_config, ctl); @@ -658,13 +659,13 @@ static void pt_config_buffer(struct pt_buffer *buf) reg = virt_to_phys(base); if (pt->output_base != reg) { pt->output_base = reg; - wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg); + wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, reg); } reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32); if (pt->output_mask != reg) { pt->output_mask = reg; - wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg); + wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, reg); } } @@ -926,7 +927,7 @@ static void pt_handle_status(struct pt *pt) int advance = 0; u64 status; - rdmsrl(MSR_IA32_RTIT_STATUS, status); + rdmsrq(MSR_IA32_RTIT_STATUS, status); if (status & RTIT_STATUS_ERROR) { pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n"); @@ -970,7 +971,7 @@ static void pt_handle_status(struct pt *pt) if (advance) pt_buffer_advance(buf); - wrmsrl(MSR_IA32_RTIT_STATUS, status); + wrmsrq(MSR_IA32_RTIT_STATUS, status); } /** @@ -985,12 +986,12 @@ static void pt_read_offset(struct pt_buffer *buf) struct topa_page *tp; if (!buf->single) { - rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base); + rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base); tp = phys_to_virt(pt->output_base); buf->cur = &tp->topa; } - rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask); + rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask); /* offset within current output region */ buf->output_off = pt->output_mask >> 32; /* index of current output region within this table */ @@ -1585,7 +1586,7 @@ void intel_pt_handle_vmx(int on) /* Turn PTs back on */ if (!on && event) - wrmsrl(MSR_IA32_RTIT_CTL, event->hw.aux_config); + wrmsrq(MSR_IA32_RTIT_CTL, event->hw.aux_config); local_irq_restore(flags); } @@ -1611,7 +1612,7 @@ static void pt_event_start(struct perf_event *event, int mode) * PMI might have just cleared these, so resume_allowed * must be checked again also. 
*/ - rdmsrl(MSR_IA32_RTIT_STATUS, status); + rdmsrq(MSR_IA32_RTIT_STATUS, status); if (!(status & (RTIT_STATUS_TRIGGEREN | RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED)) && @@ -1839,7 +1840,7 @@ static __init int pt_init(void) for_each_online_cpu(cpu) { u64 ctl; - ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl); + ret = rdmsrq_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl); if (!ret && (ctl & RTIT_CTL_TRACEEN)) prior_warn++; } @@ -1863,6 +1864,8 @@ static __init int pt_init(void) if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG; + else + pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_PREFER_LARGE; pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE | diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index a34e50fc4a8f..e0815a12db90 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -3,6 +3,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "uncore.h" #include "uncore_discovery.h" @@ -150,7 +151,7 @@ u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *eve { u64 count; - rdmsrl(event->hw.event_base, count); + rdmsrq(event->hw.event_base, count); return count; } @@ -305,17 +306,11 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) { struct intel_uncore_box *box; struct perf_event *event; - unsigned long flags; int bit; box = container_of(hrtimer, struct intel_uncore_box, hrtimer); if (!box->n_active || box->cpu != smp_processor_id()) return HRTIMER_NORESTART; - /* - * disable local interrupt to prevent uncore_pmu_event_start/stop - * to interrupt the update process - */ - local_irq_save(flags); /* * handle boxes with an active event list as opposed to active @@ -328,8 +323,6 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) uncore_perf_event_update(box, box->events[bit]); - local_irq_restore(flags); - hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); return HRTIMER_RESTART; } @@ -337,7 +330,7 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) { hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), - HRTIMER_MODE_REL_PINNED); + HRTIMER_MODE_REL_PINNED_HARD); } void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) @@ -347,7 +340,7 @@ void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) { - hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); } static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index 571e44b49691..18a3022f26a0 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/msr.h> #include "uncore.h" #include "uncore_discovery.h" @@ -441,17 +442,17 @@ static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box) void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box) { - wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); + wrmsrq(intel_generic_uncore_box_ctl(box), 
GENERIC_PMON_BOX_CTL_INT); } void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box) { - wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); + wrmsrq(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); } void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box) { - wrmsrl(intel_generic_uncore_box_ctl(box), 0); + wrmsrq(intel_generic_uncore_box_ctl(box), 0); } static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box, @@ -459,7 +460,7 @@ static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box, { struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, hwc->config); + wrmsrq(hwc->config_base, hwc->config); } static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box, @@ -467,7 +468,7 @@ static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box, { struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, 0); + wrmsrq(hwc->config_base, 0); } static struct intel_uncore_ops generic_uncore_msr_ops = { diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 466833478e81..8962e7cb21e3 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Nehalem-EX/Westmere-EX uncore support */ #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "uncore.h" /* NHM-EX event control */ @@ -200,12 +201,12 @@ DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box) { - wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); + wrmsrq(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL); } static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box) { - wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0); + wrmsrq(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0); } static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) @@ -214,12 +215,12 @@ static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box) u64 config; if (msr) { - rdmsrl(msr, config); + rdmsrq(msr, config); config &= ~((1ULL << uncore_num_counters(box)) - 1); /* WBox has a fixed counter */ if (uncore_msr_fixed_ctl(box)) config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN; - wrmsrl(msr, config); + wrmsrq(msr, config); } } @@ -229,18 +230,18 @@ static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box) u64 config; if (msr) { - rdmsrl(msr, config); + rdmsrq(msr, config); config |= (1ULL << uncore_num_counters(box)) - 1; /* WBox has a fixed counter */ if (uncore_msr_fixed_ctl(box)) config |= NHMEX_W_PMON_GLOBAL_FIXED_EN; - wrmsrl(msr, config); + wrmsrq(msr, config); } } static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) { - wrmsrl(event->hw.config_base, 0); + wrmsrq(event->hw.config_base, 0); } static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) @@ -248,11 +249,11 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p struct hw_perf_event *hwc = &event->hw; if (hwc->idx == UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); + wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); + wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); else - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); + 
wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); } #define NHMEX_UNCORE_OPS_COMMON_INIT() \ @@ -382,10 +383,10 @@ static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event_extra *reg2 = &hwc->branch_reg; if (reg1->idx != EXTRA_REG_NONE) { - wrmsrl(reg1->reg, reg1->config); - wrmsrl(reg1->reg + 1, reg2->config); + wrmsrq(reg1->reg, reg1->config); + wrmsrq(reg1->reg + 1, reg2->config); } - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | + wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK)); } @@ -467,12 +468,12 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event_extra *reg2 = &hwc->branch_reg; if (reg1->idx != EXTRA_REG_NONE) { - wrmsrl(reg1->reg, 0); - wrmsrl(reg1->reg + 1, reg1->config); - wrmsrl(reg1->reg + 2, reg2->config); - wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); + wrmsrq(reg1->reg, 0); + wrmsrq(reg1->reg + 1, reg1->config); + wrmsrq(reg1->reg + 2, reg2->config); + wrmsrq(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); } - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); + wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); } static struct attribute *nhmex_uncore_sbox_formats_attr[] = { @@ -842,25 +843,25 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per idx = __BITS_VALUE(reg1->idx, 0, 8); if (idx != 0xff) - wrmsrl(__BITS_VALUE(reg1->reg, 0, 16), + wrmsrq(__BITS_VALUE(reg1->reg, 0, 16), nhmex_mbox_shared_reg_config(box, idx)); idx = __BITS_VALUE(reg1->idx, 1, 8); if (idx != 0xff) - wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), + wrmsrq(__BITS_VALUE(reg1->reg, 1, 16), nhmex_mbox_shared_reg_config(box, idx)); if (reg2->idx != EXTRA_REG_NONE) { - wrmsrl(reg2->reg, 0); + wrmsrq(reg2->reg, 0); if (reg2->config != ~0ULL) { - wrmsrl(reg2->reg + 1, + wrmsrq(reg2->reg + 1, reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); - wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & + wrmsrq(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); - wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + wrmsrq(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); } } - wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); + wrmsrq(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); } DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); @@ -1121,31 +1122,31 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per switch (idx % 6) { case 0: - wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); + wrmsrq(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); break; case 1: - wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); + wrmsrq(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); break; case 2: case 3: - wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), + wrmsrq(NHMEX_R_MSR_PORTN_QLX_CFG(port), uncore_shared_reg_config(box, 2 + (idx / 6) * 5)); break; case 4: - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), hwc->config >> 32); - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); break; case 5: - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), hwc->config >> 32); - wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); - 
wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); + wrmsrq(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); break; } - wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | + wrmsrq(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 | (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); } diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index edb7fd50efe0..a1a96833e30e 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */ +#include <asm/msr.h> #include "uncore.h" #include "uncore_discovery.h" @@ -260,34 +261,34 @@ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event *hwc = &event->hw; if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); + wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); else - wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); + wrmsrq(hwc->config_base, SNB_UNC_CTL_EN); } static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) { - wrmsrl(event->hw.config_base, 0); + wrmsrq(event->hw.config_base, 0); } static void snb_uncore_msr_init_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) { - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, + wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); } } static void snb_uncore_msr_enable_box(struct intel_uncore_box *box) { - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, + wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); } static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0); + wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 0); } static struct uncore_event_desc snb_uncore_events[] = { @@ -372,7 +373,7 @@ void snb_uncore_cpu_init(void) static void skl_uncore_msr_init_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) { - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); } @@ -383,14 +384,14 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box) static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) { - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); } static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0); + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 0); } static struct intel_uncore_ops skl_uncore_msr_ops = { @@ -504,7 +505,7 @@ static int icl_get_cbox_num(void) { u64 num_boxes; - rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes); + rdmsrq(ICL_UNC_CBO_CONFIG, num_boxes); return num_boxes & ICL_UNC_NUM_CBO_MASK; } @@ -525,7 +526,7 @@ static struct intel_uncore_type *tgl_msr_uncores[] = { static void rkl_uncore_msr_init_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) - wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); + wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); } void tgl_uncore_cpu_init(void) @@ -541,24 +542,24 @@ void tgl_uncore_cpu_init(void) static void adl_uncore_msr_init_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); } static void 
adl_uncore_msr_enable_box(struct intel_uncore_box *box) { - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); } static void adl_uncore_msr_disable_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0); + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0); } static void adl_uncore_msr_exit_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) - wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0); + wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0); } static struct intel_uncore_ops adl_uncore_msr_ops = { @@ -691,7 +692,7 @@ static struct intel_uncore_type mtl_uncore_hac_cbox = { static void mtl_uncore_msr_init_box(struct intel_uncore_box *box) { - wrmsrl(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN); + wrmsrq(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN); } static struct intel_uncore_ops mtl_uncore_msr_ops = { @@ -758,7 +759,7 @@ static struct intel_uncore_type *lnl_msr_uncores[] = { static void lnl_uncore_msr_init_box(struct intel_uncore_box *box) { if (box->pmu->pmu_idx == 0) - wrmsrl(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); + wrmsrq(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); } static struct intel_uncore_ops lnl_uncore_msr_ops = { @@ -1306,12 +1307,12 @@ int skl_uncore_pci_init(void) /* Nehalem uncore support */ static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) { - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); + wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, 0); } static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) { - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); + wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); } static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) @@ -1319,9 +1320,9 @@ static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event *hwc = &event->hw; if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); + wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); else - wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); + wrmsrq(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); } static struct attribute *nhm_uncore_formats_attr[] = { diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 76d96df1475a..2824dc9950be 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* SandyBridge-EP/IvyTown uncore support */ #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "uncore.h" #include "uncore_discovery.h" @@ -618,9 +619,9 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) msr = uncore_msr_box_ctl(box); if (msr) { - rdmsrl(msr, config); + rdmsrq(msr, config); config |= SNBEP_PMON_BOX_CTL_FRZ; - wrmsrl(msr, config); + wrmsrq(msr, config); } } @@ -631,9 +632,9 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) msr = uncore_msr_box_ctl(box); if (msr) { - rdmsrl(msr, config); + rdmsrq(msr, config); config &= ~SNBEP_PMON_BOX_CTL_FRZ; - wrmsrl(msr, config); + wrmsrq(msr, config); } } @@ -643,9 +644,9 @@ static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct p struct hw_perf_event_extra *reg1 = &hwc->extra_reg; if (reg1->idx != EXTRA_REG_NONE) - wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); + wrmsrq(reg1->reg, uncore_shared_reg_config(box, 0)); - wrmsrl(hwc->config_base, 
hwc->config | SNBEP_PMON_CTL_EN); + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); } static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, @@ -653,7 +654,7 @@ static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, { struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, hwc->config); + wrmsrq(hwc->config_base, hwc->config); } static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) @@ -661,7 +662,7 @@ static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) unsigned msr = uncore_msr_box_ctl(box); if (msr) - wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); + wrmsrq(msr, SNBEP_PMON_BOX_CTL_INT); } static struct attribute *snbep_uncore_formats_attr[] = { @@ -1532,7 +1533,7 @@ static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box) { unsigned msr = uncore_msr_box_ctl(box); if (msr) - wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT); + wrmsrq(msr, IVBEP_PMON_BOX_CTL_INT); } static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box) @@ -1783,11 +1784,11 @@ static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_ev if (reg1->idx != EXTRA_REG_NONE) { u64 filter = uncore_shared_reg_config(box, 0); - wrmsrl(reg1->reg, filter & 0xffffffff); - wrmsrl(reg1->reg + 6, filter >> 32); + wrmsrq(reg1->reg, filter & 0xffffffff); + wrmsrq(reg1->reg + 6, filter >> 32); } - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); } static struct intel_uncore_ops ivbep_uncore_cbox_ops = { @@ -2767,11 +2768,11 @@ static void hswep_cbox_enable_event(struct intel_uncore_box *box, if (reg1->idx != EXTRA_REG_NONE) { u64 filter = uncore_shared_reg_config(box, 0); - wrmsrl(reg1->reg, filter & 0xffffffff); - wrmsrl(reg1->reg + 1, filter >> 32); + wrmsrq(reg1->reg, filter & 0xffffffff); + wrmsrq(reg1->reg + 1, filter >> 32); } - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); } static struct intel_uncore_ops hswep_uncore_cbox_ops = { @@ -2816,7 +2817,7 @@ static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box) for_each_set_bit(i, (unsigned long *)&init, 64) { flags |= (1ULL << i); - wrmsrl(msr, flags); + wrmsrq(msr, flags); } } } @@ -3708,7 +3709,7 @@ static void skx_iio_enable_event(struct intel_uncore_box *box, { struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); } static struct intel_uncore_ops skx_uncore_iio_ops = { @@ -3765,7 +3766,7 @@ static int skx_msr_cpu_bus_read(int cpu, u64 *topology) { u64 msr_value; - if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) || + if (rdmsrq_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) || !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT)) return -ENXIO; @@ -4655,9 +4656,9 @@ static void snr_cha_enable_event(struct intel_uncore_box *box, struct hw_perf_event_extra *reg1 = &hwc->extra_reg; if (reg1->idx != EXTRA_REG_NONE) - wrmsrl(reg1->reg, reg1->config); + wrmsrq(reg1->reg, reg1->config); - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); + wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); } static struct intel_uncore_ops snr_uncore_chabox_ops = { @@ -5882,9 +5883,9 @@ static void spr_uncore_msr_enable_event(struct intel_uncore_box *box, struct hw_perf_event_extra *reg1 = &hwc->extra_reg; if (reg1->idx != EXTRA_REG_NONE) - wrmsrl(reg1->reg, reg1->config); + wrmsrq(reg1->reg, reg1->config); - 
wrmsrl(hwc->config_base, hwc->config); + wrmsrq(hwc->config_base, hwc->config); } static void spr_uncore_msr_disable_event(struct intel_uncore_box *box, @@ -5894,9 +5895,9 @@ static void spr_uncore_msr_disable_event(struct intel_uncore_box *box, struct hw_perf_event_extra *reg1 = &hwc->extra_reg; if (reg1->idx != EXTRA_REG_NONE) - wrmsrl(reg1->reg, 0); + wrmsrq(reg1->reg, 0); - wrmsrl(hwc->config_base, 0); + wrmsrq(hwc->config_base, 0); } static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) @@ -6485,7 +6486,7 @@ void spr_uncore_cpu_init(void) * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it. */ - rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo); + rdmsrq(SPR_MSR_UNC_CBO_CONFIG, num_cbo); /* * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact * the EMR XCC. Don't let the value from the MSR replace the existing value. diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 45b1866ff051..7f5007a4752a 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -3,6 +3,8 @@ #include <linux/sysfs.h> #include <linux/nospec.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> + #include "probe.h" enum perf_msr_id { @@ -231,7 +233,7 @@ static inline u64 msr_read_counter(struct perf_event *event) u64 now; if (event->hw.event_base) - rdmsrl(event->hw.event_base, now); + rdmsrq(event->hw.event_base, now); else now = rdtsc_ordered(); diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 46d120597bab..2b969386dcdd 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -17,6 +17,7 @@ #include <asm/fpu/xstate.h> #include <asm/intel_ds.h> #include <asm/cpu.h> +#include <asm/msr.h> /* To enable MSR tracing please use the generic trace points. 
*/ @@ -127,6 +128,11 @@ static inline bool is_pebs_counter_event_group(struct perf_event *event) return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR); } +static inline bool is_acr_event_group(struct perf_event *event) +{ + return check_leader_group(event->group_leader, PERF_X86_EVENT_ACR); +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ @@ -268,6 +274,7 @@ struct cpu_hw_events { struct event_constraint *event_constraint[X86_PMC_IDX_MAX]; int n_excl; /* the number of exclusive events */ + int n_late_setup; /* the num of events needs late setup */ unsigned int txn_flags; int is_fake; @@ -293,6 +300,10 @@ struct cpu_hw_events { u64 fixed_ctrl_val; u64 active_fixed_ctrl_val; + /* Intel ACR configuration */ + u64 acr_cfg_b[X86_PMC_IDX_MAX]; + u64 acr_cfg_c[X86_PMC_IDX_MAX]; + /* * Intel LBR bits */ @@ -714,6 +725,15 @@ struct x86_hybrid_pmu { u64 fixed_cntr_mask64; unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; }; + + union { + u64 acr_cntr_mask64; + unsigned long acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; + union { + u64 acr_cause_mask64; + unsigned long acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; struct event_constraint unconstrained; u64 hw_cache_event_ids @@ -796,6 +816,10 @@ struct x86_pmu { int (*hw_config)(struct perf_event *event); int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); void (*late_setup)(void); + void (*pebs_enable)(struct perf_event *event); + void (*pebs_disable)(struct perf_event *event); + void (*pebs_enable_all)(void); + void (*pebs_disable_all)(void); unsigned eventsel; unsigned perfctr; unsigned fixedctr; @@ -812,6 +836,14 @@ struct x86_pmu { u64 fixed_cntr_mask64; unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; }; + union { + u64 acr_cntr_mask64; + unsigned long acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; + union { + u64 acr_cause_mask64; + unsigned long acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + }; int cntval_bits; u64 cntval_mask; union { @@ -878,7 +910,7 @@ struct x86_pmu { */ unsigned int bts :1, bts_active :1, - pebs :1, + ds_pebs :1, pebs_active :1, pebs_broken :1, pebs_prec_dist :1, @@ -1049,6 +1081,7 @@ do { \ #define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */ #define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */ #define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */ +#define PMU_FL_DYN_CONSTRAINT 0x800 /* Needs dynamic constraint */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr @@ -1091,6 +1124,7 @@ static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\ .pmu_type = _pmu, \ } +int is_x86_event(struct perf_event *event); struct pmu *x86_get_pmu(unsigned int cpu); extern struct x86_pmu x86_pmu __read_mostly; @@ -1098,6 +1132,10 @@ DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period); DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update); DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs); DECLARE_STATIC_CALL(x86_pmu_late_setup, *x86_pmu.late_setup); +DECLARE_STATIC_CALL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable); +DECLARE_STATIC_CALL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable); +DECLARE_STATIC_CALL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all); +DECLARE_STATIC_CALL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all); static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) { @@ -1205,16 +1243,16 @@ static inline void 
__x86_pmu_enable_event(struct hw_perf_event *hwc, u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); if (hwc->extra_reg.reg) - wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); + wrmsrq(hwc->extra_reg.reg, hwc->extra_reg.config); /* * Add enabled Merge event on next counter * if large increment event being enabled on this counter */ if (is_counter_pair(hwc)) - wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en); + wrmsrq(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en); - wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); + wrmsrq(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); } void x86_pmu_enable_all(int added); @@ -1230,10 +1268,10 @@ static inline void x86_pmu_disable_event(struct perf_event *event) u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); struct hw_perf_event *hwc = &event->hw; - wrmsrl(hwc->config_base, hwc->config & ~disable_mask); + wrmsrq(hwc->config_base, hwc->config & ~disable_mask); if (is_counter_pair(hwc)) - wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0); + wrmsrq(x86_pmu_config_addr(hwc->idx + 1), 0); } void x86_pmu_enable_event(struct perf_event *event); @@ -1401,12 +1439,12 @@ static __always_inline void __amd_pmu_lbr_disable(void) { u64 dbg_ctl, dbg_extn_cfg; - rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); - wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN); + rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); + wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN); if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { - rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); - wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); + rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl); + wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); } } @@ -1538,21 +1576,21 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) static __always_inline void __intel_pmu_pebs_disable_all(void) { - wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + wrmsrq(MSR_IA32_PEBS_ENABLE, 0); } static __always_inline void __intel_pmu_arch_lbr_disable(void) { - wrmsrl(MSR_ARCH_LBR_CTL, 0); + wrmsrq(MSR_ARCH_LBR_CTL, 0); } static __always_inline void __intel_pmu_lbr_disable(void) { u64 debugctl; - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); } int intel_pmu_save_and_restart(struct perf_event *event); @@ -1587,6 +1625,8 @@ void intel_pmu_disable_bts(void); int intel_pmu_drain_bts_buffer(void); +void intel_pmu_late_setup(void); + u64 grt_latency_data(struct perf_event *event, u64 status); u64 cmt_latency_data(struct perf_event *event, u64 status); @@ -1643,11 +1683,13 @@ void intel_pmu_pebs_disable_all(void); void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); +void intel_pmu_pebs_late_setup(struct cpu_hw_events *cpuc); + void intel_pmu_drain_pebs_buffer(void); void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); -void intel_ds_init(void); +void intel_pebs_init(void); void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, struct cpu_hw_events *cpuc, diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h index 1d9e385649b5..70078334e4a3 100644 --- a/arch/x86/events/perf_event_flags.h +++ b/arch/x86/events/perf_event_flags.h @@ -2,23 +2,24 @@ /* * struct hw_perf_event.flags flags */ -PERF_ARCH(PEBS_LDLAT, 
0x00001) /* ld+ldlat data address sampling */ -PERF_ARCH(PEBS_ST, 0x00002) /* st data address sampling */ -PERF_ARCH(PEBS_ST_HSW, 0x00004) /* haswell style datala, store */ -PERF_ARCH(PEBS_LD_HSW, 0x00008) /* haswell style datala, load */ -PERF_ARCH(PEBS_NA_HSW, 0x00010) /* haswell style datala, unknown */ -PERF_ARCH(EXCL, 0x00020) /* HT exclusivity on counter */ -PERF_ARCH(DYNAMIC, 0x00040) /* dynamic alloc'd constraint */ -PERF_ARCH(PEBS_CNTR, 0x00080) /* PEBS counters snapshot */ -PERF_ARCH(EXCL_ACCT, 0x00100) /* accounted EXCL event */ -PERF_ARCH(AUTO_RELOAD, 0x00200) /* use PEBS auto-reload */ -PERF_ARCH(LARGE_PEBS, 0x00400) /* use large PEBS */ -PERF_ARCH(PEBS_VIA_PT, 0x00800) /* use PT buffer for PEBS */ -PERF_ARCH(PAIR, 0x01000) /* Large Increment per Cycle */ -PERF_ARCH(LBR_SELECT, 0x02000) /* Save/Restore MSR_LBR_SELECT */ -PERF_ARCH(TOPDOWN, 0x04000) /* Count Topdown slots/metrics events */ -PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */ -PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */ -PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */ -PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */ -PERF_ARCH(BRANCH_COUNTERS, 0x80000) /* logs the counters in the extra space of each branch */ +PERF_ARCH(PEBS_LDLAT, 0x0000001) /* ld+ldlat data address sampling */ +PERF_ARCH(PEBS_ST, 0x0000002) /* st data address sampling */ +PERF_ARCH(PEBS_ST_HSW, 0x0000004) /* haswell style datala, store */ +PERF_ARCH(PEBS_LD_HSW, 0x0000008) /* haswell style datala, load */ +PERF_ARCH(PEBS_NA_HSW, 0x0000010) /* haswell style datala, unknown */ +PERF_ARCH(EXCL, 0x0000020) /* HT exclusivity on counter */ +PERF_ARCH(DYNAMIC, 0x0000040) /* dynamic alloc'd constraint */ +PERF_ARCH(PEBS_CNTR, 0x0000080) /* PEBS counters snapshot */ +PERF_ARCH(EXCL_ACCT, 0x0000100) /* accounted EXCL event */ +PERF_ARCH(AUTO_RELOAD, 0x0000200) /* use PEBS auto-reload */ +PERF_ARCH(LARGE_PEBS, 0x0000400) /* use large PEBS */ +PERF_ARCH(PEBS_VIA_PT, 0x0000800) /* use PT buffer for PEBS */ +PERF_ARCH(PAIR, 0x0001000) /* Large Increment per Cycle */ +PERF_ARCH(LBR_SELECT, 0x0002000) /* Save/Restore MSR_LBR_SELECT */ +PERF_ARCH(TOPDOWN, 0x0004000) /* Count Topdown slots/metrics events */ +PERF_ARCH(PEBS_STLAT, 0x0008000) /* st+stlat data address sampling */ +PERF_ARCH(AMD_BRS, 0x0010000) /* AMD Branch Sampling */ +PERF_ARCH(PEBS_LAT_HYBRID, 0x0020000) /* ld and st lat for hybrid */ +PERF_ARCH(NEEDS_BRANCH_STACK, 0x0040000) /* require branch stack setup */ +PERF_ARCH(BRANCH_COUNTERS, 0x0080000) /* logs the counters in the extra space of each branch */ +PERF_ARCH(ACR, 0x0100000) /* Auto counter reload */ diff --git a/arch/x86/events/probe.c b/arch/x86/events/probe.c index 600bf8d15c0c..bb719d0d3f0b 100644 --- a/arch/x86/events/probe.c +++ b/arch/x86/events/probe.c @@ -2,6 +2,8 @@ #include <linux/export.h> #include <linux/types.h> #include <linux/bits.h> + +#include <asm/msr.h> #include "probe.h" static umode_t @@ -43,7 +45,7 @@ perf_msr_probe(struct perf_msr *msr, int cnt, bool zero, void *data) if (msr[bit].test && !msr[bit].test(bit, data)) continue; /* Virt sucks; you cannot tell if a R/O MSR is present :/ */ - if (rdmsrl_safe(msr[bit].msr, &val)) + if (rdmsrq_safe(msr[bit].msr, &val)) continue; mask = msr[bit].mask; diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index 8ddace8cea96..defd86137f12 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -65,6 +65,7 @@ #include <linux/nospec.h> #include <asm/cpu_device_id.h> #include 
<asm/intel-family.h> +#include <asm/msr.h> #include "perf_event.h" #include "probe.h" @@ -192,7 +193,7 @@ static inline unsigned int get_rapl_pmu_idx(int cpu, int scope) static inline u64 rapl_read_counter(struct perf_event *event) { u64 raw; - rdmsrl(event->hw.event_base, raw); + rdmsrq(event->hw.event_base, raw); return raw; } @@ -221,7 +222,7 @@ static u64 rapl_event_update(struct perf_event *event) prev_raw_count = local64_read(&hwc->prev_count); do { - rdmsrl(event->hw.event_base, new_raw_count); + rdmsrq(event->hw.event_base, new_raw_count); } while (!local64_try_cmpxchg(&hwc->prev_count, &prev_raw_count, new_raw_count)); @@ -610,8 +611,8 @@ static int rapl_check_hw_unit(void) u64 msr_rapl_power_unit_bits; int i; - /* protect rdmsrl() to handle virtualization */ - if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits)) + /* protect rdmsrq() to handle virtualization */ + if (rdmsrq_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits)) return -1; for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++) rapl_pkg_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL; diff --git a/arch/x86/events/utils.c b/arch/x86/events/utils.c index dab4ed199227..77fd00b3305e 100644 --- a/arch/x86/events/utils.c +++ b/arch/x86/events/utils.c @@ -2,6 +2,7 @@ #include <asm/insn.h> #include <linux/mm.h> +#include <asm/msr.h> #include "perf_event.h" static int decode_branch_type(struct insn *insn) diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 2fd9b0cf9a5e..4bdfcf091200 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -15,6 +15,7 @@ #include <asm/cpufeature.h> #include <asm/hardirq.h> #include <asm/apic.h> +#include <asm/msr.h> #include "../perf_event.h" @@ -254,26 +255,26 @@ static __initconst const u64 zxe_hw_cache_event_ids static void zhaoxin_pmu_disable_all(void) { - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0); } static void zhaoxin_pmu_enable_all(int added) { - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); + wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); } static inline u64 zhaoxin_pmu_get_status(void) { u64 status; - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); + rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, status); return status; } static inline void zhaoxin_pmu_ack_status(u64 ack) { - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); + wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); } static inline void zxc_pmu_ack_status(u64 ack) @@ -293,9 +294,9 @@ static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) mask = 0xfULL << (idx * 4); - rdmsrl(hwc->config_base, ctrl_val); + rdmsrq(hwc->config_base, ctrl_val); ctrl_val &= ~mask; - wrmsrl(hwc->config_base, ctrl_val); + wrmsrq(hwc->config_base, ctrl_val); } static void zhaoxin_pmu_disable_event(struct perf_event *event) @@ -329,10 +330,10 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) bits <<= (idx * 4); mask = 0xfULL << (idx * 4); - rdmsrl(hwc->config_base, ctrl_val); + rdmsrq(hwc->config_base, ctrl_val); ctrl_val &= ~mask; ctrl_val |= bits; - wrmsrl(hwc->config_base, ctrl_val); + wrmsrq(hwc->config_base, ctrl_val); } static void zhaoxin_pmu_enable_event(struct perf_event *event) @@ -397,8 +398,7 @@ again: if (!x86_perf_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - x86_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } /* diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 6d91ac5f9836..bfde0a3498b9 100644 --- a/arch/x86/hyperv/hv_apic.c +++ 
b/arch/x86/hyperv/hv_apic.c @@ -28,6 +28,7 @@ #include <asm/hypervisor.h> #include <asm/mshyperv.h> #include <asm/apic.h> +#include <asm/msr.h> #include <asm/trace/hyperv.h> @@ -37,7 +38,7 @@ static u64 hv_apic_icr_read(void) { u64 reg_val; - rdmsrl(HV_X64_MSR_ICR, reg_val); + rdmsrq(HV_X64_MSR_ICR, reg_val); return reg_val; } @@ -49,7 +50,7 @@ static void hv_apic_icr_write(u32 low, u32 id) reg_val = reg_val << 32; reg_val |= low; - wrmsrl(HV_X64_MSR_ICR, reg_val); + wrmsrq(HV_X64_MSR_ICR, reg_val); } static u32 hv_apic_read(u32 reg) @@ -75,10 +76,10 @@ static void hv_apic_write(u32 reg, u32 val) { switch (reg) { case APIC_EOI: - wrmsr(HV_X64_MSR_EOI, val, 0); + wrmsrq(HV_X64_MSR_EOI, val); break; case APIC_TASKPRI: - wrmsr(HV_X64_MSR_TPR, val, 0); + wrmsrq(HV_X64_MSR_TPR, val); break; default: native_apic_mem_write(reg, val); @@ -92,7 +93,7 @@ static void hv_apic_eoi_write(void) if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1)) return; - wrmsr(HV_X64_MSR_EOI, APIC_EOI_ACK, 0); + wrmsrq(HV_X64_MSR_EOI, APIC_EOI_ACK); } static bool cpu_is_self(int cpu) diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index ddeb40930bc8..5d27194a2efa 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -21,6 +21,7 @@ #include <asm/hypervisor.h> #include <hyperv/hvhdk.h> #include <asm/mshyperv.h> +#include <asm/msr.h> #include <asm/idtentry.h> #include <asm/set_memory.h> #include <linux/kexec.h> @@ -62,7 +63,7 @@ static int hyperv_init_ghcb(void) * returned by MSR_AMD64_SEV_ES_GHCB is above shared * memory boundary and map it here. */ - rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa); + rdmsrq(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa); /* Mask out vTOM bit. ioremap_cache() maps decrypted */ ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary; @@ -95,7 +96,7 @@ static int hv_cpu_init(unsigned int cpu) * For root partition we get the hypervisor provided VP assist * page, instead of allocating a new page. 
*/ - rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); + rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); *hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT, PAGE_SIZE, MEMREMAP_WB); } else { @@ -128,7 +129,7 @@ static int hv_cpu_init(unsigned int cpu) } if (!WARN_ON(!(*hvp))) { msr.enable = 1; - wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); + wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); } return hyperv_init_ghcb(); @@ -140,7 +141,7 @@ static void hv_reenlightenment_notify(struct work_struct *dummy) { struct hv_tsc_emulation_status emu_status; - rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); + rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); /* Don't issue the callback if TSC accesses are not emulated */ if (hv_reenlightenment_cb && emu_status.inprogress) @@ -153,11 +154,11 @@ void hyperv_stop_tsc_emulation(void) u64 freq; struct hv_tsc_emulation_status emu_status; - rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); + rdmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); emu_status.inprogress = 0; - wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); + wrmsrq(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); - rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq); + rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq); tsc_khz = div64_u64(freq, 1000); } EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation); @@ -203,8 +204,8 @@ void set_hv_tscchange_cb(void (*cb)(void)) re_ctrl.target_vp = hv_vp_index[get_cpu()]; - wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); - wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl)); + wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); + wrmsrq(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl)); put_cpu(); } @@ -217,9 +218,9 @@ void clear_hv_tscchange_cb(void) if (!hv_reenlightenment_available()) return; - rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); + rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); re_ctrl.enabled = 0; - wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); + wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); hv_reenlightenment_cb = NULL; } @@ -251,16 +252,16 @@ static int hv_cpu_die(unsigned int cpu) */ memunmap(hv_vp_assist_page[cpu]); hv_vp_assist_page[cpu] = NULL; - rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); + rdmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); msr.enable = 0; } - wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); + wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); } if (hv_reenlightenment_cb == NULL) return 0; - rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); + rdmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); if (re_ctrl.target_vp == hv_vp_index[cpu]) { /* * Reassign reenlightenment notifications to some other online @@ -274,7 +275,7 @@ static int hv_cpu_die(unsigned int cpu) else re_ctrl.enabled = 0; - wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); + wrmsrq(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); } return 0; @@ -331,9 +332,9 @@ static int hv_suspend(void) hv_hypercall_pg = NULL; /* Disable the hypercall page in the hypervisor */ - rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); hypercall_msr.enable = 0; - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); ret = hv_cpu_die(0); return ret; @@ -348,11 +349,11 @@ static void hv_resume(void) WARN_ON(ret); /* Re-enable the hypercall page */ - 
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); hypercall_msr.enable = 1; hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg_saved); - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); hv_hypercall_pg = hv_hypercall_pg_saved; hv_hypercall_pg_saved = NULL; @@ -499,7 +500,7 @@ void __init hyperv_init(void) * in such a VM and is only used in such a VM. */ guest_id = hv_generate_guest_id(LINUX_VERSION_CODE); - wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id); + wrmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id); /* With the paravisor, the VM must also write the ID via GHCB/GHCI */ hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id); @@ -515,7 +516,7 @@ void __init hyperv_init(void) if (hv_hypercall_pg == NULL) goto clean_guest_os_id; - rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); hypercall_msr.enable = 1; if (hv_root_partition()) { @@ -532,7 +533,7 @@ void __init hyperv_init(void) * so it is populated with code, then copy the code to an * executable page. */ - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); pg = vmalloc_to_page(hv_hypercall_pg); src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE, @@ -544,7 +545,7 @@ void __init hyperv_init(void) hv_remap_tsc_clocksource(); } else { hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg); - wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); } skip_hypercall_pg_init: @@ -608,7 +609,7 @@ skip_hypercall_pg_init: return; clean_guest_os_id: - wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); + wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0); hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0); cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE); free_ghcb_page: @@ -629,7 +630,7 @@ void hyperv_cleanup(void) union hv_reference_tsc_msr tsc_msr; /* Reset our OS id */ - wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); + wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0); hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0); /* @@ -667,18 +668,18 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die) return; panic_reported = true; - rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id); + rdmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id); - wrmsrl(HV_X64_MSR_CRASH_P0, err); - wrmsrl(HV_X64_MSR_CRASH_P1, guest_id); - wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip); - wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax); - wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp); + wrmsrq(HV_X64_MSR_CRASH_P0, err); + wrmsrq(HV_X64_MSR_CRASH_P1, guest_id); + wrmsrq(HV_X64_MSR_CRASH_P2, regs->ip); + wrmsrq(HV_X64_MSR_CRASH_P3, regs->ax); + wrmsrq(HV_X64_MSR_CRASH_P4, regs->sp); /* * Let Hyper-V know there is crash data available */ - wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY); + wrmsrq(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY); } EXPORT_SYMBOL_GPL(hyperv_report_panic); @@ -701,7 +702,7 @@ bool hv_is_hyperv_initialized(void) * that the hypercall page is setup */ hypercall_msr.as_uint64 = 0; - rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); + rdmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); return hypercall_msr.enable; } diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c index 151e851bef09..81b006601370 100644 --- a/arch/x86/hyperv/hv_spinlock.c +++ b/arch/x86/hyperv/hv_spinlock.c @@ -15,6 +15,7 @@ #include <asm/mshyperv.h> #include <asm/paravirt.h> #include <asm/apic.h> +#include 
<asm/msr.h> static bool hv_pvspin __initdata = true; @@ -39,18 +40,18 @@ static void hv_qlock_wait(u8 *byte, u8 val) * To prevent a race against the unlock path it is required to * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between - * the lock value check and the rdmsrl() then the vCPU might be put + * the lock value check and the rdmsrq() then the vCPU might be put * into 'idle' state by the hypervisor and kept in that state for * an unspecified amount of time. */ local_irq_save(flags); /* - * Only issue the rdmsrl() when the lock state has not changed. + * Only issue the rdmsrq() when the lock state has not changed. */ if (READ_ONCE(*byte) == val) { unsigned long msr_val; - rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val); + rdmsrq(HV_X64_MSR_GUEST_IDLE, msr_val); (void)msr_val; } diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c index 13242ed8ff16..4580936dcb03 100644 --- a/arch/x86/hyperv/hv_vtl.c +++ b/arch/x86/hyperv/hv_vtl.c @@ -11,6 +11,7 @@ #include <asm/desc.h> #include <asm/i8259.h> #include <asm/mshyperv.h> +#include <asm/msr.h> #include <asm/realmode.h> #include <asm/reboot.h> #include <../kernel/smpboot.h> @@ -149,11 +150,11 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored) input->vp_context.rip = rip; input->vp_context.rsp = rsp; input->vp_context.rflags = 0x0000000000000002; - input->vp_context.efer = __rdmsr(MSR_EFER); + input->vp_context.efer = native_rdmsrq(MSR_EFER); input->vp_context.cr0 = native_read_cr0(); input->vp_context.cr3 = __native_read_cr3(); input->vp_context.cr4 = native_read_cr4(); - input->vp_context.msr_cr_pat = __rdmsr(MSR_IA32_CR_PAT); + input->vp_context.msr_cr_pat = native_rdmsrq(MSR_IA32_CR_PAT); input->vp_context.idtr.limit = idt_ptr.size; input->vp_context.idtr.base = idt_ptr.address; input->vp_context.gdtr.limit = gdt_ptr.size; diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index 77bf05f06b9e..09a165a3c41e 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -22,6 +22,7 @@ #include <asm/realmode.h> #include <asm/e820/api.h> #include <asm/desc.h> +#include <asm/msr.h> #include <uapi/asm/vmx.h> #ifdef CONFIG_AMD_MEM_ENCRYPT @@ -110,12 +111,12 @@ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size) static inline u64 rd_ghcb_msr(void) { - return __rdmsr(MSR_AMD64_SEV_ES_GHCB); + return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB); } static inline void wr_ghcb_msr(u64 val) { - native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val); + native_wrmsrq(MSR_AMD64_SEV_ES_GHCB, val); } static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code, diff --git a/arch/x86/include/asm/acrn.h b/arch/x86/include/asm/acrn.h index 1dd14381bcb6..fab11192c60a 100644 --- a/arch/x86/include/asm/acrn.h +++ b/arch/x86/include/asm/acrn.h @@ -25,7 +25,7 @@ void acrn_remove_intr_handler(void); static inline u32 acrn_cpuid_base(void) { if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) - return hypervisor_cpuid_base("ACRNACRNACRN", 0); + return cpuid_base_hypervisor("ACRNACRNACRN", 0); return 0; } diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index f2294784babc..15bc07a5ebb3 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -83,6 +83,12 @@ struct alt_instr { extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; +extern s32 __retpoline_sites[], __retpoline_sites_end[]; +extern s32 __return_sites[], __return_sites_end[]; +extern s32 
__cfi_sites[], __cfi_sites_end[]; +extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[]; +extern s32 __smp_locks[], __smp_locks_end[]; + /* * Debug flag that can be tested to see whether alternative * instructions were patched in already: @@ -367,11 +373,6 @@ void nop_func(void); __ALTERNATIVE(\oldinstr, \newinstr, \ft_flags) .endm -#define old_len 141b-140b -#define new_len1 144f-143f -#define new_len2 145f-144f -#define new_len3 146f-145f - /* * Same as ALTERNATIVE macro above but for two alternatives. If CPU * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has diff --git a/arch/x86/include/asm/amd/fch.h b/arch/x86/include/asm/amd/fch.h new file mode 100644 index 000000000000..2cf5153edbc2 --- /dev/null +++ b/arch/x86/include/asm/amd/fch.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_AMD_FCH_H_ +#define _ASM_X86_AMD_FCH_H_ + +#define FCH_PM_BASE 0xFED80300 + +/* Register offsets from PM base: */ +#define FCH_PM_DECODEEN 0x00 +#define FCH_PM_DECODEEN_SMBUS0SEL GENMASK(20, 19) +#define FCH_PM_SCRATCH 0x80 +#define FCH_PM_S5_RESET_STATUS 0xC0 + +#endif /* _ASM_X86_AMD_FCH_H_ */ diff --git a/arch/x86/include/asm/amd_hsmp.h b/arch/x86/include/asm/amd/hsmp.h index 03c2ce3edaf5..2137f62853ed 100644 --- a/arch/x86/include/asm/amd_hsmp.h +++ b/arch/x86/include/asm/amd/hsmp.h @@ -1,5 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ - #ifndef _ASM_X86_AMD_HSMP_H_ #define _ASM_X86_AMD_HSMP_H_ @@ -13,4 +12,5 @@ static inline int hsmp_send_message(struct hsmp_message *msg) return -ENODEV; } #endif + #endif /*_ASM_X86_AMD_HSMP_H_*/ diff --git a/arch/x86/include/asm/amd-ibs.h b/arch/x86/include/asm/amd/ibs.h index 77f3a589a99a..3ee5903982c2 100644 --- a/arch/x86/include/asm/amd-ibs.h +++ b/arch/x86/include/asm/amd/ibs.h @@ -1,4 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_AMD_IBS_H +#define _ASM_X86_AMD_IBS_H + /* * From PPR Vol 1 for AMD Family 19h Model 01h B1 * 55898 Rev 0.35 - Feb 5, 2021 @@ -151,3 +154,5 @@ struct perf_ibs_data { }; u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX]; }; + +#endif /* _ASM_X86_AMD_IBS_H */ diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd/nb.h index adfa0854cf2d..ddb5108cf46c 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd/nb.h @@ -4,7 +4,7 @@ #include <linux/ioport.h> #include <linux/pci.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> struct amd_nb_bus_dev_range { u8 bus; diff --git a/arch/x86/include/asm/amd_node.h b/arch/x86/include/asm/amd/node.h index 23fe617898a8..23fe617898a8 100644 --- a/arch/x86/include/asm/amd_node.h +++ b/arch/x86/include/asm/amd/node.h diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index c903d358405d..68e10e30fe9b 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -120,7 +120,7 @@ static inline bool apic_is_x2apic_enabled(void) { u64 msr; - if (rdmsrl_safe(MSR_IA32_APICBASE, &msr)) + if (rdmsrq_safe(MSR_IA32_APICBASE, &msr)) return false; return msr & X2APIC_ENABLE; } @@ -209,12 +209,12 @@ static inline void native_apic_msr_write(u32 reg, u32 v) reg == APIC_LVR) return; - wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); + wrmsrq(APIC_BASE_MSR + (reg >> 4), v); } static inline void native_apic_msr_eoi(void) { - __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0); + native_wrmsrq(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK); } static inline u32 native_apic_msr_read(u32 reg) @@ -224,20 +224,20 @@ static inline u32 native_apic_msr_read(u32 reg) if (reg == APIC_DFR) 
return -1; - rdmsrl(APIC_BASE_MSR + (reg >> 4), msr); + rdmsrq(APIC_BASE_MSR + (reg >> 4), msr); return (u32)msr; } static inline void native_x2apic_icr_write(u32 low, u32 id) { - wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); + wrmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); } static inline u64 native_x2apic_icr_read(void) { unsigned long val; - rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val); + rdmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), val); return val; } diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h index cbc6157f0b4b..b5982b94bdba 100644 --- a/arch/x86/include/asm/arch_hweight.h +++ b/arch/x86/include/asm/arch_hweight.h @@ -16,8 +16,7 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w) { unsigned int res; - asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE - "call __sw_hweight32", + asm_inline (ALTERNATIVE("call __sw_hweight32", "popcntl %[val], %[cnt]", X86_FEATURE_POPCNT) : [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT : [val] REG_IN (w)); @@ -46,8 +45,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w) { unsigned long res; - asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE - "call __sw_hweight64", + asm_inline (ALTERNATIVE("call __sw_hweight64", "popcntq %[val], %[cnt]", X86_FEATURE_POPCNT) : [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT : [val] REG_IN (w)); diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index cc2881576c2c..f963848024a5 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -114,17 +114,12 @@ #endif #ifndef __ASSEMBLER__ -#ifndef __pic__ static __always_inline __pure void *rip_rel_ptr(void *p) { asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p)); return p; } -#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var))) -#else -#define RIP_REL_REF(var) (var) -#endif #endif /* @@ -243,5 +238,24 @@ register unsigned long current_stack_pointer asm(_ASM_SP); #define _ASM_EXTABLE_FAULT(from, to) \ _ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT) +/* + * Both i386 and x86_64 returns 64-bit values in edx:eax for certain + * instructions, but GCC's "A" constraint has different meanings. + * For i386, "A" means exactly edx:eax, while for x86_64 it + * means rax *or* rdx. 
+ * + * These helpers wrapping these semantic differences save one instruction + * clearing the high half of 'low': + */ +#ifdef CONFIG_X86_64 +# define EAX_EDX_DECLARE_ARGS(val, low, high) unsigned long low, high +# define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32) +# define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) +#else +# define EAX_EDX_DECLARE_ARGS(val, low, high) u64 val +# define EAX_EDX_VAL(val, low, high) (val) +# define EAX_EDX_RET(val, low, high) "=A" (val) +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 100413aff640..eebbc8889e70 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -248,7 +248,7 @@ arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) static __always_inline unsigned long variable__ffs(unsigned long word) { - asm("rep; bsf %1,%0" + asm("tzcnt %1,%0" : "=r" (word) : ASM_INPUT_RM (word)); return word; @@ -267,10 +267,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word) static __always_inline unsigned long variable_ffz(unsigned long word) { - asm("rep; bsf %1,%0" - : "=r" (word) - : "r" (~word)); - return word; + return variable__ffs(~word); } /** diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h index 3f02ff6d333d..02b23aa78955 100644 --- a/arch/x86/include/asm/boot.h +++ b/arch/x86/include/asm/boot.h @@ -74,6 +74,11 @@ # define BOOT_STACK_SIZE 0x1000 #endif +#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE) + +#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE +#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0 + #ifndef __ASSEMBLER__ extern unsigned int output_len; extern const unsigned long kernel_text_size; @@ -83,6 +88,11 @@ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr, void (*error)(char *x)); extern struct boot_params *boot_params_ptr; +extern unsigned long *trampoline_32bit; +extern const u16 trampoline_ljmp_imm_offset; + +void trampoline_32bit_src(void *trampoline, bool enable_5lvl); + #endif #endif /* _ASM_X86_BOOT_H */ diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h index e7225452963f..e1dbf8df1b69 100644 --- a/arch/x86/include/asm/coco.h +++ b/arch/x86/include/asm/coco.h @@ -22,7 +22,7 @@ static inline u64 cc_get_mask(void) static inline void cc_set_mask(u64 mask) { - RIP_REL_REF(cc_mask) = mask; + cc_mask = mask; } u64 cc_mkenc(u64 val); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 30144ef9ef02..5b50e0e35129 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -476,12 +476,13 @@ #define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* Clear branch history at syscall entry using SW loop */ #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */ #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */ -#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */ -#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */ -#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */ -#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32 + 7) /* Workload Classification */ -#define X86_FEATURE_PREFER_YMM (21*32 + 8) /* Avoid ZMM registers due to downclocking */ -#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 9) /* Use thunk for indirect branches in lower half of cacheline */ +#define X86_FEATURE_CLEAR_BHB_VMEXIT (21*32+ 4) /* 
Clear branch history at vmexit using SW loop */ +#define X86_FEATURE_AMD_FAST_CPPC (21*32+ 5) /* Fast CPPC */ +#define X86_FEATURE_AMD_HTR_CORES (21*32+ 6) /* Heterogeneous Core Topology */ +#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32+ 7) /* Workload Classification */ +#define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */ +#define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */ +#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */ /* * BUG word(s) @@ -520,7 +521,7 @@ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* "itlb_multihit" CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* "srbds" CPU may leak RNG bits if not mitigated */ #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* "mmio_stale_data" CPU is affected by Processor MMIO Stale Data vulnerabilities */ -#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */ +/* unused, was #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */ #define X86_BUG_RETBLEED X86_BUG(27) /* "retbleed" CPU is affected by RETBleed */ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* "eibrs_pbrsb" EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* "smt_rsb" CPU is vulnerable to Cross-Thread Return Address Predictions */ @@ -528,12 +529,14 @@ #define X86_BUG_TDX_PW_MCE X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */ /* BUG word 2 */ -#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */ -#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */ -#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */ -#define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */ -#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ -#define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */ -#define X86_BUG_ITS X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */ -#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */ +#define X86_BUG_SRSO X86_BUG( 1*32+ 0) /* "srso" AMD SRSO bug */ +#define X86_BUG_DIV0 X86_BUG( 1*32+ 1) /* "div0" AMD DIV0 speculation bug */ +#define X86_BUG_RFDS X86_BUG( 1*32+ 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */ +#define X86_BUG_BHI X86_BUG( 1*32+ 3) /* "bhi" CPU is affected by Branch History Injection */ +#define X86_BUG_IBPB_NO_RET X86_BUG( 1*32+ 4) /* "ibpb_no_ret" IBPB omits return target predictions */ +#define X86_BUG_SPECTRE_V2_USER X86_BUG( 1*32+ 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */ +#define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */ +#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */ +#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */ + #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/cpuid/api.h b/arch/x86/include/asm/cpuid/api.h index 9c180c9cc58e..44fa82e1267c 100644 --- 
a/arch/x86/include/asm/cpuid/api.h +++ b/arch/x86/include/asm/cpuid/api.h @@ -14,9 +14,9 @@ */ #ifdef CONFIG_X86_32 -bool have_cpuid_p(void); +bool cpuid_feature(void); #else -static inline bool have_cpuid_p(void) +static inline bool cpuid_feature(void) { return true; } @@ -36,9 +36,9 @@ static inline void native_cpuid(u32 *eax, u32 *ebx, } #define NATIVE_CPUID_REG(reg) \ -static inline u32 native_cpuid_##reg(u32 op) \ +static inline u32 native_cpuid_##reg(u32 op) \ { \ - u32 eax = op, ebx, ecx = 0, edx; \ + u32 eax = op, ebx, ecx = 0, edx; \ \ native_cpuid(&eax, &ebx, &ecx, &edx); \ \ @@ -160,6 +160,10 @@ static inline void __cpuid_read_reg(u32 leaf, u32 subleaf, __cpuid_read_reg(leaf, 0, regidx, (u32 *)(reg)); \ } +/* + * Hypervisor-related APIs: + */ + static __always_inline bool cpuid_function_is_indexed(u32 function) { switch (function) { @@ -184,14 +188,14 @@ static __always_inline bool cpuid_function_is_indexed(u32 function) return false; } -#define for_each_possible_hypervisor_cpuid_base(function) \ +#define for_each_possible_cpuid_base_hypervisor(function) \ for (function = 0x40000000; function < 0x40010000; function += 0x100) -static inline u32 hypervisor_cpuid_base(const char *sig, u32 leaves) +static inline u32 cpuid_base_hypervisor(const char *sig, u32 leaves) { u32 base, eax, signature[3]; - for_each_possible_hypervisor_cpuid_base(base) { + for_each_possible_cpuid_base_hypervisor(base) { cpuid(base, &eax, &signature[0], &signature[1], &signature[2]); /* @@ -207,4 +211,82 @@ static inline u32 hypervisor_cpuid_base(const char *sig, u32 leaves) return 0; } +/* + * CPUID(0x2) parsing: + */ + +/** + * cpuid_leaf_0x2() - Return sanitized CPUID(0x2) register output + * @regs: Output parameter + * + * Query CPUID(0x2) and store its output in @regs. Force set any + * invalid 1-byte descriptor returned by the hardware to zero (the NULL + * cache/TLB descriptor) before returning it to the caller. + * + * Use for_each_cpuid_0x2_desc() to iterate over the register output in + * parsed form. + */ +static inline void cpuid_leaf_0x2(union leaf_0x2_regs *regs) +{ + cpuid_leaf(0x2, regs); + + /* + * All Intel CPUs must report an iteration count of 1. In case + * of bogus hardware, treat all returned descriptors as NULL. + */ + if (regs->desc[0] != 0x01) { + for (int i = 0; i < 4; i++) + regs->regv[i] = 0; + return; + } + + /* + * The most significant bit (MSB) of each register must be clear. + * If a register is invalid, replace its descriptors with NULL. + */ + for (int i = 0; i < 4; i++) { + if (regs->reg[i].invalid) + regs->regv[i] = 0; + } +} + +/** + * for_each_cpuid_0x2_desc() - Iterator for parsed CPUID(0x2) descriptors + * @_regs: CPUID(0x2) register output, as returned by cpuid_leaf_0x2() + * @_ptr: u8 pointer, for macro internal use only + * @_desc: Pointer to the parsed CPUID(0x2) descriptor at each iteration + * + * Loop over the 1-byte descriptors in the passed CPUID(0x2) output registers + * @_regs. Provide the parsed information for each descriptor through @_desc. + * + * To handle cache-specific descriptors, switch on @_desc->c_type. For TLB + * descriptors, switch on @_desc->t_type. + * + * Example usage for cache descriptors:: + * + * const struct leaf_0x2_table *desc; + * union leaf_0x2_regs regs; + * u8 *ptr; + * + * cpuid_leaf_0x2(®s); + * for_each_cpuid_0x2_desc(regs, ptr, desc) { + * switch (desc->c_type) { + * ... 
+ * } + * } + */ +#define for_each_cpuid_0x2_desc(_regs, _ptr, _desc) \ + for (_ptr = &(_regs).desc[1]; \ + _ptr < &(_regs).desc[16] && (_desc = &cpuid_0x2_table[*_ptr]); \ + _ptr++) + +/* + * CPUID(0x80000006) parsing: + */ + +static inline bool cpuid_amd_hygon_has_l3_cache(void) +{ + return cpuid_edx(0x80000006); +} + #endif /* _ASM_X86_CPUID_API_H */ diff --git a/arch/x86/include/asm/cpuid/types.h b/arch/x86/include/asm/cpuid/types.h index 8582e27e836d..8a00364b79de 100644 --- a/arch/x86/include/asm/cpuid/types.h +++ b/arch/x86/include/asm/cpuid/types.h @@ -2,6 +2,7 @@ #ifndef _ASM_X86_CPUID_TYPES_H #define _ASM_X86_CPUID_TYPES_H +#include <linux/build_bug.h> #include <linux/types.h> /* @@ -29,4 +30,98 @@ enum cpuid_regs_idx { #define CPUID_LEAF_FREQ 0x16 #define CPUID_LEAF_TILE 0x1d +/* + * Types for CPUID(0x2) parsing: + */ + +struct leaf_0x2_reg { + u32 : 31, + invalid : 1; +}; + +union leaf_0x2_regs { + struct leaf_0x2_reg reg[4]; + u32 regv[4]; + u8 desc[16]; +}; + +/* + * Leaf 0x2 1-byte descriptors' cache types + * To be used for their mappings at cpuid_0x2_table[] + * + * Start at 1 since type 0 is reserved for HW byte descriptors which are + * not recognized by the kernel; i.e., those without an explicit mapping. + */ +enum _cache_table_type { + CACHE_L1_INST = 1, + CACHE_L1_DATA, + CACHE_L2, + CACHE_L3 + /* Adjust __TLB_TABLE_TYPE_BEGIN before adding more types */ +} __packed; +#ifndef __CHECKER__ +static_assert(sizeof(enum _cache_table_type) == 1); +#endif + +/* + * Ensure that leaf 0x2 cache and TLB type values do not intersect, + * since they share the same type field at struct cpuid_0x2_table. + */ +#define __TLB_TABLE_TYPE_BEGIN (CACHE_L3 + 1) + +/* + * Leaf 0x2 1-byte descriptors' TLB types + * To be used for their mappings at cpuid_0x2_table[] + */ +enum _tlb_table_type { + TLB_INST_4K = __TLB_TABLE_TYPE_BEGIN, + TLB_INST_4M, + TLB_INST_2M_4M, + TLB_INST_ALL, + + TLB_DATA_4K, + TLB_DATA_4M, + TLB_DATA_2M_4M, + TLB_DATA_4K_4M, + TLB_DATA_1G, + TLB_DATA_1G_2M_4M, + + TLB_DATA0_4K, + TLB_DATA0_4M, + TLB_DATA0_2M_4M, + + STLB_4K, + STLB_4K_2M, +} __packed; +#ifndef __CHECKER__ +static_assert(sizeof(enum _tlb_table_type) == 1); +#endif + +/* + * Combined parsing table for leaf 0x2 cache and TLB descriptors. + */ + +struct leaf_0x2_table { + union { + enum _cache_table_type c_type; + enum _tlb_table_type t_type; + }; + union { + short c_size; + short entries; + }; +}; + +extern const struct leaf_0x2_table cpuid_0x2_table[256]; + +/* + * All of leaf 0x2's one-byte TLB descriptors implies the same number of entries + * for their respective TLB types. TLB descriptor 0x63 is an exception: it + * implies 4 dTLB entries for 1GB pages and 32 dTLB entries for 2MB or 4MB pages. + * + * Encode that descriptor's dTLB entry count for 2MB/4MB pages here, as the entry + * count for dTLB 1GB pages is already encoded at the cpuid_0x2_table[]'s mapping. + */ +#define TLB_0x63_2M_4M_ENTRIES 32 + #endif /* _ASM_X86_CPUID_TYPES_H */ diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index fdbbbfec745a..363110e6b2e3 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -23,7 +23,7 @@ DECLARE_PER_CPU(unsigned long, cpu_dr7); static __always_inline unsigned long native_get_debugreg(int regno) { - unsigned long val = 0; /* Damn you, gcc! 
*/ + unsigned long val; switch (regno) { case 0: @@ -43,7 +43,7 @@ static __always_inline unsigned long native_get_debugreg(int regno) break; case 7: /* - * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them + * Use "asm volatile" for DR7 reads to forbid re-ordering them * with other code. * * This is needed because a DR7 access can cause a #VC exception @@ -55,7 +55,7 @@ static __always_inline unsigned long native_get_debugreg(int regno) * re-ordered to happen before the call to sev_es_ist_enter(), * causing stack recursion. */ - asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER); + asm volatile("mov %%db7, %0" : "=r" (val)); break; default: BUG(); @@ -83,15 +83,15 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value) break; case 7: /* - * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them + * Use "asm volatile" for DR7 writes to forbid re-ordering them * with other code. * * While is didn't happen with a DR7 write (see the DR7 read * comment above which explains where it happened), add the - * __FORCE_ORDER here too to avoid similar problems in the + * "asm volatile" here too to avoid similar problems in the * future. */ - asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER); + asm volatile("mov %0, %%db7" ::"r" (value)); break; default: BUG(); @@ -169,7 +169,7 @@ static inline unsigned long get_debugctlmsr(void) if (boot_cpu_data.x86 < 6) return 0; #endif - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr); return debugctlmsr; } @@ -180,7 +180,7 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr) if (boot_cpu_data.x86 < 6) return; #endif - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr); } #endif /* _ASM_X86_DEBUGREG_H */ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 128602612eca..6c8fdc96be7e 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -76,12 +76,8 @@ typedef struct user_i387_struct elf_fpregset_t; #include <asm/vdso.h> -#ifdef CONFIG_X86_64 extern unsigned int vdso64_enabled; -#endif -#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) extern unsigned int vdso32_enabled; -#endif /* * This is used to ensure we don't load something for the wrong architecture. 
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 77d20555e04d..d535a97c7284 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -53,7 +53,6 @@ static inline void arch_exit_work(unsigned long ti_work) if (unlikely(ti_work & _TIF_IO_BITMAP)) tss_update_io_bitmap(); - fpregs_assert_state_consistent(); if (unlikely(ti_work & _TIF_NEED_FPU_LOAD)) switch_fpu_return(); } @@ -61,7 +60,9 @@ static inline void arch_exit_work(unsigned long ti_work) static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, unsigned long ti_work) { - if (IS_ENABLED(CONFIG_X86_DEBUG_FPU) || unlikely(ti_work)) + fpregs_assert_state_consistent(); + + if (unlikely(ti_work)) arch_exit_work(ti_work); fred_update_rsp0(); diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index f42de5f05e7e..cd6f194a912b 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -126,6 +126,7 @@ static inline void fpstate_init_soft(struct swregs_state *soft) {} #endif /* State tracking */ +DECLARE_PER_CPU(bool, kernel_fpu_allowed); DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); /* Process cleanup */ @@ -136,7 +137,7 @@ static inline void fpstate_free(struct fpu *fpu) { } #endif /* fpstate-related functions which are exported to KVM */ -extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature); +extern void fpstate_clear_xstate_component(struct fpstate *fpstate, unsigned int xfeature); extern u64 xstate_get_guest_group_perm(void); diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h index c485f1944c5f..c060549c6c94 100644 --- a/arch/x86/include/asm/fpu/sched.h +++ b/arch/x86/include/asm/fpu/sched.h @@ -10,7 +10,7 @@ #include <asm/trace/fpu.h> extern void save_fpregs_to_fpstate(struct fpu *fpu); -extern void fpu__drop(struct fpu *fpu); +extern void fpu__drop(struct task_struct *tsk); extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal, unsigned long shstk_addr); extern void fpu_flush_thread(void); @@ -18,31 +18,25 @@ extern void fpu_flush_thread(void); /* * FPU state switching for scheduling. * - * This is a two-stage process: + * switch_fpu() saves the old state and sets TIF_NEED_FPU_LOAD if + * TIF_NEED_FPU_LOAD is not set. This is done within the context + * of the old process. * - * - switch_fpu_prepare() saves the old state. - * This is done within the context of the old process. - * - * - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state - * will get loaded on return to userspace, or when the kernel needs it. - * - * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers - * are saved in the current thread's FPU register state. - * - * If TIF_NEED_FPU_LOAD is set then CPU's FPU registers may not - * hold current()'s FPU registers. It is required to load the + * Once TIF_NEED_FPU_LOAD is set, it is required to load the * registers before returning to userland or using the content * otherwise. * * The FPU context is only stored/restored for a user task and * PF_KTHREAD is used to distinguish between kernel and user threads. 
*/ -static inline void switch_fpu_prepare(struct task_struct *old, int cpu) +static inline void switch_fpu(struct task_struct *old, int cpu) { - if (cpu_feature_enabled(X86_FEATURE_FPU) && + if (!test_tsk_thread_flag(old, TIF_NEED_FPU_LOAD) && + cpu_feature_enabled(X86_FEATURE_FPU) && !(old->flags & (PF_KTHREAD | PF_USER_WORKER))) { - struct fpu *old_fpu = &old->thread.fpu; + struct fpu *old_fpu = x86_task_fpu(old); + set_tsk_thread_flag(old, TIF_NEED_FPU_LOAD); save_fpregs_to_fpstate(old_fpu); /* * The save operation preserved register state, so the @@ -50,7 +44,7 @@ static inline void switch_fpu_prepare(struct task_struct *old, int cpu) * current CPU number in @old_fpu, so the next return * to user space can avoid the FPU register restore * when is returns on the same CPU and still owns the - * context. + * context. See fpregs_restore_userregs(). */ old_fpu->last_cpu = cpu; @@ -58,14 +52,4 @@ static inline void switch_fpu_prepare(struct task_struct *old, int cpu) } } -/* - * Delay loading of the complete FPU state until the return to userland. - * PKRU is handled separately. - */ -static inline void switch_fpu_finish(struct task_struct *new) -{ - if (cpu_feature_enabled(X86_FEATURE_FPU)) - set_tsk_thread_flag(new, TIF_NEED_FPU_LOAD); -} - #endif /* _ASM_X86_FPU_SCHED_H */ diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index de16862bf230..1c94121acd3d 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -125,6 +125,7 @@ enum xfeature { XFEATURE_RSRVD_COMP_16, XFEATURE_XTILE_CFG, XFEATURE_XTILE_DATA, + XFEATURE_APX, XFEATURE_MAX, }; @@ -145,6 +146,7 @@ enum xfeature { #define XFEATURE_MASK_LBR (1 << XFEATURE_LBR) #define XFEATURE_MASK_XTILE_CFG (1 << XFEATURE_XTILE_CFG) #define XFEATURE_MASK_XTILE_DATA (1 << XFEATURE_XTILE_DATA) +#define XFEATURE_MASK_APX (1 << XFEATURE_APX) #define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE) #define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \ @@ -304,6 +306,13 @@ struct xtile_data { } __packed; /* + * State component 19: 8B extended general purpose register. + */ +struct apx_state { + u64 egpr[16]; +} __packed; + +/* * State component 10 is supervisor state used for context-switching the * PASID state. */ @@ -407,9 +416,11 @@ struct fpu_state_perm { /* * @__state_perm: * - * This bitmap indicates the permission for state components, which - * are available to a thread group. The permission prctl() sets the - * enabled state bits in thread_group_leader()->thread.fpu. + * This bitmap indicates the permission for state components + * available to a thread group, including both user and supervisor + * components and software-defined bits like FPU_GUEST_PERM_LOCKED. + * The permission prctl() sets the enabled state bits in + * thread_group_leader()->thread.fpu. * * All run time operations use the per thread information in the * currently active fpu.fpstate which contains the xfeature masks @@ -525,13 +536,6 @@ struct fpu_guest { u64 xfeatures; /* - * @perm: xfeature bitmap of features which are - * permitted to be enabled for the guest - * vCPU. - */ - u64 perm; - - /* * @xfd_err: Save the guest value. 
*/ u64 xfd_err; diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index 7f39fe7980c5..b308a76afbb7 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -32,7 +32,8 @@ XFEATURE_MASK_PKRU | \ XFEATURE_MASK_BNDREGS | \ XFEATURE_MASK_BNDCSR | \ - XFEATURE_MASK_XTILE) + XFEATURE_MASK_XTILE | \ + XFEATURE_MASK_APX) /* * Features which are restored when returning to user space. diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h index 2a29e5216881..12b34d5b2953 100644 --- a/arch/x86/include/asm/fred.h +++ b/arch/x86/include/asm/fred.h @@ -9,6 +9,7 @@ #include <linux/const.h> #include <asm/asm.h> +#include <asm/msr.h> #include <asm/trapnr.h> /* diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h index 02f239569b93..ab2547f97c2c 100644 --- a/arch/x86/include/asm/fsgsbase.h +++ b/arch/x86/include/asm/fsgsbase.h @@ -60,7 +60,7 @@ static inline unsigned long x86_fsbase_read_cpu(void) if (boot_cpu_has(X86_FEATURE_FSGSBASE)) fsbase = rdfsbase(); else - rdmsrl(MSR_FS_BASE, fsbase); + rdmsrq(MSR_FS_BASE, fsbase); return fsbase; } @@ -70,7 +70,7 @@ static inline void x86_fsbase_write_cpu(unsigned long fsbase) if (boot_cpu_has(X86_FEATURE_FSGSBASE)) wrfsbase(fsbase); else - wrmsrl(MSR_FS_BASE, fsbase); + wrmsrq(MSR_FS_BASE, fsbase); } extern unsigned long x86_gsbase_read_cpu_inactive(void); diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 53e4015242b4..97f341777db5 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -82,6 +82,7 @@ #define INAT_NO_REX2 (1 << (INAT_FLAG_OFFS + 8)) #define INAT_REX2_VARIANT (1 << (INAT_FLAG_OFFS + 9)) #define INAT_EVEX_SCALABLE (1 << (INAT_FLAG_OFFS + 10)) +#define INAT_INV64 (1 << (INAT_FLAG_OFFS + 11)) /* Attribute making macros for attribute tables */ #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) @@ -242,4 +243,9 @@ static inline int inat_evex_scalable(insn_attr_t attr) { return attr & INAT_EVEX_SCALABLE; } + +static inline int inat_is_invalid64(insn_attr_t attr) +{ + return attr & INAT_INV64; +} #endif diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index e889c3bab5a2..ca309a3227c7 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -217,7 +217,7 @@ void memset_io(volatile void __iomem *, int, size_t); static inline void __iowrite32_copy(void __iomem *to, const void *from, size_t count) { - asm volatile("rep ; movsl" + asm volatile("rep movsl" : "=&c"(count), "=&D"(to), "=&S"(from) : "0"(count), "1"(to), "2"(from) : "memory"); @@ -282,7 +282,7 @@ static inline void outs##bwl(u16 port, const void *addr, unsigned long count) \ count--; \ } \ } else { \ - asm volatile("rep; outs" #bwl \ + asm volatile("rep outs" #bwl \ : "+S"(addr), "+c"(count) \ : "d"(port) : "memory"); \ } \ @@ -298,7 +298,7 @@ static inline void ins##bwl(u16 port, void *addr, unsigned long count) \ count--; \ } \ } else { \ - asm volatile("rep; ins" #bwl \ + asm volatile("rep ins" #bwl \ : "+D"(addr), "+c"(count) \ : "d"(port) : "memory"); \ } \ diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 5432457d2338..f2ad77929d6e 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -8,6 +8,9 @@ # define PA_PGD 2 # define PA_SWAP_PAGE 3 # define PAGES_NR 4 +#else +/* Size of each exception handler referenced by the IDT */ +# define KEXEC_DEBUG_EXC_HANDLER_SIZE 6 /* PUSHI, PUSHI, 2-byte JMP */ 
#endif # define KEXEC_CONTROL_PAGE_SIZE 4096 @@ -59,6 +62,10 @@ struct kimage; extern unsigned long kexec_va_control_page; extern unsigned long kexec_pa_table_page; extern unsigned long kexec_pa_swap_page; +extern gate_desc kexec_debug_idt[]; +extern unsigned char kexec_debug_exc_vectors[]; +extern uint16_t kexec_debug_8250_port; +extern unsigned long kexec_debug_8250_mmio32; #endif /* diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7bc174a1f1cb..9c971f846108 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -34,6 +34,7 @@ #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/msr-index.h> +#include <asm/msr.h> #include <asm/asm.h> #include <asm/irq_remapping.h> #include <asm/kvm_page_track.h> @@ -2278,7 +2279,7 @@ static inline unsigned long read_msr(unsigned long msr) { u64 value; - rdmsrl(msr, value); + rdmsrq(msr, value); return value; } #endif diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index b51d8a4673f5..9d38ae744a2e 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -141,5 +141,15 @@ #define SYM_FUNC_START_WEAK_NOALIGN(name) \ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) +/* + * Expose 'sym' to the startup code in arch/x86/boot/startup/, by emitting an + * alias prefixed with __pi_ + */ +#ifdef __ASSEMBLER__ +#define SYM_PIC_ALIAS(sym) SYM_ALIAS(__pi_ ## sym, sym, SYM_L_GLOBAL) +#else +#define SYM_PIC_ALIAS(sym) extern typeof(sym) __PASTE(__pi_, sym) __alias(sym) +#endif + #endif /* _ASM_X86_LINKAGE_H */ diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 1530ee301dfe..ea6494628cb0 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -61,7 +61,7 @@ void __init sev_es_init_vc_handling(void); static inline u64 sme_get_me_mask(void) { - return RIP_REL_REF(sme_me_mask); + return sme_me_mask; } #define __bss_decrypted __section(".bss..decrypted") diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index be7cddc414e4..8b41f26f003b 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_MICROCODE_H #define _ASM_X86_MICROCODE_H +#include <asm/msr.h> + struct cpu_signature { unsigned int sig; unsigned int pf; @@ -63,7 +65,7 @@ static inline u32 intel_get_microcode_revision(void) { u32 rev, dummy; - native_wrmsrl(MSR_IA32_UCODE_REV, 0); + native_wrmsrq(MSR_IA32_UCODE_REV, 0); /* As documented in the SDM: Do a CPUID 1 here */ native_cpuid_eax(1); diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 8b8055a8eb9e..0fe9c569d171 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -16,6 +16,8 @@ #define MM_CONTEXT_LOCK_LAM 2 /* Allow LAM and SVA coexisting */ #define MM_CONTEXT_FORCE_TAGGED_SVA 3 +/* Tracks mm_cpumask */ +#define MM_CONTEXT_NOTRACK 4 /* * x86 has arch-specific MMU state beyond what lives in mm_struct. 
@@ -44,9 +46,7 @@ typedef struct { struct ldt_struct *ldt; #endif -#ifdef CONFIG_X86_64 unsigned long flags; -#endif #ifdef CONFIG_ADDRESS_MASKING /* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 2398058b6e83..73bf3b1b44e8 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -190,7 +190,7 @@ extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, #define activate_mm(prev, next) \ do { \ paravirt_enter_mmap(next); \ - switch_mm((prev), (next), NULL); \ + switch_mm_irqs_off((prev), (next), NULL); \ } while (0); #ifdef CONFIG_X86_32 @@ -247,6 +247,16 @@ static inline bool is_64bit_mm(struct mm_struct *mm) } #endif +static inline bool is_notrack_mm(struct mm_struct *mm) +{ + return test_bit(MM_CONTEXT_NOTRACK, &mm->context.flags); +} + +static inline void set_notrack_mm(struct mm_struct *mm) +{ + set_bit(MM_CONTEXT_NOTRACK, &mm->context.flags); +} + /* * We only want to enforce protection keys on the current process * because we effectively have no access to PKRU for other @@ -272,4 +282,7 @@ unsigned long __get_current_cr3_fast(void); #include <asm-generic/mmu_context.h> +extern struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm); +extern void unuse_temporary_mm(struct mm_struct *prev_mm); + #endif /* _ASM_X86_MMU_CONTEXT_H */ diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index bab5ccfc60a7..778444310cfb 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -8,6 +8,7 @@ #include <linux/io.h> #include <asm/nospec-branch.h> #include <asm/paravirt.h> +#include <asm/msr.h> #include <hyperv/hvhdk.h> /* @@ -304,7 +305,7 @@ void hv_set_non_nested_msr(unsigned int reg, u64 value); static __always_inline u64 hv_raw_get_msr(unsigned int reg) { - return __rdmsr(reg); + return native_rdmsrq(reg); } #else /* CONFIG_HYPERV */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index e7d2f460fcc6..b7dded3c8113 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -533,7 +533,7 @@ #define MSR_HWP_CAPABILITIES 0x00000771 #define MSR_HWP_REQUEST_PKG 0x00000772 #define MSR_HWP_INTERRUPT 0x00000773 -#define MSR_HWP_REQUEST 0x00000774 +#define MSR_HWP_REQUEST 0x00000774 #define MSR_HWP_STATUS 0x00000777 /* CPUID.6.EAX */ @@ -550,16 +550,16 @@ #define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff) /* IA32_HWP_REQUEST */ -#define HWP_MIN_PERF(x) (x & 0xff) -#define HWP_MAX_PERF(x) ((x & 0xff) << 8) +#define HWP_MIN_PERF(x) (x & 0xff) +#define HWP_MAX_PERF(x) ((x & 0xff) << 8) #define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) -#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24) +#define HWP_ENERGY_PERF_PREFERENCE(x) (((u64)x & 0xff) << 24) #define HWP_EPP_PERFORMANCE 0x00 #define HWP_EPP_BALANCE_PERFORMANCE 0x80 #define HWP_EPP_BALANCE_POWERSAVE 0xC0 #define HWP_EPP_POWERSAVE 0xFF -#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32) -#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42) +#define HWP_ACTIVITY_WINDOW(x) ((u64)(x & 0xff3) << 32) +#define HWP_PACKAGE_CONTROL(x) ((u64)(x & 0x1) << 42) /* IA32_HWP_STATUS */ #define HWP_GUARANTEED_CHANGE(x) (x & 0x1) @@ -602,7 +602,11 @@ /* V6 PMON MSR range */ #define MSR_IA32_PMC_V6_GP0_CTR 0x1900 #define MSR_IA32_PMC_V6_GP0_CFG_A 0x1901 +#define MSR_IA32_PMC_V6_GP0_CFG_B 0x1902 +#define 
MSR_IA32_PMC_V6_GP0_CFG_C 0x1903 #define MSR_IA32_PMC_V6_FX0_CTR 0x1980 +#define MSR_IA32_PMC_V6_FX0_CFG_B 0x1982 +#define MSR_IA32_PMC_V6_FX0_CFG_C 0x1983 #define MSR_IA32_PMC_V6_STEP 4 /* KeyID partitioning between MKTME and TDX */ diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 9397a319d165..4096b8af4ba7 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -12,6 +12,7 @@ #include <uapi/asm/msr.h> #include <asm/shared/msr.h> +#include <linux/types.h> #include <linux/percpu.h> struct msr_info { @@ -37,23 +38,6 @@ struct saved_msrs { }; /* - * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A" - * constraint has different meanings. For i386, "A" means exactly - * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead, - * it means rax *or* rdx. - */ -#ifdef CONFIG_X86_64 -/* Using 64-bit values saves one instruction clearing the high half of low */ -#define DECLARE_ARGS(val, low, high) unsigned long low, high -#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32) -#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) -#else -#define DECLARE_ARGS(val, low, high) unsigned long long val -#define EAX_EDX_VAL(val, low, high) (val) -#define EAX_EDX_RET(val, low, high) "=A" (val) -#endif - -/* * Be very careful with includes. This header is prone to include loops. */ #include <asm/atomic.h> @@ -63,13 +47,13 @@ struct saved_msrs { DECLARE_TRACEPOINT(read_msr); DECLARE_TRACEPOINT(write_msr); DECLARE_TRACEPOINT(rdpmc); -extern void do_trace_write_msr(unsigned int msr, u64 val, int failed); -extern void do_trace_read_msr(unsigned int msr, u64 val, int failed); -extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed); +extern void do_trace_write_msr(u32 msr, u64 val, int failed); +extern void do_trace_read_msr(u32 msr, u64 val, int failed); +extern void do_trace_rdpmc(u32 msr, u64 val, int failed); #else -static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {} -static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {} -static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} +static inline void do_trace_write_msr(u32 msr, u64 val, int failed) {} +static inline void do_trace_read_msr(u32 msr, u64 val, int failed) {} +static inline void do_trace_rdpmc(u32 msr, u64 val, int failed) {} #endif /* @@ -79,9 +63,9 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} * think of extending them - you will be slapped with a stinking trout or a frozen * shark will reach you, wherever you are! You've been warned. 
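The (u64) casts in the HWP_* field builders above matter because the energy-preference and activity-window fields sit at bit 24 and beyond, where plain 32-bit signed arithmetic would overflow or truncate. A hedged sketch of composing a request with the new wrmsrq() name (the performance values are made up):

u64 req = HWP_MIN_PERF(8) |
	  HWP_MAX_PERF(32) |
	  HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_BALANCE_PERFORMANCE);

wrmsrq(MSR_HWP_REQUEST, req);	/* program IA32_HWP_REQUEST in one 64-bit write */
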
*/ -static __always_inline unsigned long long __rdmsr(unsigned int msr) +static __always_inline u64 __rdmsr(u32 msr) { - DECLARE_ARGS(val, low, high); + EAX_EDX_DECLARE_ARGS(val, low, high); asm volatile("1: rdmsr\n" "2:\n" @@ -91,12 +75,12 @@ static __always_inline unsigned long long __rdmsr(unsigned int msr) return EAX_EDX_VAL(val, low, high); } -static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high) +static __always_inline void __wrmsrq(u32 msr, u64 val) { asm volatile("1: wrmsr\n" "2:\n" _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR) - : : "c" (msr), "a"(low), "d" (high) : "memory"); + : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)) : "memory"); } #define native_rdmsr(msr, val1, val2) \ @@ -106,16 +90,20 @@ do { \ (void)((val2) = (u32)(__val >> 32)); \ } while (0) +static __always_inline u64 native_rdmsrq(u32 msr) +{ + return __rdmsr(msr); +} + #define native_wrmsr(msr, low, high) \ - __wrmsr(msr, low, high) + __wrmsrq((msr), (u64)(high) << 32 | (low)) -#define native_wrmsrl(msr, val) \ - __wrmsr((msr), (u32)((u64)(val)), \ - (u32)((u64)(val) >> 32)) +#define native_wrmsrq(msr, val) \ + __wrmsrq((msr), (val)) -static inline unsigned long long native_read_msr(unsigned int msr) +static inline u64 native_read_msr(u32 msr) { - unsigned long long val; + u64 val; val = __rdmsr(msr); @@ -125,34 +113,35 @@ static inline unsigned long long native_read_msr(unsigned int msr) return val; } -static inline unsigned long long native_read_msr_safe(unsigned int msr, - int *err) +static inline int native_read_msr_safe(u32 msr, u64 *p) { - DECLARE_ARGS(val, low, high); + int err; + EAX_EDX_DECLARE_ARGS(val, low, high); asm volatile("1: rdmsr ; xor %[err],%[err]\n" "2:\n\t" _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err]) - : [err] "=r" (*err), EAX_EDX_RET(val, low, high) + : [err] "=r" (err), EAX_EDX_RET(val, low, high) : "c" (msr)); if (tracepoint_enabled(read_msr)) - do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err); - return EAX_EDX_VAL(val, low, high); + do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err); + + *p = EAX_EDX_VAL(val, low, high); + + return err; } /* Can be uninlined because referenced by paravirt */ -static inline void notrace -native_write_msr(unsigned int msr, u32 low, u32 high) +static inline void notrace native_write_msr(u32 msr, u64 val) { - __wrmsr(msr, low, high); + native_wrmsrq(msr, val); if (tracepoint_enabled(write_msr)) - do_trace_write_msr(msr, ((u64)high << 32 | low), 0); + do_trace_write_msr(msr, val, 0); } /* Can be uninlined because referenced by paravirt */ -static inline int notrace -native_write_msr_safe(unsigned int msr, u32 low, u32 high) +static inline int notrace native_write_msr_safe(u32 msr, u64 val) { int err; @@ -160,73 +149,19 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high) "2:\n\t" _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err]) : [err] "=a" (err) - : "c" (msr), "0" (low), "d" (high) + : "c" (msr), "0" ((u32)val), "d" ((u32)(val >> 32)) : "memory"); if (tracepoint_enabled(write_msr)) - do_trace_write_msr(msr, ((u64)high << 32 | low), err); + do_trace_write_msr(msr, val, err); return err; } extern int rdmsr_safe_regs(u32 regs[8]); extern int wrmsr_safe_regs(u32 regs[8]); -/** - * rdtsc() - returns the current TSC without ordering constraints - * - * rdtsc() returns the result of RDTSC as a 64-bit integer. The - * only ordering constraint it supplies is the ordering implied by - * "asm volatile": it will put the RDTSC in the place you expect. 
The - * CPU can and will speculatively execute that RDTSC, though, so the - * results can be non-monotonic if compared on different CPUs. - */ -static __always_inline unsigned long long rdtsc(void) +static inline u64 native_read_pmc(int counter) { - DECLARE_ARGS(val, low, high); - - asm volatile("rdtsc" : EAX_EDX_RET(val, low, high)); - - return EAX_EDX_VAL(val, low, high); -} - -/** - * rdtsc_ordered() - read the current TSC in program order - * - * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer. - * It is ordered like a load to a global in-memory counter. It should - * be impossible to observe non-monotonic rdtsc_unordered() behavior - * across multiple CPUs as long as the TSC is synced. - */ -static __always_inline unsigned long long rdtsc_ordered(void) -{ - DECLARE_ARGS(val, low, high); - - /* - * The RDTSC instruction is not ordered relative to memory - * access. The Intel SDM and the AMD APM are both vague on this - * point, but empirically an RDTSC instruction can be - * speculatively executed before prior loads. An RDTSC - * immediately after an appropriate barrier appears to be - * ordered as a normal load, that is, it provides the same - * ordering guarantees as reading from a global memory location - * that some other imaginary CPU is updating continuously with a - * time stamp. - * - * Thus, use the preferred barrier on the respective CPU, aiming for - * RDTSCP as the default. - */ - asm volatile(ALTERNATIVE_2("rdtsc", - "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC, - "rdtscp", X86_FEATURE_RDTSCP) - : EAX_EDX_RET(val, low, high) - /* RDTSCP clobbers ECX with MSR_TSC_AUX. */ - :: "ecx"); - - return EAX_EDX_VAL(val, low, high); -} - -static inline unsigned long long native_read_pmc(int counter) -{ - DECLARE_ARGS(val, low, high); + EAX_EDX_DECLARE_ARGS(val, low, high); asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); if (tracepoint_enabled(rdpmc)) @@ -251,51 +186,44 @@ do { \ (void)((high) = (u32)(__val >> 32)); \ } while (0) -static inline void wrmsr(unsigned int msr, u32 low, u32 high) +static inline void wrmsr(u32 msr, u32 low, u32 high) { - native_write_msr(msr, low, high); + native_write_msr(msr, (u64)high << 32 | low); } -#define rdmsrl(msr, val) \ +#define rdmsrq(msr, val) \ ((val) = native_read_msr((msr))) -static inline void wrmsrl(unsigned int msr, u64 val) +static inline void wrmsrq(u32 msr, u64 val) { - native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32)); + native_write_msr(msr, val); } /* wrmsr with exception handling */ -static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high) +static inline int wrmsrq_safe(u32 msr, u64 val) { - return native_write_msr_safe(msr, low, high); + return native_write_msr_safe(msr, val); } /* rdmsr with exception handling */ #define rdmsr_safe(msr, low, high) \ ({ \ - int __err; \ - u64 __val = native_read_msr_safe((msr), &__err); \ + u64 __val; \ + int __err = native_read_msr_safe((msr), &__val); \ (*low) = (u32)__val; \ (*high) = (u32)(__val >> 32); \ __err; \ }) -static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p) +static inline int rdmsrq_safe(u32 msr, u64 *p) { - int err; - - *p = native_read_msr_safe(msr, &err); - return err; + return native_read_msr_safe(msr, p); } -#define rdpmc(counter, low, high) \ -do { \ - u64 _l = native_read_pmc((counter)); \ - (low) = (u32)_l; \ - (high) = (u32)(_l >> 32); \ -} while (0) - -#define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) +static __always_inline u64 rdpmc(int counter) +{ + return 
native_read_pmc(counter); +} #endif /* !CONFIG_PARAVIRT_XXL */ @@ -315,11 +243,11 @@ static __always_inline void wrmsrns(u32 msr, u64 val) } /* - * 64-bit version of wrmsr_safe(): + * Dual u32 version of wrmsrq_safe(): */ -static inline int wrmsrl_safe(u32 msr, u64 val) +static inline int wrmsr_safe(u32 msr, u32 low, u32 high) { - return wrmsr_safe(msr, (u32)val, (u32)(val >> 32)); + return wrmsrq_safe(msr, (u64)high << 32 | low); } struct msr __percpu *msrs_alloc(void); @@ -330,14 +258,14 @@ int msr_clear_bit(u32 msr, u8 bit); #ifdef CONFIG_SMP int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); -int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); -int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q); +int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); +int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q); void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs); void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs); int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); -int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); -int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q); +int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); +int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q); int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); #else /* CONFIG_SMP */ @@ -351,14 +279,14 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) wrmsr(msr_no, l, h); return 0; } -static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) { - rdmsrl(msr_no, *q); + rdmsrq(msr_no, *q); return 0; } -static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q) { - wrmsrl(msr_no, q); + wrmsrq(msr_no, q); return 0; } static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no, @@ -380,13 +308,13 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { return wrmsr_safe(msr_no, l, h); } -static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) { - return rdmsrl_safe(msr_no, q); + return rdmsrq_safe(msr_no, q); } -static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) { - return wrmsrl_safe(msr_no, q); + return wrmsrq_safe(msr_no, q); } static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) { @@ -397,5 +325,11 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) return wrmsr_safe_regs(regs); } #endif /* CONFIG_SMP */ + +/* Compatibility wrappers: */ +#define rdmsrl(msr, val) rdmsrq(msr, val) +#define wrmsrl(msr, val) wrmsrq(msr, val) +#define rdmsrl_on_cpu(cpu, msr, q) rdmsrq_on_cpu(cpu, msr, q) + #endif /* __ASSEMBLER__ */ #endif /* _ASM_X86_MSR_H */ diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index ce857ef54cf1..dd2b129b0418 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -25,29 +25,31 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 -static __always_inline void __monitor(const void 
*eax, unsigned long ecx, - unsigned long edx) +static __always_inline void __monitor(const void *eax, u32 ecx, u32 edx) { - /* "monitor %eax, %ecx, %edx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc8;" - :: "a" (eax), "c" (ecx), "d"(edx)); + /* + * Use the instruction mnemonic with implicit operands, as the LLVM + * assembler fails to assemble the mnemonic with explicit operands: + */ + asm volatile("monitor" :: "a" (eax), "c" (ecx), "d" (edx)); } -static __always_inline void __monitorx(const void *eax, unsigned long ecx, - unsigned long edx) +static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx) { - /* "monitorx %eax, %ecx, %edx;" */ - asm volatile(".byte 0x0f, 0x01, 0xfa;" + /* "monitorx %eax, %ecx, %edx" */ + asm volatile(".byte 0x0f, 0x01, 0xfa" :: "a" (eax), "c" (ecx), "d"(edx)); } -static __always_inline void __mwait(unsigned long eax, unsigned long ecx) +static __always_inline void __mwait(u32 eax, u32 ecx) { mds_idle_clear_cpu_buffers(); - /* "mwait %eax, %ecx;" */ - asm volatile(".byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); + /* + * Use the instruction mnemonic with implicit operands, as the LLVM + * assembler fails to assemble the mnemonic with explicit operands: + */ + asm volatile("mwait" :: "a" (eax), "c" (ecx)); } /* @@ -76,13 +78,12 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx) * EAX (logical) address to monitor * ECX #GP if not zero */ -static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, - unsigned long ecx) +static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx) { /* No MDS buffer clear as this is AMD/HYGON only */ - /* "mwaitx %eax, %ebx, %ecx;" */ - asm volatile(".byte 0x0f, 0x01, 0xfb;" + /* "mwaitx %eax, %ebx, %ecx" */ + asm volatile(".byte 0x0f, 0x01, 0xfb" :: "a" (eax), "b" (ebx), "c" (ecx)); } @@ -95,12 +96,11 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, * executing mwait, it would otherwise go unnoticed and the next tick * would not be reprogrammed accordingly before mwait ever wakes up. */ -static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx) +static __always_inline void __sti_mwait(u32 eax, u32 ecx) { mds_idle_clear_cpu_buffers(); - /* "mwait %eax, %ecx;" */ - asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" - :: "a" (eax), "c" (ecx)); + + asm volatile("sti; mwait" :: "a" (eax), "c" (ecx)); } /* @@ -113,16 +113,13 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx) * New with Core Duo processors, MWAIT can take some hints based on CPU * capability. 
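As a hedged sketch of how the __monitor()/__mwait() pair above is meant to be used (the wait helper and flag are hypothetical, not from the patch), the caller arms the monitor on the address, re-checks the condition to close the wakeup race, and only then executes MWAIT:

static void example_wait_for_store(unsigned int *flag)
{
	while (!READ_ONCE(*flag)) {
		__monitor(flag, 0, 0);		/* arm address monitoring on the line */
		if (!READ_ONCE(*flag))		/* re-check to avoid a lost wakeup */
			__mwait(0, 0);		/* sleep until the line is written */
	}
}
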
*/ -static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) +static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx) { if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { - if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) { - mb(); - clflush((void *)¤t_thread_info()->flags); - mb(); - } + const void *addr = ¤t_thread_info()->flags; - __monitor((void *)¤t_thread_info()->flags, 0, 0); + alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); + __monitor(addr, 0, 0); if (!need_resched()) { if (ecx & 1) { @@ -144,16 +141,9 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo */ static inline void __tpause(u32 ecx, u32 edx, u32 eax) { - /* "tpause %ecx, %edx, %eax;" */ - #ifdef CONFIG_AS_TPAUSE - asm volatile("tpause %%ecx\n" - : - : "c"(ecx), "d"(edx), "a"(eax)); - #else - asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n" - : - : "c"(ecx), "d"(edx), "a"(eax)); - #endif + /* "tpause %ecx" */ + asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1" + :: "c" (ecx), "d" (edx), "a" (eax)); } #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index f677382093f3..79d88d12c8fb 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h @@ -14,12 +14,26 @@ extern void release_perfctr_nmi(unsigned int); extern int reserve_evntsel_nmi(unsigned int); extern void release_evntsel_nmi(unsigned int); -extern int unknown_nmi_panic; - #endif /* CONFIG_X86_LOCAL_APIC */ +extern int unknown_nmi_panic; +extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; + +/* NMI handler flags */ #define NMI_FLAG_FIRST 1 +/** + * enum - NMI types. + * @NMI_LOCAL: Local NMI, CPU-specific NMI generated by the Local APIC. + * @NMI_UNKNOWN: Unknown NMI, the source of the NMI may not be identified. + * @NMI_SERR: System Error NMI, typically triggered by PCI errors. + * @NMI_IO_CHECK: I/O Check NMI, related to I/O errors. + * @NMI_MAX: Maximum value for NMI types. + * + * NMI types are used to categorize NMIs and to dispatch them to the + * appropriate handler. + */ enum { NMI_LOCAL=0, NMI_UNKNOWN, @@ -28,6 +42,7 @@ enum { NMI_MAX }; +/* NMI handler return values */ #define NMI_DONE 0 #define NMI_HANDLED 1 @@ -41,6 +56,25 @@ struct nmiaction { const char *name; }; +/** + * register_nmi_handler - Register a handler for a specific NMI type + * @t: NMI type (e.g. NMI_LOCAL) + * @fn: The NMI handler + * @fg: Flags associated with the NMI handler + * @n: Name of the NMI handler + * @init: Optional __init* attributes for struct nmiaction + * + * Adds the provided handler to the list of handlers for the specified + * NMI type. Handlers flagged with NMI_FLAG_FIRST would be executed first. + * + * Sometimes the source of an NMI can't be reliably determined which + * results in an NMI being tagged as "unknown". Register an additional + * handler using the NMI type - NMI_UNKNOWN to handle such cases. The + * caller would get one last chance to assume responsibility for the + * NMI. + * + * Return: 0 on success, or an error code on failure. + */ #define register_nmi_handler(t, fn, fg, n, init...) \ ({ \ static struct nmiaction init fn##_na = { \ @@ -54,7 +88,16 @@ struct nmiaction { int __register_nmi_handler(unsigned int, struct nmiaction *); -void unregister_nmi_handler(unsigned int, const char *); +/** + * unregister_nmi_handler - Unregister a handler for a specific NMI type + * @type: NMI type (e.g. 
NMI_LOCAL) + * @name: Name of the NMI handler used during registration + * + * Removes the handler associated with the specified NMI type from the + * NMI handler list. The "name" is used as a lookup key to identify the + * handler. + */ +void unregister_nmi_handler(unsigned int type, const char *name); void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler); diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 7d04ade33541..20d754b98f3f 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -327,7 +327,7 @@ .endm .macro CLEAR_BRANCH_HISTORY_VMEXIT - ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT + ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_VMEXIT .endm #else #define CLEAR_BRANCH_HISTORY @@ -571,7 +571,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); -DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); +DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear); extern u16 mds_verw_sel; diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index a9b62e0e6f79..623f1e9f493e 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -73,7 +73,6 @@ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; extern void find_low_pfn_range(void); -extern void setup_bootmem_allocator(void); #endif /* !__ASSEMBLER__ */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index d3aab6f4e59a..015d23f3e01f 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -62,7 +62,6 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); KCFI_REFERENCE(copy_page); -#ifdef CONFIG_X86_5LEVEL /* * User space process size. This is the first address outside the user range. * There are a few constraints that determine this: @@ -93,7 +92,6 @@ static __always_inline unsigned long task_size_max(void) return ret; } -#endif /* CONFIG_X86_5LEVEL */ #endif /* !__ASSEMBLER__ */ diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 1faa8f88850a..7400dab373fe 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -41,25 +41,14 @@ #define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL) #define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL) -#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT #define __PAGE_OFFSET page_offset_base -#else -#define __PAGE_OFFSET __PAGE_OFFSET_BASE_L4 -#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ #define __START_KERNEL_map _AC(0xffffffff80000000, UL) /* See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map. */ #define __PHYSICAL_MASK_SHIFT 52 - -#ifdef CONFIG_X86_5LEVEL #define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled() ? 56 : 47) -/* See task_size_max() in <asm/page_64.h> */ -#else -#define __VIRTUAL_MASK_SHIFT 47 -#define task_size_max() ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) -#endif #define TASK_SIZE_MAX task_size_max() #define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE) diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 9f77bf03d747..018a8d906ca3 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -29,9 +29,7 @@ #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC /* Physical address where kernel should be loaded. 
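Putting the register_nmi_handler()/unregister_nmi_handler() kernel-doc above together, a hedged usage sketch looks like the following; the handler, the device check and the "example" name are all hypothetical:

static int example_nmi(unsigned int type, struct pt_regs *regs)
{
	if (!example_device_raised_nmi())	/* hypothetical hardware check */
		return NMI_DONE;		/* not ours, let other handlers look */

	return NMI_HANDLED;			/* claim the NMI */
}

static int __init example_init(void)
{
	return register_nmi_handler(NMI_UNKNOWN, example_nmi, 0, "example");
}

static void __exit example_exit(void)
{
	unregister_nmi_handler(NMI_UNKNOWN, "example");
}
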
*/ -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ - + (CONFIG_PHYSICAL_ALIGN - 1)) \ - & ~(CONFIG_PHYSICAL_ALIGN - 1)) +#define LOAD_PHYSICAL_ADDR __ALIGN_KERNEL_MASK(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN - 1) #define __START_KERNEL (__START_KERNEL_map + LOAD_PHYSICAL_ADDR) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index c4c23190925c..b5e59a7ba0d0 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -175,26 +175,24 @@ static inline void __write_cr4(unsigned long x) PVOP_VCALL1(cpu.write_cr4, x); } -static inline u64 paravirt_read_msr(unsigned msr) +static inline u64 paravirt_read_msr(u32 msr) { return PVOP_CALL1(u64, cpu.read_msr, msr); } -static inline void paravirt_write_msr(unsigned msr, - unsigned low, unsigned high) +static inline void paravirt_write_msr(u32 msr, u64 val) { - PVOP_VCALL3(cpu.write_msr, msr, low, high); + PVOP_VCALL2(cpu.write_msr, msr, val); } -static inline u64 paravirt_read_msr_safe(unsigned msr, int *err) +static inline int paravirt_read_msr_safe(u32 msr, u64 *val) { - return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err); + return PVOP_CALL2(int, cpu.read_msr_safe, msr, val); } -static inline int paravirt_write_msr_safe(unsigned msr, - unsigned low, unsigned high) +static inline int paravirt_write_msr_safe(u32 msr, u64 val) { - return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high); + return PVOP_CALL2(int, cpu.write_msr_safe, msr, val); } #define rdmsr(msr, val1, val2) \ @@ -204,55 +202,46 @@ do { \ val2 = _l >> 32; \ } while (0) -#define wrmsr(msr, val1, val2) \ -do { \ - paravirt_write_msr(msr, val1, val2); \ -} while (0) +static __always_inline void wrmsr(u32 msr, u32 low, u32 high) +{ + paravirt_write_msr(msr, (u64)high << 32 | low); +} -#define rdmsrl(msr, val) \ +#define rdmsrq(msr, val) \ do { \ val = paravirt_read_msr(msr); \ } while (0) -static inline void wrmsrl(unsigned msr, u64 val) +static inline void wrmsrq(u32 msr, u64 val) { - wrmsr(msr, (u32)val, (u32)(val>>32)); + paravirt_write_msr(msr, val); } -#define wrmsr_safe(msr, a, b) paravirt_write_msr_safe(msr, a, b) +static inline int wrmsrq_safe(u32 msr, u64 val) +{ + return paravirt_write_msr_safe(msr, val); +} /* rdmsr with exception handling */ #define rdmsr_safe(msr, a, b) \ ({ \ - int _err; \ - u64 _l = paravirt_read_msr_safe(msr, &_err); \ + u64 _l; \ + int _err = paravirt_read_msr_safe((msr), &_l); \ (*a) = (u32)_l; \ - (*b) = _l >> 32; \ + (*b) = (u32)(_l >> 32); \ _err; \ }) -static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) +static __always_inline int rdmsrq_safe(u32 msr, u64 *p) { - int err; - - *p = paravirt_read_msr_safe(msr, &err); - return err; + return paravirt_read_msr_safe(msr, p); } -static inline unsigned long long paravirt_read_pmc(int counter) +static __always_inline u64 rdpmc(int counter) { return PVOP_CALL1(u64, cpu.read_pmc, counter); } -#define rdpmc(counter, low, high) \ -do { \ - u64 _l = paravirt_read_pmc(counter); \ - low = (u32)_l; \ - high = _l >> 32; \ -} while (0) - -#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter)) - static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) { PVOP_VCALL2(cpu.alloc_ldt, ldt, entries); @@ -474,8 +463,6 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) PVOP_VCALL2(mmu.set_p4d, p4dp, val); } -#if CONFIG_PGTABLE_LEVELS >= 5 - static inline p4d_t __p4d(p4dval_t val) { p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val, @@ -507,8 +494,6 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t 
pgd) set_pgd(pgdp, native_make_pgd(0)); \ } while (0) -#endif /* CONFIG_PGTABLE_LEVELS == 5 */ - static inline void p4d_clear(p4d_t *p4dp) { set_p4d(p4dp, native_make_p4d(0)); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 631c306ce1ff..37a8627d8277 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -91,15 +91,15 @@ struct pv_cpu_ops { unsigned int *ecx, unsigned int *edx); /* Unsafe MSR operations. These will warn or panic on failure. */ - u64 (*read_msr)(unsigned int msr); - void (*write_msr)(unsigned int msr, unsigned low, unsigned high); + u64 (*read_msr)(u32 msr); + void (*write_msr)(u32 msr, u64 val); /* * Safe MSR operations. - * read sets err to 0 or -EIO. write returns 0 or -EIO. + * Returns 0 or -EIO. */ - u64 (*read_msr_safe)(unsigned int msr, int *err); - int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high); + int (*read_msr_safe)(u32 msr, u64 *val); + int (*write_msr_safe)(u32 msr, u64 val); u64 (*read_pmc)(int counter); @@ -189,12 +189,10 @@ struct pv_mmu_ops { void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval); -#if CONFIG_PGTABLE_LEVELS >= 5 struct paravirt_callee_save p4d_val; struct paravirt_callee_save make_p4d; void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval); -#endif /* CONFIG_PGTABLE_LEVELS >= 5 */ struct pv_lazy_ops lazy_mode; diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 5fe314a2e73e..b0d03b6c279b 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -29,6 +29,8 @@ #ifdef CONFIG_SMP +#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":" + #ifdef CONFIG_CC_HAS_NAMED_AS #ifdef __CHECKER__ @@ -36,23 +38,23 @@ # define __seg_fs __attribute__((address_space(__seg_fs))) #endif +#define __percpu_prefix #define __percpu_seg_override CONCATENATE(__seg_, __percpu_seg) -#define __percpu_prefix "" #else /* !CONFIG_CC_HAS_NAMED_AS: */ +#define __percpu_prefix __force_percpu_prefix #define __percpu_seg_override -#define __percpu_prefix "%%"__stringify(__percpu_seg)":" #endif /* CONFIG_CC_HAS_NAMED_AS */ -#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":" -#define __my_cpu_offset this_cpu_read(this_cpu_off) - /* * Compared to the generic __my_cpu_offset version, the following * saves one instruction and avoids clobbering a temp register. - * + */ +#define __my_cpu_offset this_cpu_read(this_cpu_off) + +/* * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit * kernel, because games are played with CONFIG_X86_64 there and * sizeof(this_cpu_off) becames 4. 
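To make the new error convention of the safe MSR accessors concrete (both the native and the paravirt read_msr_safe paths above now return 0 or -EIO and pass the value back through a pointer, instead of returning the value and filling an error out-parameter), a hedged caller-side sketch:

u64 val;

if (rdmsrq_safe(MSR_IA32_PLATFORM_ID, &val)) {
	/* the RDMSR faulted: treat the MSR as absent */
	val = 0;
}
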
@@ -77,9 +79,9 @@ #else /* !CONFIG_SMP: */ +#define __force_percpu_prefix +#define __percpu_prefix #define __percpu_seg_override -#define __percpu_prefix "" -#define __force_percpu_prefix "" #define PER_CPU_VAR(var) (var)__percpu_rel @@ -97,8 +99,8 @@ # define __my_cpu_var(var) (*__my_cpu_ptr(&(var))) #endif -#define __percpu_arg(x) __percpu_prefix "%" #x #define __force_percpu_arg(x) __force_percpu_prefix "%" #x +#define __percpu_arg(x) __percpu_prefix "%" #x /* * For arch-specific code, we can use direct single-insn ops (they diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 812dac3f79f0..70d1d94aca7e 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -195,6 +195,7 @@ union cpuid10_edx { */ #define ARCH_PERFMON_EXT_LEAF 0x00000023 #define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1 +#define ARCH_PERFMON_ACR_LEAF 0x2 union cpuid35_eax { struct { diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h index 66425424ce91..54690bd4ddbe 100644 --- a/arch/x86/include/asm/pgtable-2level_types.h +++ b/arch/x86/include/asm/pgtable-2level_types.h @@ -18,8 +18,6 @@ typedef union { } pte_t; #endif /* !__ASSEMBLER__ */ -#define SHARED_KERNEL_PMD 0 - #define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED /* diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h index 9d5b257d44e3..580b09bf6a45 100644 --- a/arch/x86/include/asm/pgtable-3level_types.h +++ b/arch/x86/include/asm/pgtable-3level_types.h @@ -27,9 +27,7 @@ typedef union { } pmd_t; #endif /* !__ASSEMBLER__ */ -#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI)) - -#define ARCH_PAGE_TABLE_SYNC_MASK (SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED) +#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED /* * PGDIR_SHIFT determines what a top-level page table entry can map diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 7bd6bd6df4a1..5ddba366d3b4 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -292,13 +292,6 @@ static inline unsigned long pgd_pfn(pgd_t pgd) return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT; } -#define p4d_leaf p4d_leaf -static inline bool p4d_leaf(p4d_t p4d) -{ - /* No 512 GiB pages yet */ - return 0; -} - #define pte_page(pte) pfn_to_page(pte_pfn(pte)) #define pmd_leaf pmd_leaf @@ -1472,9 +1465,6 @@ static inline bool pgdp_maps_userspace(void *__ptr) return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START); } -#define pgd_leaf pgd_leaf -static inline bool pgd_leaf(pgd_t pgd) { return false; } - #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION /* * All top-level MITIGATION_PAGE_TABLE_ISOLATION page tables are order-1 pages diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index b89f8f1194a9..f06e5d6a2747 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -41,11 +41,9 @@ static inline void sync_initial_page_table(void) { } pr_err("%s:%d: bad pud %p(%016lx)\n", \ __FILE__, __LINE__, &(e), pud_val(e)) -#if CONFIG_PGTABLE_LEVELS >= 5 #define p4d_ERROR(e) \ pr_err("%s:%d: bad p4d %p(%016lx)\n", \ __FILE__, __LINE__, &(e), p4d_val(e)) -#endif #define pgd_ERROR(e) \ pr_err("%s:%d: bad pgd %p(%016lx)\n", \ diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 5bb782d856f2..4604f924d8b8 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h 
@@ -23,7 +23,6 @@ typedef struct { pmdval_t pmd; } pmd_t; extern unsigned int __pgtable_l5_enabled; -#ifdef CONFIG_X86_5LEVEL #ifdef USE_EARLY_PGTABLE_L5 /* * cpu_feature_enabled() is not available in early boot code. @@ -37,19 +36,11 @@ static inline bool pgtable_l5_enabled(void) #define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57) #endif /* USE_EARLY_PGTABLE_L5 */ -#else -#define pgtable_l5_enabled() 0 -#endif /* CONFIG_X86_5LEVEL */ - extern unsigned int pgdir_shift; extern unsigned int ptrs_per_p4d; #endif /* !__ASSEMBLER__ */ -#define SHARED_KERNEL_PMD 0 - -#ifdef CONFIG_X86_5LEVEL - /* * PGDIR_SHIFT determines what a top-level page table entry can map */ @@ -67,17 +58,6 @@ extern unsigned int ptrs_per_p4d; #define MAX_POSSIBLE_PHYSMEM_BITS 52 -#else /* CONFIG_X86_5LEVEL */ - -/* - * PGDIR_SHIFT determines what a top-level page table entry can map - */ -#define PGDIR_SHIFT 39 -#define PTRS_PER_PGD 512 -#define MAX_PTRS_PER_P4D 1 - -#endif /* CONFIG_X86_5LEVEL */ - /* * 3rd level page */ @@ -130,15 +110,9 @@ extern unsigned int ptrs_per_p4d; #define __VMEMMAP_BASE_L4 0xffffea0000000000UL #define __VMEMMAP_BASE_L5 0xffd4000000000000UL -#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT # define VMALLOC_START vmalloc_base # define VMALLOC_SIZE_TB (pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4) # define VMEMMAP_START vmemmap_base -#else -# define VMALLOC_START __VMALLOC_BASE_L4 -# define VMALLOC_SIZE_TB VMALLOC_SIZE_TB_L4 -# define VMEMMAP_START __VMEMMAP_BASE_L4 -#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ #ifdef CONFIG_RANDOMIZE_MEMORY # define DIRECT_MAP_PHYSMEM_END direct_map_physmem_end diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 5d2f7e5aff26..bde58f6510ac 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -16,7 +16,7 @@ struct vm86; #include <uapi/asm/sigcontext.h> #include <asm/current.h> #include <asm/cpufeatures.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/page.h> #include <asm/pgtable_types.h> #include <asm/percpu.h> @@ -514,15 +514,14 @@ struct thread_struct { struct thread_shstk shstk; #endif - - /* Floating point and extended processor state */ - struct fpu fpu; - /* - * WARNING: 'fpu' is dynamically-sized. It *MUST* be at - * the end. - */ }; +#ifdef CONFIG_X86_DEBUG_FPU +extern struct fpu *x86_task_fpu(struct task_struct *task); +#else +# define x86_task_fpu(task) ((struct fpu *)((void *)(task) + sizeof(*(task)))) +#endif + extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size); static inline void arch_thread_struct_whitelist(unsigned long *offset, @@ -734,6 +733,7 @@ void store_cpu_caps(struct cpuinfo_x86 *info); enum l1tf_mitigations { L1TF_MITIGATION_OFF, + L1TF_MITIGATION_AUTO, L1TF_MITIGATION_FLUSH_NOWARN, L1TF_MITIGATION_FLUSH, L1TF_MITIGATION_FLUSH_NOSMT, diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 011bf67a1866..bd6afe805cf6 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -9,6 +9,8 @@ #include <linux/resctrl_types.h> #include <linux/sched.h> +#include <asm/msr.h> + /* * This value can never be a valid CLOSID, and is used when mapping a * (closid, rmid) pair to an index and back. 
On x86 only the RMID is diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ad9212df0ec0..6324f4c6c545 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -52,6 +52,7 @@ extern void reserve_standard_io_resources(void); extern void i386_reserve_resources(void); extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp); extern void startup_64_setup_gdt_idt(void); +extern void startup_64_load_idt(void *vc_handler); extern void early_setup_idt(void); extern void __init do_early_exception(struct pt_regs *regs, int trapnr); diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/include/asm/sev-internal.h new file mode 100644 index 000000000000..3dfd306d1c9e --- /dev/null +++ b/arch/x86/include/asm/sev-internal.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define DR7_RESET_VALUE 0x400 + +extern struct ghcb boot_ghcb_page; +extern u64 sev_hv_features; +extern u64 sev_secrets_pa; + +/* #VC handler runtime per-CPU data */ +struct sev_es_runtime_data { + struct ghcb ghcb_page; + + /* + * Reserve one page per CPU as backup storage for the unencrypted GHCB. + * It is needed when an NMI happens while the #VC handler uses the real + * GHCB, and the NMI handler itself is causing another #VC exception. In + * that case the GHCB content of the first handler needs to be backed up + * and restored. + */ + struct ghcb backup_ghcb; + + /* + * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions. + * There is no need for it to be atomic, because nothing is written to + * the GHCB between the read and the write of ghcb_active. So it is safe + * to use it when a nested #VC exception happens before the write. + * + * This is necessary for example in the #VC->NMI->#VC case when the NMI + * happens while the first #VC handler uses the GHCB. When the NMI code + * raises a second #VC handler it might overwrite the contents of the + * GHCB written by the first handler. To avoid this the content of the + * GHCB is saved and restored when the GHCB is detected to be in use + * already. + */ + bool ghcb_active; + bool backup_ghcb_active; + + /* + * Cached DR7 value - write it on DR7 writes and return it on reads. + * That value will never make it to the real hardware DR7 as debugging + * is currently unsupported in SEV-ES guests. 
+ */ + unsigned long dr7; +}; + +struct ghcb_state { + struct ghcb *ghcb; +}; + +extern struct svsm_ca boot_svsm_ca_page; + +struct ghcb *__sev_get_ghcb(struct ghcb_state *state); +void __sev_put_ghcb(struct ghcb_state *state); + +DECLARE_PER_CPU(struct sev_es_runtime_data*, runtime_data); +DECLARE_PER_CPU(struct sev_es_save_area *, sev_vmsa); + +void early_set_pages_state(unsigned long vaddr, unsigned long paddr, + unsigned long npages, enum psc_op op); + +DECLARE_PER_CPU(struct svsm_ca *, svsm_caa); +DECLARE_PER_CPU(u64, svsm_caa_pa); + +extern struct svsm_ca *boot_svsm_caa; +extern u64 boot_svsm_caa_pa; + +static __always_inline struct svsm_ca *svsm_get_caa(void) +{ + if (sev_cfg.use_cas) + return this_cpu_read(svsm_caa); + else + return boot_svsm_caa; +} + +static __always_inline u64 svsm_get_caa_pa(void) +{ + if (sev_cfg.use_cas) + return this_cpu_read(svsm_caa_pa); + else + return boot_svsm_caa_pa; +} + +int svsm_perform_call_protocol(struct svsm_call *call); + +static inline u64 sev_es_rd_ghcb_msr(void) +{ + return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB); +} + +static __always_inline void sev_es_wr_ghcb_msr(u64 val) +{ + u32 low, high; + + low = (u32)(val); + high = (u32)(val >> 32); + + native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high); +} + +void snp_register_ghcb_early(unsigned long paddr); +bool sev_es_negotiate_protocol(void); +bool sev_es_check_cpu_features(void); +u64 get_hv_features(void); + +const struct snp_cpuid_table *snp_cpuid_get_table(void); diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index ba7999f66abe..6158893786d6 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -15,6 +15,7 @@ #include <asm/sev-common.h> #include <asm/coco.h> #include <asm/set_memory.h> +#include <asm/svm.h> #define GHCB_PROTOCOL_MIN 1ULL #define GHCB_PROTOCOL_MAX 2ULL @@ -83,6 +84,36 @@ extern void vc_no_ghcb(void); extern void vc_boot_ghcb(void); extern bool handle_vc_boot_ghcb(struct pt_regs *regs); +/* + * Individual entries of the SNP CPUID table, as defined by the SNP + * Firmware ABI, Revision 0.9, Section 7.1, Table 14. + */ +struct snp_cpuid_fn { + u32 eax_in; + u32 ecx_in; + u64 xcr0_in; + u64 xss_in; + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; + u64 __reserved; +} __packed; + +/* + * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9, + * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit + * of 64 entries per CPUID table. 
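A hedged sketch of the GHCB get/put pairing declared in sev-internal.h above; the VMGEXIT details are elided and this is illustrative rather than code from the patch:

struct ghcb_state state;
struct ghcb *ghcb;

ghcb = __sev_get_ghcb(&state);		/* per-CPU GHCB, or the backup if nested */
/* ... fill the GHCB shared fields and issue the VMGEXIT here ... */
__sev_put_ghcb(&state);			/* restore the backup copy if one was used */
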
+ */ +#define SNP_CPUID_COUNT_MAX 64 + +struct snp_cpuid_table { + u32 count; + u32 __reserved1; + u64 __reserved2; + struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX]; +} __packed; + /* PVALIDATE return codes */ #define PVALIDATE_FAIL_SIZEMISMATCH 6 @@ -484,6 +515,34 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req void __init snp_secure_tsc_prepare(void); void __init snp_secure_tsc_init(void); +static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb) +{ + ghcb->save.sw_exit_code = 0; + __builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); +} + +void vc_forward_exception(struct es_em_ctxt *ctxt); + +/* I/O parameters for CPUID-related helpers */ +struct cpuid_leaf { + u32 fn; + u32 subfn; + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; +}; + +int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf); + +void __noreturn sev_es_terminate(unsigned int set, unsigned int reason); +enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, + struct es_em_ctxt *ctxt, + u64 exit_code, u64 exit_info_1, + u64 exit_info_2); + +extern struct ghcb *boot_ghcb; + #else /* !CONFIG_AMD_MEM_ENCRYPT */ #define snp_vmpl 0 diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h index a341c878e977..b8027b63cd7a 100644 --- a/arch/x86/include/asm/simd.h +++ b/arch/x86/include/asm/simd.h @@ -1,6 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SIMD_H +#define _ASM_SIMD_H #include <asm/fpu/api.h> +#include <linux/compiler_attributes.h> +#include <linux/types.h> /* * may_use_simd - whether it is allowable at this time to issue SIMD @@ -10,3 +14,5 @@ static __must_check inline bool may_use_simd(void) { return irq_fpu_usable(); } + +#endif /* _ASM_SIMD_H */ diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h index 658b690b2ccb..00b7e0398210 100644 --- a/arch/x86/include/asm/spec-ctrl.h +++ b/arch/x86/include/asm/spec-ctrl.h @@ -84,7 +84,7 @@ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) static __always_inline void __update_spec_ctrl(u64 val) { __this_cpu_write(x86_spec_ctrl_current, val); - native_wrmsrl(MSR_IA32_SPEC_CTRL, val); + native_wrmsrq(MSR_IA32_SPEC_CTRL, val); } #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 6266d6b9e0b8..ecda17efa042 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -10,30 +10,19 @@ #include <linux/irqflags.h> #include <linux/jump_label.h> -/* - * The compiler should not reorder volatile asm statements with respect to each - * other: they should execute in program order. However GCC 4.9.x and 5.x have - * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder - * volatile asm. The write functions are not affected since they have memory - * clobbers preventing reordering. To prevent reads from being reordered with - * respect to writes, use a dummy memory operand. 
- */ - -#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL) - void native_write_cr0(unsigned long val); static inline unsigned long native_read_cr0(void) { unsigned long val; - asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER); + asm volatile("mov %%cr0,%0" : "=r" (val)); return val; } static __always_inline unsigned long native_read_cr2(void) { unsigned long val; - asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER); + asm volatile("mov %%cr2,%0" : "=r" (val)); return val; } @@ -45,7 +34,7 @@ static __always_inline void native_write_cr2(unsigned long val) static __always_inline unsigned long __native_read_cr3(void) { unsigned long val; - asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER); + asm volatile("mov %%cr3,%0" : "=r" (val)); return val; } @@ -66,10 +55,10 @@ static inline unsigned long native_read_cr4(void) asm volatile("1: mov %%cr4, %0\n" "2:\n" _ASM_EXTABLE(1b, 2b) - : "=r" (val) : "0" (0), __FORCE_ORDER); + : "=r" (val) : "0" (0)); #else /* CR4 always exists on x86_64. */ - asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER); + asm volatile("mov %%cr4,%0" : "=r" (val)); #endif return val; } diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 32c0d981a82a..e9cce169bb4c 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -33,11 +33,11 @@ extern size_t strlen(const char *s); static __always_inline void *__memcpy(void *to, const void *from, size_t n) { int d0, d1, d2; - asm volatile("rep ; movsl\n\t" + asm volatile("rep movsl\n\t" "movl %4,%%ecx\n\t" "andl $3,%%ecx\n\t" "jz 1f\n\t" - "rep ; movsb\n\t" + "rep movsb\n\t" "1:" : "=&c" (d0), "=&D" (d1), "=&S" (d2) : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from) @@ -89,7 +89,7 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, if (n >= 5 * 4) { /* large block: use rep prefix */ int ecx; - asm volatile("rep ; movsl" + asm volatile("rep movsl" : "=&c" (ecx), "=&D" (edi), "=&S" (esi) : "0" (n / 4), "1" (edi), "2" (esi) : "memory" @@ -165,8 +165,7 @@ extern void *memchr(const void *cs, int c, size_t count); static inline void *__memset_generic(void *s, char c, size_t count) { int d0, d1; - asm volatile("rep\n\t" - "stosb" + asm volatile("rep stosb" : "=&c" (d0), "=&D" (d1) : "a" (c), "1" (s), "0" (count) : "memory"); @@ -199,8 +198,7 @@ extern void *memset(void *, int, size_t); static inline void *memset16(uint16_t *s, uint16_t v, size_t n) { int d0, d1; - asm volatile("rep\n\t" - "stosw" + asm volatile("rep stosw" : "=&c" (d0), "=&D" (d1) : "a" (v), "1" (s), "0" (n) : "memory"); @@ -211,8 +209,7 @@ static inline void *memset16(uint16_t *s, uint16_t v, size_t n) static inline void *memset32(uint32_t *s, uint32_t v, size_t n) { int d0, d1; - asm volatile("rep\n\t" - "stosl" + asm volatile("rep stosl" : "=&c" (d0), "=&D" (d1) : "a" (v), "1" (s), "0" (n) : "memory"); diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index d8416b3bf832..e8e5aab06255 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -9,6 +9,7 @@ #include <asm/desc.h> #include <asm/fpu/api.h> +#include <asm/msr.h> /* image of the saved processor state */ struct saved_context { diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h index 54df06687d83..b512f9665f78 100644 --- a/arch/x86/include/asm/suspend_64.h +++ b/arch/x86/include/asm/suspend_64.h @@ -9,6 +9,7 @@ #include <asm/desc.h> #include <asm/fpu/api.h> +#include <asm/msr.h> 
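For reference, the rep stosw/rep stosl based memset16()/memset32() helpers above take an element count rather than a byte count; a hedged sketch with hypothetical buffers:

u16 scanline[640];
u32 pixels[640];

memset16(scanline, 0x7bef, ARRAY_SIZE(scanline));	/* fill 640 16-bit values */
memset32(pixels, 0x00ff00ff, ARRAY_SIZE(pixels));	/* fill 640 32-bit values */
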
/* * Image of the saved processor state, used by the low level ACPI suspend to diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 75248546403d..499b1c15cc8b 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -52,6 +52,8 @@ do { \ } while (0) #ifdef CONFIG_X86_32 +#include <asm/msr.h> + static inline void refresh_sysenter_cs(struct thread_struct *thread) { /* Only happens when SEP is enabled, no need to test "SEP"arately: */ @@ -59,7 +61,7 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread) return; this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs); - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); + wrmsrq(MSR_IA32_SYSENTER_CS, thread->sysenter_cs); } #endif diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index ab9e143ec9fe..5337f1be18f6 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -11,11 +11,11 @@ * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5. * Raise it if needed. */ -#define POKE_MAX_OPCODE_SIZE 5 +#define TEXT_POKE_MAX_OPCODE_SIZE 5 extern void text_poke_early(void *addr, const void *opcode, size_t len); -extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len); +extern void text_poke_apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len); /* * Clear and restore the kernel write-protection flag on the local CPU. @@ -32,17 +32,17 @@ extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u * an inconsistent instruction while you patch. */ extern void *text_poke(void *addr, const void *opcode, size_t len); -extern void text_poke_sync(void); +extern void smp_text_poke_sync_each_cpu(void); extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len); extern void *text_poke_copy(void *addr, const void *opcode, size_t len); #define text_poke_copy text_poke_copy extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok); extern void *text_poke_set(void *addr, int c, size_t len); -extern int poke_int3_handler(struct pt_regs *regs); -extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate); +extern int smp_text_poke_int3_handler(struct pt_regs *regs); +extern void smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate); -extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate); -extern void text_poke_finish(void); +extern void smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate); +extern void smp_text_poke_batch_finish(void); #define INT3_INSN_SIZE 1 #define INT3_INSN_OPCODE 0xCC @@ -82,7 +82,7 @@ static __always_inline int text_opcode_size(u8 opcode) } union text_poke_insn { - u8 text[POKE_MAX_OPCODE_SIZE]; + u8 text[TEXT_POKE_MAX_OPCODE_SIZE]; struct { u8 opcode; s32 disp; @@ -128,8 +128,8 @@ void *text_gen_insn(u8 opcode, const void *addr, const void *dest) } extern int after_bootmem; -extern __ro_after_init struct mm_struct *poking_mm; -extern __ro_after_init unsigned long poking_addr; +extern __ro_after_init struct mm_struct *text_poke_mm; +extern __ro_after_init unsigned long text_poke_mm_addr; #ifndef CONFIG_UML_X86 static __always_inline @@ -142,13 +142,14 @@ static __always_inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) { /* - * The int3 handler in entry_64.S adds a gap 
between the + * The INT3 handler in entry_64.S adds a gap between the * stack where the break point happened, and the saving of * pt_regs. We can extend the original stack because of - * this gap. See the idtentry macro's create_gap option. + * this gap. See the idtentry macro's X86_TRAP_BP logic. * - * Similarly entry_32.S will have a gap on the stack for (any) hardware - * exception and pt_regs; see FIXUP_FRAME. + * Similarly, entry_32.S will have a gap on the stack for + * (any) hardware exception and pt_regs; see the + * FIXUP_FRAME macro. */ regs->sp -= sizeof(unsigned long); *(unsigned long *)regs->sp = val; diff --git a/arch/x86/include/asm/trace/common.h b/arch/x86/include/asm/trace/common.h deleted file mode 100644 index f0f9bcdb74d9..000000000000 --- a/arch/x86/include/asm/trace/common.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_TRACE_COMMON_H -#define _ASM_TRACE_COMMON_H - -#ifdef CONFIG_TRACING -DECLARE_STATIC_KEY_FALSE(trace_pagefault_key); -#define trace_pagefault_enabled() \ - static_branch_unlikely(&trace_pagefault_key) -#else -static inline bool trace_pagefault_enabled(void) { return false; } -#endif - -#endif diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index 4645a6334063..0454d5e60e5d 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -74,11 +74,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_dropped, TP_ARGS(fpu) ); -DEFINE_EVENT(x86_fpu, x86_fpu_copy_src, - TP_PROTO(struct fpu *fpu), - TP_ARGS(fpu) -); - DEFINE_EVENT(x86_fpu, x86_fpu_copy_dst, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h index 88e7f0f3bf62..7408bebdfde0 100644 --- a/arch/x86/include/asm/trace/irq_vectors.h +++ b/arch/x86/include/asm/trace/irq_vectors.h @@ -6,7 +6,6 @@ #define _TRACE_IRQ_VECTORS_H #include <linux/tracepoint.h> -#include <asm/trace/common.h> #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 94408a784c8e..4f7f09f50552 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -5,10 +5,65 @@ #ifndef _ASM_X86_TSC_H #define _ASM_X86_TSC_H +#include <asm/asm.h> #include <asm/cpufeature.h> #include <asm/processor.h> #include <asm/msr.h> +/** + * rdtsc() - returns the current TSC without ordering constraints + * + * rdtsc() returns the result of RDTSC as a 64-bit integer. The + * only ordering constraint it supplies is the ordering implied by + * "asm volatile": it will put the RDTSC in the place you expect. The + * CPU can and will speculatively execute that RDTSC, though, so the + * results can be non-monotonic if compared on different CPUs. + */ +static __always_inline u64 rdtsc(void) +{ + EAX_EDX_DECLARE_ARGS(val, low, high); + + asm volatile("rdtsc" : EAX_EDX_RET(val, low, high)); + + return EAX_EDX_VAL(val, low, high); +} + +/** + * rdtsc_ordered() - read the current TSC in program order + * + * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer. + * It is ordered like a load to a global in-memory counter. It should + * be impossible to observe non-monotonic rdtsc_unordered() behavior + * across multiple CPUs as long as the TSC is synced. + */ +static __always_inline u64 rdtsc_ordered(void) +{ + EAX_EDX_DECLARE_ARGS(val, low, high); + + /* + * The RDTSC instruction is not ordered relative to memory + * access. 
The Intel SDM and the AMD APM are both vague on this + * point, but empirically an RDTSC instruction can be + * speculatively executed before prior loads. An RDTSC + * immediately after an appropriate barrier appears to be + * ordered as a normal load, that is, it provides the same + * ordering guarantees as reading from a global memory location + * that some other imaginary CPU is updating continuously with a + * time stamp. + * + * Thus, use the preferred barrier on the respective CPU, aiming for + * RDTSCP as the default. + */ + asm volatile(ALTERNATIVE_2("rdtsc", + "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC, + "rdtscp", X86_FEATURE_RDTSCP) + : EAX_EDX_RET(val, low, high) + /* RDTSCP clobbers ECX with MSR_TSC_AUX. */ + :: "ecx"); + + return EAX_EDX_VAL(val, low, high); +} + /* * Standard way to access the cycle counter. */ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index c52f0133425b..c8a5ae35c871 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -26,8 +26,8 @@ extern unsigned long USER_PTR_MAX; */ static inline unsigned long __untagged_addr(unsigned long addr) { - asm (ALTERNATIVE("", - "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM) + asm_inline (ALTERNATIVE("", "and " __percpu_arg([mask]) ", %[addr]", + X86_FEATURE_LAM) : [addr] "+r" (addr) : [mask] "m" (__my_cpu_var(tlbstate_untag_mask))); @@ -54,7 +54,7 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, #endif #define valid_user_address(x) \ - ((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX)) + likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX)) /* * Masking the user address is an alternative to a conditional diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h index 80be0da733df..b7253ef3205a 100644 --- a/arch/x86/include/asm/vdso.h +++ b/arch/x86/include/asm/vdso.h @@ -27,17 +27,9 @@ struct vdso_image { long sym_vdso32_rt_sigreturn_landing_pad; }; -#ifdef CONFIG_X86_64 extern const struct vdso_image vdso_image_64; -#endif - -#ifdef CONFIG_X86_X32_ABI extern const struct vdso_image vdso_image_x32; -#endif - -#if defined CONFIG_X86_32 || defined CONFIG_COMPAT extern const struct vdso_image vdso_image_32; -#endif extern int __init init_vdso_image(const struct vdso_image *image); diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h index c9b2ba7a9ec4..7000aeb59aa2 100644 --- a/arch/x86/include/asm/vdso/processor.h +++ b/arch/x86/include/asm/vdso/processor.h @@ -7,15 +7,15 @@ #ifndef __ASSEMBLER__ -/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ -static __always_inline void rep_nop(void) +/* PAUSE is a good thing to insert into busy-wait loops. 
*/ +static __always_inline void native_pause(void) { - asm volatile("rep; nop" ::: "memory"); + asm volatile("pause" ::: "memory"); } static __always_inline void cpu_relax(void) { - rep_nop(); + native_pause(); } struct getcpu_cache; diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 213cf5379a5a..36698cc9fb44 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -292,6 +292,7 @@ struct x86_hyper_runtime { * @set_wallclock: set time back to HW clock * @is_untracked_pat_range exclude from PAT logic * @nmi_init enable NMI on cpus + * @get_nmi_reason get the reason an NMI was received * @save_sched_clock_state: save state for sched_clock() on suspend * @restore_sched_clock_state: restore state for sched_clock() on resume * @apic_post_init: adjust apic if needed diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index bd0fc69a10a7..c2fc7869b996 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -43,7 +43,7 @@ extern struct start_info *xen_start_info; static inline uint32_t xen_cpuid_base(void) { - return hypervisor_cpuid_base(XEN_SIGNATURE, 2); + return cpuid_base_hypervisor(XEN_SIGNATURE, 2); } struct pci_dev; diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 84cfa179802c..99a783fd4691 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -141,7 +141,6 @@ obj-$(CONFIG_OF) += devicetree.o obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o -obj-$(CONFIG_TRACING) += tracepoint.o obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o obj-$(CONFIG_X86_UMIP) += umip.o diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c index 77bfb846490c..7047124490f6 100644 --- a/arch/x86/kernel/acpi/cppc.c +++ b/arch/x86/kernel/acpi/cppc.c @@ -49,7 +49,7 @@ int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) { int err; - err = rdmsrl_safe_on_cpu(cpunum, reg->address, val); + err = rdmsrq_safe_on_cpu(cpunum, reg->address, val); if (!err) { u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, reg->bit_offset); @@ -65,7 +65,7 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) u64 rd_val; int err; - err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val); + err = rdmsrq_safe_on_cpu(cpunum, reg->address, &rd_val); if (!err) { u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, reg->bit_offset); @@ -74,7 +74,7 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) val &= mask; rd_val &= ~mask; rd_val |= val; - err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val); + err = wrmsrq_safe_on_cpu(cpunum, reg->address, rd_val); } return err; } @@ -147,7 +147,7 @@ int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf) int ret; if (cpu_feature_enabled(X86_FEATURE_CPPC)) { - ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val); + ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val); if (ret) goto out; @@ -272,7 +272,7 @@ int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator) } /* detect if running on heterogeneous design */ - if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) { + if (cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES)) { switch (core_type) { case TOPO_CPU_TYPE_UNKNOWN: pr_warn("Undefined core type found for cpu %d\n", cpu); diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index d5ac34186555..8698d66563ed 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -14,7 +14,7 
@@ #include <acpi/processor.h> #include <asm/cpu_device_id.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/mwait.h> #include <asm/special_insns.h> #include <asm/smp.h> diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 6dfecb27b846..91fa262f0e30 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -16,6 +16,7 @@ #include <asm/cacheflush.h> #include <asm/realmode.h> #include <asm/hypervisor.h> +#include <asm/msr.h> #include <asm/smp.h> #include <linux/ftrace.h> diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 45bcff181cba..ecfe7b497cad 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -1,39 +1,17 @@ // SPDX-License-Identifier: GPL-2.0-only #define pr_fmt(fmt) "SMP alternatives: " fmt -#include <linux/module.h> -#include <linux/sched.h> +#include <linux/mmu_context.h> #include <linux/perf_event.h> -#include <linux/mutex.h> -#include <linux/list.h> -#include <linux/stringify.h> -#include <linux/highmem.h> -#include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/memory.h> -#include <linux/stop_machine.h> -#include <linux/slab.h> -#include <linux/kdebug.h> -#include <linux/kprobes.h> -#include <linux/mmu_context.h> -#include <linux/bsearch.h> -#include <linux/sync_core.h> #include <linux/execmem.h> + #include <asm/text-patching.h> -#include <asm/alternative.h> -#include <asm/sections.h> -#include <asm/mce.h> -#include <asm/nmi.h> -#include <asm/cacheflush.h> -#include <asm/tlbflush.h> #include <asm/insn.h> -#include <asm/io.h> -#include <asm/fixmap.h> -#include <asm/paravirt.h> -#include <asm/asm-prototypes.h> -#include <asm/cfi.h> #include <asm/ibt.h> #include <asm/set_memory.h> +#include <asm/nmi.h> int __read_mostly alternatives_patched; @@ -339,13 +317,6 @@ static void add_nop(u8 *buf, unsigned int len) *buf = INT3_INSN_OPCODE; } -extern s32 __retpoline_sites[], __retpoline_sites_end[]; -extern s32 __return_sites[], __return_sites_end[]; -extern s32 __cfi_sites[], __cfi_sites_end[]; -extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[]; -extern s32 __smp_locks[], __smp_locks_end[]; -void text_poke_early(void *addr, const void *opcode, size_t len); - /* * Matches NOP and NOPL, not any of the other possible NOPs. */ @@ -537,7 +508,7 @@ static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, } } -void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len) +void text_poke_apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len) { __apply_relocation(buf, instr, instrlen, repl, repl_len); optimize_nops(instr, buf, instrlen); @@ -625,7 +596,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, DPRINTK(ALT, "alt table %px, -> %px", start, end); /* - * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using + * KASAN_SHADOW_START is defined using * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here. * During the process, KASAN becomes confused seeing partial LA57 * conversion and triggers a false-positive out-of-bound report. 
@@ -693,7 +664,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, for (; insn_buff_sz < a->instrlen; insn_buff_sz++) insn_buff[insn_buff_sz] = 0x90; - apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen); + text_poke_apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen); DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr); DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement); @@ -2305,7 +2276,7 @@ __visible noinline void __init __alt_reloc_selftest(void *arg) static noinline void __init alt_reloc_selftest(void) { /* - * Tests apply_relocation(). + * Tests text_poke_apply_relocation(). * * This has a relative immediate (CALL) in a place other than the first * instruction and additionally on x86_64 we get a RIP-relative LEA: @@ -2442,76 +2413,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode, } } -typedef struct { - struct mm_struct *mm; -} temp_mm_state_t; - -/* - * Using a temporary mm allows to set temporary mappings that are not accessible - * by other CPUs. Such mappings are needed to perform sensitive memory writes - * that override the kernel memory protections (e.g., W^X), without exposing the - * temporary page-table mappings that are required for these write operations to - * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the - * mapping is torn down. - * - * Context: The temporary mm needs to be used exclusively by a single core. To - * harden security IRQs must be disabled while the temporary mm is - * loaded, thereby preventing interrupt handler bugs from overriding - * the kernel memory protection. - */ -static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm) -{ - temp_mm_state_t temp_state; - - lockdep_assert_irqs_disabled(); - - /* - * Make sure not to be in TLB lazy mode, as otherwise we'll end up - * with a stale address space WITHOUT being in lazy mode after - * restoring the previous mm. - */ - if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) - leave_mm(); - - temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm); - switch_mm_irqs_off(NULL, mm, current); - - /* - * If breakpoints are enabled, disable them while the temporary mm is - * used. Userspace might set up watchpoints on addresses that are used - * in the temporary mm, which would lead to wrong signals being sent or - * crashes. - * - * Note that breakpoints are not disabled selectively, which also causes - * kernel breakpoints (e.g., perf's) to be disabled. This might be - * undesirable, but still seems reasonable as the code that runs in the - * temporary mm should be short. - */ - if (hw_breakpoint_active()) - hw_breakpoint_disable(); - - return temp_state; -} - -__ro_after_init struct mm_struct *poking_mm; -__ro_after_init unsigned long poking_addr; - -static inline void unuse_temporary_mm(temp_mm_state_t prev_state) -{ - lockdep_assert_irqs_disabled(); - - switch_mm_irqs_off(NULL, prev_state.mm, current); - - /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */ - cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm)); - - /* - * Restore the breakpoints if they were disabled before the temporary mm - * was loaded. 
- */ - if (hw_breakpoint_active()) - hw_breakpoint_restore(); -} +__ro_after_init struct mm_struct *text_poke_mm; +__ro_after_init unsigned long text_poke_mm_addr; static void text_poke_memcpy(void *dst, const void *src, size_t len) { @@ -2531,7 +2434,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l { bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE; struct page *pages[2] = {NULL}; - temp_mm_state_t prev; + struct mm_struct *prev_mm; unsigned long flags; pte_t pte, *ptep; spinlock_t *ptl; @@ -2568,7 +2471,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l /* * The lock is not really needed, but this allows to avoid open-coding. */ - ptep = get_locked_pte(poking_mm, poking_addr, &ptl); + ptep = get_locked_pte(text_poke_mm, text_poke_mm_addr, &ptl); /* * This must not fail; preallocated in poking_init(). @@ -2578,21 +2481,21 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l local_irq_save(flags); pte = mk_pte(pages[0], pgprot); - set_pte_at(poking_mm, poking_addr, ptep, pte); + set_pte_at(text_poke_mm, text_poke_mm_addr, ptep, pte); if (cross_page_boundary) { pte = mk_pte(pages[1], pgprot); - set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte); + set_pte_at(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1, pte); } /* * Loading the temporary mm behaves as a compiler barrier, which * guarantees that the PTE will be set at the time memcpy() is done. */ - prev = use_temporary_mm(poking_mm); + prev_mm = use_temporary_mm(text_poke_mm); kasan_disable_current(); - func((u8 *)poking_addr + offset_in_page(addr), src, len); + func((u8 *)text_poke_mm_addr + offset_in_page(addr), src, len); kasan_enable_current(); /* @@ -2601,22 +2504,22 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l */ barrier(); - pte_clear(poking_mm, poking_addr, ptep); + pte_clear(text_poke_mm, text_poke_mm_addr, ptep); if (cross_page_boundary) - pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1); + pte_clear(text_poke_mm, text_poke_mm_addr + PAGE_SIZE, ptep + 1); /* * Loading the previous page-table hierarchy requires a serializing * instruction that already allows the core to see the updated version. * Xen-PV is assumed to serialize execution in a similar manner. */ - unuse_temporary_mm(prev); + unuse_temporary_mm(prev_mm); /* * Flushing the TLB might involve IPIs, which would require enabled * IRQs, but not if the mm is not used, as it is in this point. */ - flush_tlb_mm_range(poking_mm, poking_addr, poking_addr + + flush_tlb_mm_range(text_poke_mm, text_poke_mm_addr, text_poke_mm_addr + (cross_page_boundary ? 2 : 1) * PAGE_SIZE, PAGE_SHIFT, false); @@ -2752,7 +2655,7 @@ static void do_sync_core(void *info) sync_core(); } -void text_poke_sync(void) +void smp_text_poke_sync_each_cpu(void) { on_each_cpu(do_sync_core, NULL, 1); } @@ -2762,64 +2665,66 @@ void text_poke_sync(void) * this thing. When len == 6 everything is prefixed with 0x0f and we map * opcode to Jcc.d8, using len to distinguish. 
*/ -struct text_poke_loc { +struct smp_text_poke_loc { /* addr := _stext + rel_addr */ s32 rel_addr; s32 disp; u8 len; u8 opcode; - const u8 text[POKE_MAX_OPCODE_SIZE]; - /* see text_poke_bp_batch() */ + const u8 text[TEXT_POKE_MAX_OPCODE_SIZE]; + /* see smp_text_poke_batch_finish() */ u8 old; }; -struct bp_patching_desc { - struct text_poke_loc *vec; +#define TEXT_POKE_ARRAY_MAX (PAGE_SIZE / sizeof(struct smp_text_poke_loc)) + +static struct smp_text_poke_array { + struct smp_text_poke_loc vec[TEXT_POKE_ARRAY_MAX]; int nr_entries; - atomic_t refs; -}; +} text_poke_array; -static struct bp_patching_desc bp_desc; +static DEFINE_PER_CPU(atomic_t, text_poke_array_refs); -static __always_inline -struct bp_patching_desc *try_get_desc(void) +/* + * These four __always_inline annotations imply noinstr, necessary + * due to smp_text_poke_int3_handler() being noinstr: + */ + +static __always_inline bool try_get_text_poke_array(void) { - struct bp_patching_desc *desc = &bp_desc; + atomic_t *refs = this_cpu_ptr(&text_poke_array_refs); - if (!raw_atomic_inc_not_zero(&desc->refs)) - return NULL; + if (!raw_atomic_inc_not_zero(refs)) + return false; - return desc; + return true; } -static __always_inline void put_desc(void) +static __always_inline void put_text_poke_array(void) { - struct bp_patching_desc *desc = &bp_desc; + atomic_t *refs = this_cpu_ptr(&text_poke_array_refs); smp_mb__before_atomic(); - raw_atomic_dec(&desc->refs); + raw_atomic_dec(refs); } -static __always_inline void *text_poke_addr(struct text_poke_loc *tp) +static __always_inline void *text_poke_addr(const struct smp_text_poke_loc *tpl) { - return _stext + tp->rel_addr; + return _stext + tpl->rel_addr; } -static __always_inline int patch_cmp(const void *key, const void *elt) +static __always_inline int patch_cmp(const void *tpl_a, const void *tpl_b) { - struct text_poke_loc *tp = (struct text_poke_loc *) elt; - - if (key < text_poke_addr(tp)) + if (tpl_a < text_poke_addr(tpl_b)) return -1; - if (key > text_poke_addr(tp)) + if (tpl_a > text_poke_addr(tpl_b)) return 1; return 0; } -noinstr int poke_int3_handler(struct pt_regs *regs) +noinstr int smp_text_poke_int3_handler(struct pt_regs *regs) { - struct bp_patching_desc *desc; - struct text_poke_loc *tp; + struct smp_text_poke_loc *tpl; int ret = 0; void *ip; @@ -2828,41 +2733,40 @@ noinstr int poke_int3_handler(struct pt_regs *regs) /* * Having observed our INT3 instruction, we now must observe - * bp_desc with non-zero refcount: + * text_poke_array with non-zero refcount: * - * bp_desc.refs = 1 INT3 - * WMB RMB - * write INT3 if (bp_desc.refs != 0) + * text_poke_array_refs = 1 INT3 + * WMB RMB + * write INT3 if (text_poke_array_refs != 0) */ smp_rmb(); - desc = try_get_desc(); - if (!desc) + if (!try_get_text_poke_array()) return 0; /* - * Discount the INT3. See text_poke_bp_batch(). + * Discount the INT3. See smp_text_poke_batch_finish(). */ ip = (void *) regs->ip - INT3_INSN_SIZE; /* * Skip the binary search if there is a single member in the vector. 
*/ - if (unlikely(desc->nr_entries > 1)) { - tp = __inline_bsearch(ip, desc->vec, desc->nr_entries, - sizeof(struct text_poke_loc), + if (unlikely(text_poke_array.nr_entries > 1)) { + tpl = __inline_bsearch(ip, text_poke_array.vec, text_poke_array.nr_entries, + sizeof(struct smp_text_poke_loc), patch_cmp); - if (!tp) + if (!tpl) goto out_put; } else { - tp = desc->vec; - if (text_poke_addr(tp) != ip) + tpl = text_poke_array.vec; + if (text_poke_addr(tpl) != ip) goto out_put; } - ip += tp->len; + ip += tpl->len; - switch (tp->opcode) { + switch (tpl->opcode) { case INT3_INSN_OPCODE: /* * Someone poked an explicit INT3, they'll want to handle it, @@ -2875,16 +2779,16 @@ noinstr int poke_int3_handler(struct pt_regs *regs) break; case CALL_INSN_OPCODE: - int3_emulate_call(regs, (long)ip + tp->disp); + int3_emulate_call(regs, (long)ip + tpl->disp); break; case JMP32_INSN_OPCODE: case JMP8_INSN_OPCODE: - int3_emulate_jmp(regs, (long)ip + tp->disp); + int3_emulate_jmp(regs, (long)ip + tpl->disp); break; case 0x70 ... 0x7f: /* Jcc */ - int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp); + int3_emulate_jcc(regs, tpl->opcode & 0xf, (long)ip, tpl->disp); break; default: @@ -2894,51 +2798,50 @@ noinstr int poke_int3_handler(struct pt_regs *regs) ret = 1; out_put: - put_desc(); + put_text_poke_array(); return ret; } -#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc)) -static struct text_poke_loc tp_vec[TP_VEC_MAX]; -static int tp_vec_nr; - /** - * text_poke_bp_batch() -- update instructions on live kernel on SMP - * @tp: vector of instructions to patch - * @nr_entries: number of entries in the vector + * smp_text_poke_batch_finish() -- update instructions on live kernel on SMP * - * Modify multi-byte instruction by using int3 breakpoint on SMP. - * We completely avoid stop_machine() here, and achieve the - * synchronization using int3 breakpoint. + * Input state: + * text_poke_array.vec: vector of instructions to patch + * text_poke_array.nr_entries: number of entries in the vector + * + * Modify multi-byte instructions by using INT3 breakpoints on SMP. + * We completely avoid using stop_machine() here, and achieve the + * synchronization using INT3 breakpoints and SMP cross-calls. * * The way it is done: * - For each entry in the vector: - * - add a int3 trap to the address that will be patched - * - sync cores + * - add an INT3 trap to the address that will be patched + * - SMP sync all CPUs * - For each entry in the vector: * - update all but the first byte of the patched range - * - sync cores + * - SMP sync all CPUs * - For each entry in the vector: - * - replace the first byte (int3) by the first byte of + * - replace the first byte (INT3) by the first byte of the * replacing opcode - * - sync cores + * - SMP sync all CPUs */ -static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) +void smp_text_poke_batch_finish(void) { unsigned char int3 = INT3_INSN_OPCODE; unsigned int i; int do_sync; - lockdep_assert_held(&text_mutex); + if (!text_poke_array.nr_entries) + return; - bp_desc.vec = tp; - bp_desc.nr_entries = nr_entries; + lockdep_assert_held(&text_mutex); /* - * Corresponds to the implicit memory barrier in try_get_desc() to - * ensure reading a non-zero refcount provides up to date bp_desc data. + * Corresponds to the implicit memory barrier in try_get_text_poke_array() to + * ensure reading a non-zero refcount provides up to date text_poke_array data. 
*/ - atomic_set_release(&bp_desc.refs, 1); + for_each_possible_cpu(i) + atomic_set_release(per_cpu_ptr(&text_poke_array_refs, i), 1); /* * Function tracing can enable thousands of places that need to be @@ -2951,33 +2854,33 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries cond_resched(); /* - * Corresponding read barrier in int3 notifier for making sure the - * nr_entries and handler are correctly ordered wrt. patching. + * Corresponding read barrier in INT3 notifier for making sure the + * text_poke_array.nr_entries and handler are correctly ordered wrt. patching. */ smp_wmb(); /* - * First step: add a int3 trap to the address that will be patched. + * First step: add a INT3 trap to the address that will be patched. */ - for (i = 0; i < nr_entries; i++) { - tp[i].old = *(u8 *)text_poke_addr(&tp[i]); - text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE); + for (i = 0; i < text_poke_array.nr_entries; i++) { + text_poke_array.vec[i].old = *(u8 *)text_poke_addr(&text_poke_array.vec[i]); + text_poke(text_poke_addr(&text_poke_array.vec[i]), &int3, INT3_INSN_SIZE); } - text_poke_sync(); + smp_text_poke_sync_each_cpu(); /* * Second step: update all but the first byte of the patched range. */ - for (do_sync = 0, i = 0; i < nr_entries; i++) { - u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, }; - u8 _new[POKE_MAX_OPCODE_SIZE+1]; - const u8 *new = tp[i].text; - int len = tp[i].len; + for (do_sync = 0, i = 0; i < text_poke_array.nr_entries; i++) { + u8 old[TEXT_POKE_MAX_OPCODE_SIZE+1] = { text_poke_array.vec[i].old, }; + u8 _new[TEXT_POKE_MAX_OPCODE_SIZE+1]; + const u8 *new = text_poke_array.vec[i].text; + int len = text_poke_array.vec[i].len; if (len - INT3_INSN_SIZE > 0) { memcpy(old + INT3_INSN_SIZE, - text_poke_addr(&tp[i]) + INT3_INSN_SIZE, + text_poke_addr(&text_poke_array.vec[i]) + INT3_INSN_SIZE, len - INT3_INSN_SIZE); if (len == 6) { @@ -2986,7 +2889,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries new = _new; } - text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE, + text_poke(text_poke_addr(&text_poke_array.vec[i]) + INT3_INSN_SIZE, new + INT3_INSN_SIZE, len - INT3_INSN_SIZE); @@ -3017,7 +2920,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries * The old instruction is recorded so that the event can be * processed forwards or backwards. */ - perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len); + perf_event_text_poke(text_poke_addr(&text_poke_array.vec[i]), old, len, new, len); } if (do_sync) { @@ -3026,63 +2929,79 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries * not necessary and we'd be safe even without it. But * better safe than sorry (plus there's not only Intel). */ - text_poke_sync(); + smp_text_poke_sync_each_cpu(); } /* - * Third step: replace the first byte (int3) by the first byte of + * Third step: replace the first byte (INT3) by the first byte of the * replacing opcode. */ - for (do_sync = 0, i = 0; i < nr_entries; i++) { - u8 byte = tp[i].text[0]; + for (do_sync = 0, i = 0; i < text_poke_array.nr_entries; i++) { + u8 byte = text_poke_array.vec[i].text[0]; - if (tp[i].len == 6) + if (text_poke_array.vec[i].len == 6) byte = 0x0f; if (byte == INT3_INSN_OPCODE) continue; - text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE); + text_poke(text_poke_addr(&text_poke_array.vec[i]), &byte, INT3_INSN_SIZE); do_sync++; } if (do_sync) - text_poke_sync(); + smp_text_poke_sync_each_cpu(); /* * Remove and wait for refs to be zero. 
+ * + * Notably, if after step-3 above the INT3 got removed, then the + * smp_text_poke_sync_each_cpu() will have serialized against any running INT3 + * handlers and the below spin-wait will not happen. + * + * IOW. unless the replacement instruction is INT3, this case goes + * unused. */ - if (!atomic_dec_and_test(&bp_desc.refs)) - atomic_cond_read_acquire(&bp_desc.refs, !VAL); + for_each_possible_cpu(i) { + atomic_t *refs = per_cpu_ptr(&text_poke_array_refs, i); + + if (unlikely(!atomic_dec_and_test(refs))) + atomic_cond_read_acquire(refs, !VAL); + } + + /* They are all completed: */ + text_poke_array.nr_entries = 0; } -static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, - const void *opcode, size_t len, const void *emulate) +static void __smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate) { + struct smp_text_poke_loc *tpl; struct insn insn; int ret, i = 0; + tpl = &text_poke_array.vec[text_poke_array.nr_entries++]; + if (len == 6) i = 1; - memcpy((void *)tp->text, opcode+i, len-i); + memcpy((void *)tpl->text, opcode+i, len-i); if (!emulate) emulate = opcode; ret = insn_decode_kernel(&insn, emulate); BUG_ON(ret < 0); - tp->rel_addr = addr - (void *)_stext; - tp->len = len; - tp->opcode = insn.opcode.bytes[0]; + tpl->rel_addr = addr - (void *)_stext; + tpl->len = len; + tpl->opcode = insn.opcode.bytes[0]; if (is_jcc32(&insn)) { /* * Map Jcc.d32 onto Jcc.d8 and use len to distinguish. */ - tp->opcode = insn.opcode.bytes[1] - 0x10; + tpl->opcode = insn.opcode.bytes[1] - 0x10; } - switch (tp->opcode) { + switch (tpl->opcode) { case RET_INSN_OPCODE: case JMP32_INSN_OPCODE: case JMP8_INSN_OPCODE: @@ -3091,14 +3010,14 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, * next instruction can be padded with INT3. */ for (i = insn.length; i < len; i++) - BUG_ON(tp->text[i] != INT3_INSN_OPCODE); + BUG_ON(tpl->text[i] != INT3_INSN_OPCODE); break; default: BUG_ON(len != insn.length); } - switch (tp->opcode) { + switch (tpl->opcode) { case INT3_INSN_OPCODE: case RET_INSN_OPCODE: break; @@ -3107,21 +3026,21 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, case JMP32_INSN_OPCODE: case JMP8_INSN_OPCODE: case 0x70 ... 0x7f: /* Jcc */ - tp->disp = insn.immediate.value; + tpl->disp = insn.immediate.value; break; default: /* assume NOP */ switch (len) { case 2: /* NOP2 -- emulate as JMP8+0 */ BUG_ON(memcmp(emulate, x86_nops[len], len)); - tp->opcode = JMP8_INSN_OPCODE; - tp->disp = 0; + tpl->opcode = JMP8_INSN_OPCODE; + tpl->disp = 0; break; case 5: /* NOP5 -- emulate as JMP32+0 */ BUG_ON(memcmp(emulate, x86_nops[len], len)); - tp->opcode = JMP32_INSN_OPCODE; - tp->disp = 0; + tpl->opcode = JMP32_INSN_OPCODE; + tpl->disp = 0; break; default: /* unknown instruction */ @@ -3132,51 +3051,50 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, } /* - * We hard rely on the tp_vec being ordered; ensure this is so by flushing + * We hard rely on the text_poke_array.vec being ordered; ensure this is so by flushing * early if needed. 
*/ -static bool tp_order_fail(void *addr) +static bool text_poke_addr_ordered(void *addr) { - struct text_poke_loc *tp; - - if (!tp_vec_nr) - return false; + WARN_ON_ONCE(!addr); - if (!addr) /* force */ + if (!text_poke_array.nr_entries) return true; - tp = &tp_vec[tp_vec_nr - 1]; - if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr) - return true; - - return false; -} - -static void text_poke_flush(void *addr) -{ - if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) { - text_poke_bp_batch(tp_vec, tp_vec_nr); - tp_vec_nr = 0; - } -} + /* + * If the last current entry's address is higher than the + * new entry's address we'd like to add, then ordering + * is violated and we must first flush all pending patching + * requests: + */ + if (text_poke_addr(text_poke_array.vec + text_poke_array.nr_entries-1) > addr) + return false; -void text_poke_finish(void) -{ - text_poke_flush(NULL); + return true; } -void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate) +/** + * smp_text_poke_batch_add() -- update instruction on live kernel on SMP, batched + * @addr: address to patch + * @opcode: opcode of new instruction + * @len: length to copy + * @emulate: instruction to be emulated + * + * Add a new instruction to the current queue of to-be-patched instructions + * the kernel maintains. The patching request will not be executed immediately, + * but becomes part of an array of patching requests, optimized for batched + * execution. All pending patching requests will be executed on the next + * smp_text_poke_batch_finish() call. + */ +void __ref smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate) { - struct text_poke_loc *tp; - - text_poke_flush(addr); - - tp = &tp_vec[tp_vec_nr++]; - text_poke_loc_init(tp, addr, opcode, len, emulate); + if (text_poke_array.nr_entries == TEXT_POKE_ARRAY_MAX || !text_poke_addr_ordered(addr)) + smp_text_poke_batch_finish(); + __smp_text_poke_batch_add(addr, opcode, len, emulate); } /** - * text_poke_bp() -- update instructions on live kernel on SMP + * smp_text_poke_single() -- update instruction on live kernel on SMP immediately * @addr: address to patch * @opcode: opcode of new instruction * @len: length to copy @@ -3184,12 +3102,11 @@ void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const voi * * Update a single instruction with the vector in the stack, avoiding * dynamically allocated memory. This function should be used when it is - * not possible to allocate memory. + * not possible to allocate memory for a vector. The single instruction + * is patched in immediately. 
*/ -void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate) +void __ref smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate) { - struct text_poke_loc tp; - - text_poke_loc_init(&tp, addr, opcode, len, emulate); - text_poke_bp_batch(&tp, 1); + __smp_text_poke_batch_add(addr, opcode, len, emulate); + smp_text_poke_batch_finish(); } diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index c884deca839b..3485d419c2f5 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -39,7 +39,7 @@ #include <asm/gart.h> #include <asm/set_memory.h> #include <asm/dma.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/x86_init.h> static unsigned long iommu_bus_base; /* GART remapping area (physical) */ diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6d12a9b69432..c1acead6227a 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -13,7 +13,9 @@ #include <linux/export.h> #include <linux/spinlock.h> #include <linux/pci_ids.h> -#include <asm/amd_nb.h> + +#include <asm/amd/nb.h> +#include <asm/cpuid/api.h> static u32 *flush_words; @@ -91,10 +93,7 @@ static int amd_cache_northbridges(void) if (amd_gart_present()) amd_northbridges.flags |= AMD_NB_GART; - /* - * Check for L3 cache presence. - */ - if (!cpuid_edx(0x80000006)) + if (!cpuid_amd_hygon_has_l3_cache()) return 0; /* @@ -151,7 +150,7 @@ struct resource *amd_get_mmconfig_range(struct resource *res) /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */ if (boot_cpu_data.x86 < 0x10 || - rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr)) + rdmsrq_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr)) return NULL; /* mmconfig is not enabled */ diff --git a/arch/x86/kernel/amd_node.c b/arch/x86/kernel/amd_node.c index b670fa85c61b..a40176b62eb5 100644 --- a/arch/x86/kernel/amd_node.c +++ b/arch/x86/kernel/amd_node.c @@ -9,7 +9,7 @@ */ #include <linux/debugfs.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> /* * AMD Nodes are a physical collection of I/O devices within an SoC. 
There can be one diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 89c0c8a3fc7e..769321185a08 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -29,7 +29,7 @@ #include <asm/gart.h> #include <asm/pci-direct.h> #include <asm/dma.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/x86_init.h> #include <linux/crash_dump.h> diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 62584a347931..d73ba5a7b623 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -59,6 +59,7 @@ #include <asm/time.h> #include <asm/smp.h> #include <asm/mce.h> +#include <asm/msr.h> #include <asm/tsc.h> #include <asm/hypervisor.h> #include <asm/cpu_device_id.h> @@ -425,7 +426,7 @@ static int lapic_next_deadline(unsigned long delta, weak_wrmsr_fence(); tsc = rdtsc(); - wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); + wrmsrq(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); return 0; } @@ -449,7 +450,7 @@ static int lapic_timer_shutdown(struct clock_event_device *evt) * the timer _and_ zero the counter registers: */ if (v & APIC_LVT_TIMER_TSCDEADLINE) - wrmsrl(MSR_IA32_TSC_DEADLINE, 0); + wrmsrq(MSR_IA32_TSC_DEADLINE, 0); else apic_write(APIC_TMICT, 0); @@ -1694,7 +1695,7 @@ static bool x2apic_hw_locked(void) x86_arch_cap_msr = x86_read_arch_cap_msr(); if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) { - rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr); + rdmsrq(MSR_IA32_XAPIC_DISABLE_STATUS, msr); return (msr & LEGACY_XAPIC_DISABLED); } return false; @@ -1707,12 +1708,12 @@ static void __x2apic_disable(void) if (!boot_cpu_has(X86_FEATURE_APIC)) return; - rdmsrl(MSR_IA32_APICBASE, msr); + rdmsrq(MSR_IA32_APICBASE, msr); if (!(msr & X2APIC_ENABLE)) return; /* Disable xapic and x2apic first and then reenable xapic mode */ - wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE)); - wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE); + wrmsrq(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE)); + wrmsrq(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE); printk_once(KERN_INFO "x2apic disabled\n"); } @@ -1720,10 +1721,10 @@ static void __x2apic_enable(void) { u64 msr; - rdmsrl(MSR_IA32_APICBASE, msr); + rdmsrq(MSR_IA32_APICBASE, msr); if (msr & X2APIC_ENABLE) return; - wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); + wrmsrq(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); printk_once(KERN_INFO "x2apic enabled\n"); } diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 16410f087b7a..e272bc7fdc8e 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/pgtable.h> +#include <asm/msr.h> #include <asm/numachip/numachip.h> #include <asm/numachip/numachip_csr.h> @@ -31,7 +32,7 @@ static u32 numachip1_get_apic_id(u32 x) unsigned int id = (x >> 24) & 0xff; if (static_cpu_has(X86_FEATURE_NODEID_MSR)) { - rdmsrl(MSR_FAM10H_NODE_ID, value); + rdmsrq(MSR_FAM10H_NODE_ID, value); id |= (value << 2) & 0xff00; } @@ -42,7 +43,7 @@ static u32 numachip2_get_apic_id(u32 x) { u64 mcfg; - rdmsrl(MSR_FAM10H_MMIO_CONF_BASE, mcfg); + rdmsrq(MSR_FAM10H_MMIO_CONF_BASE, mcfg); return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24); } @@ -150,7 +151,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) /* Account for nodes per socket in multi-core-module processors */ if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { - rdmsrl(MSR_FAM10H_NODE_ID, val); + rdmsrq(MSR_FAM10H_NODE_ID, 
val); nodes = ((val >> 3) & 7) + 1; } diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index eebc360ed1bb..5ba2feb2c04c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1486,7 +1486,7 @@ static void __init delay_with_tsc(void) * 1 GHz == 40 jiffies */ do { - rep_nop(); + native_pause(); now = rdtsc(); } while ((now - start) < 40000000000ULL / HZ && time_before_eq(jiffies, end)); } @@ -2225,7 +2225,7 @@ static int mp_irqdomain_create(int ioapic) /* Handle device tree enumerated APICs proper */ if (cfg->dev) { - fn = of_node_to_fwnode(cfg->dev); + fn = of_fwnode_handle(cfg->dev); } else { fn = irq_domain_alloc_named_id_fwnode("IO-APIC", mpc_ioapic_id(ioapic)); if (!fn) diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index ad4ea6fb3b6c..6259b474073b 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -33,6 +33,14 @@ static void __used common(void) { + OFFSET(CPUINFO_x86, cpuinfo_x86, x86); + OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); + OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping); + OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); + OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); + OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); + BLANK(); OFFSET(TASK_threadsp, task_struct, thread.sp); #ifdef CONFIG_STACKPROTECTOR diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 2b411cd00a4e..e0a292db97b2 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -12,15 +12,6 @@ void foo(void); void foo(void) { - OFFSET(CPUINFO_x86, cpuinfo_x86, x86); - OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); - OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); - OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping); - OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); - OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); - OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); - BLANK(); - OFFSET(PT_EBX, pt_regs, bx); OFFSET(PT_ECX, pt_regs, cx); OFFSET(PT_EDX, pt_regs, dx); diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c index d86d7d6e750c..a951333c5995 100644 --- a/arch/x86/kernel/callthunks.c +++ b/arch/x86/kernel/callthunks.c @@ -185,7 +185,7 @@ static void *patch_dest(void *dest, bool direct) u8 *pad = dest - tsize; memcpy(insn_buff, skl_call_thunk_template, tsize); - apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize); + text_poke_apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize); /* Already patched? 
*/ if (!bcmp(pad, insn_buff, tsize)) @@ -294,7 +294,7 @@ static bool is_callthunk(void *addr) pad = (void *)(dest - tmpl_size); memcpy(insn_buff, skl_call_thunk_template, tmpl_size); - apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size); + text_poke_apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size); return !bcmp(pad, insn_buff, tmpl_size); } @@ -312,7 +312,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip) return 0; memcpy(insn_buff, skl_call_thunk_template, tmpl_size); - apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size); + text_poke_apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size); memcpy(*pprog, insn_buff, tmpl_size); *pprog += tmpl_size; diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c index 303bf74d175b..99444409c026 100644 --- a/arch/x86/kernel/cet.c +++ b/arch/x86/kernel/cet.c @@ -2,6 +2,7 @@ #include <linux/ptrace.h> #include <asm/bugs.h> +#include <asm/msr.h> #include <asm/traps.h> enum cp_error_code { @@ -55,7 +56,7 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code) * will be whatever is live in userspace. So read the SSP before enabling * interrupts so locking the fpregs to do it later is not required. */ - rdmsrl(MSR_IA32_PL3_SSP, ssp); + rdmsrq(MSR_IA32_PL3_SSP, ssp); cond_local_irq_enable(regs); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 4efdf5c2efc8..1e26179ff18c 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -24,7 +24,7 @@ obj-y += rdrand.o obj-y += match.o obj-y += bugs.o obj-y += aperfmperf.o -obj-y += cpuid-deps.o +obj-y += cpuid-deps.o cpuid_0x2_table.o obj-y += umwait.o obj-y += capflags.o powerflags.o @@ -38,6 +38,9 @@ obj-y += intel.o tsx.o obj-$(CONFIG_PM) += intel_epb.o endif obj-$(CONFIG_CPU_SUP_AMD) += amd.o +ifeq ($(CONFIG_AMD_NB)$(CONFIG_SYSFS),yy) +obj-y += amd_cache_disable.o +endif obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 4e06baab40bb..93da466dfe2c 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -9,6 +9,7 @@ #include <linux/sched/clock.h> #include <linux/random.h> #include <linux/topology.h> +#include <asm/amd/fch.h> #include <asm/processor.h> #include <asm/apic.h> #include <asm/cacheinfo.h> @@ -21,6 +22,7 @@ #include <asm/delay.h> #include <asm/debugreg.h> #include <asm/resctrl.h> +#include <asm/msr.h> #include <asm/sev.h> #ifdef CONFIG_X86_64 @@ -31,7 +33,7 @@ u16 invlpgb_count_max __ro_after_init; -static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) +static inline int rdmsrq_amd_safe(unsigned msr, u64 *p) { u32 gprs[8] = { 0 }; int err; @@ -49,7 +51,7 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) return err; } -static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) +static inline int wrmsrq_amd_safe(unsigned msr, u64 val) { u32 gprs[8] = { 0 }; @@ -383,7 +385,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) (c->x86 == 0x10 && c->x86_model >= 0x2)) { u64 val; - rdmsrl(MSR_K7_HWCR, val); + rdmsrq(MSR_K7_HWCR, val); if (!(val & BIT(24))) pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); } @@ -422,7 +424,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) * Try to cache the base value so further operations can * avoid RMW. If that faults, do not enable SSBD. 
*/ - if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { + if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); setup_force_cpu_cap(X86_FEATURE_SSBD); x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; @@ -513,7 +515,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) */ if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) { /* Check if memory encryption is enabled */ - rdmsrl(MSR_AMD64_SYSCFG, msr); + rdmsrq(MSR_AMD64_SYSCFG, msr); if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) goto clear_all; @@ -530,7 +532,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) if (!sme_me_mask) setup_clear_cpu_cap(X86_FEATURE_SME); - rdmsrl(MSR_K7_HWCR, msr); + rdmsrq(MSR_K7_HWCR, msr); if (!(msr & MSR_K7_HWCR_SMMLOCK)) goto clear_sev; @@ -617,7 +619,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) { if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB)) setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); - else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { + else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); setup_force_cpu_cap(X86_FEATURE_SBPB); } @@ -641,14 +643,14 @@ static void init_amd_k8(struct cpuinfo_x86 *c) */ if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) { clear_cpu_cap(c, X86_FEATURE_LAHF_LM); - if (!rdmsrl_amd_safe(0xc001100d, &value)) { + if (!rdmsrq_amd_safe(0xc001100d, &value)) { value &= ~BIT_64(32); - wrmsrl_amd_safe(0xc001100d, value); + wrmsrq_amd_safe(0xc001100d, value); } } if (!c->x86_model_id[0]) - strcpy(c->x86_model_id, "Hammer"); + strscpy(c->x86_model_id, "Hammer"); #ifdef CONFIG_SMP /* @@ -793,9 +795,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c) * Disable it on the affected CPUs. */ if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) { - if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) { + if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) { value |= 0x1E; - wrmsrl_safe(MSR_F15H_IC_CFG, value); + wrmsrq_safe(MSR_F15H_IC_CFG, value); } } @@ -844,9 +846,9 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) * suppresses non-branch predictions. 
*/ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { - if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { + if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT; - wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); + wrmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); } } #endif @@ -1030,7 +1032,7 @@ static void init_amd(struct cpuinfo_x86 *c) init_amd_cacheinfo(c); if (cpu_has(c, X86_FEATURE_SVM)) { - rdmsrl(MSR_VM_CR, vm_cr); + rdmsrq(MSR_VM_CR, vm_cr); if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) { pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n"); clear_cpu_cap(c, X86_FEATURE_SVM); @@ -1211,7 +1213,7 @@ void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask) return; - wrmsr(amd_msr_dr_addr_masks[dr], mask, 0); + wrmsrq(amd_msr_dr_addr_masks[dr], mask); per_cpu(amd_dr_addr_mask, cpu)[dr] = mask; } @@ -1242,3 +1244,56 @@ void amd_check_microcode(void) if (cpu_feature_enabled(X86_FEATURE_ZEN2)) on_each_cpu(zenbleed_check_cpu, NULL, 1); } + +static const char * const s5_reset_reason_txt[] = { + [0] = "thermal pin BP_THERMTRIP_L was tripped", + [1] = "power button was pressed for 4 seconds", + [2] = "shutdown pin was tripped", + [4] = "remote ASF power off command was received", + [9] = "internal CPU thermal limit was tripped", + [16] = "system reset pin BP_SYS_RST_L was tripped", + [17] = "software issued PCI reset", + [18] = "software wrote 0x4 to reset control register 0xCF9", + [19] = "software wrote 0x6 to reset control register 0xCF9", + [20] = "software wrote 0xE to reset control register 0xCF9", + [21] = "ACPI power state transition occurred", + [22] = "keyboard reset pin KB_RST_L was tripped", + [23] = "internal CPU shutdown event occurred", + [24] = "system failed to boot before failed boot timer expired", + [25] = "hardware watchdog timer expired", + [26] = "remote ASF reset command was received", + [27] = "an uncorrected error caused a data fabric sync flood event", + [29] = "FCH and MP1 failed warm reset handshake", + [30] = "a parity error occurred", + [31] = "a software sync flood event occurred", +}; + +static __init int print_s5_reset_status_mmio(void) +{ + unsigned long value; + void __iomem *addr; + int i; + + if (!cpu_feature_enabled(X86_FEATURE_ZEN)) + return 0; + + addr = ioremap(FCH_PM_BASE + FCH_PM_S5_RESET_STATUS, sizeof(value)); + if (!addr) + return 0; + + value = ioread32(addr); + iounmap(addr); + + for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) { + if (!(value & BIT(i))) + continue; + + if (s5_reset_reason_txt[i]) { + pr_info("x86/amd: Previous system reset reason [0x%08lx]: %s\n", + value, s5_reset_reason_txt[i]); + } + } + + return 0; +} +late_initcall(print_s5_reset_status_mmio); diff --git a/arch/x86/kernel/cpu/amd_cache_disable.c b/arch/x86/kernel/cpu/amd_cache_disable.c new file mode 100644 index 000000000000..8843b9557aea --- /dev/null +++ b/arch/x86/kernel/cpu/amd_cache_disable.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AMD L3 cache_disable_{0,1} sysfs handling + * Documentation/ABI/testing/sysfs-devices-system-cpu + */ + +#include <linux/cacheinfo.h> +#include <linux/capability.h> +#include <linux/pci.h> +#include <linux/sysfs.h> + +#include <asm/amd/nb.h> + +#include "cpu.h" + +/* + * L3 cache descriptors + */ +static void amd_calc_l3_indices(struct amd_northbridge *nb) +{ + struct amd_l3_cache *l3 = &nb->l3_cache; + unsigned int sc0, sc1, sc2, sc3; + u32 val = 0; + + pci_read_config_dword(nb->misc, 0x1C4, &val); + + /* calculate subcache sizes 
*/ + l3->subcaches[0] = sc0 = !(val & BIT(0)); + l3->subcaches[1] = sc1 = !(val & BIT(4)); + + if (boot_cpu_data.x86 == 0x15) { + l3->subcaches[0] = sc0 += !(val & BIT(1)); + l3->subcaches[1] = sc1 += !(val & BIT(5)); + } + + l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); + l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); + + l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; +} + +/* + * check whether a slot used for disabling an L3 index is occupied. + * @l3: L3 cache descriptor + * @slot: slot number (0..1) + * + * @returns: the disabled index if used or negative value if slot free. + */ +static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned int slot) +{ + unsigned int reg = 0; + + pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg); + + /* check whether this slot is activated already */ + if (reg & (3UL << 30)) + return reg & 0xfff; + + return -1; +} + +static ssize_t show_cache_disable(struct cacheinfo *ci, char *buf, unsigned int slot) +{ + int index; + struct amd_northbridge *nb = ci->priv; + + index = amd_get_l3_disable_slot(nb, slot); + if (index >= 0) + return sysfs_emit(buf, "%d\n", index); + + return sysfs_emit(buf, "FREE\n"); +} + +#define SHOW_CACHE_DISABLE(slot) \ +static ssize_t \ +cache_disable_##slot##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct cacheinfo *ci = dev_get_drvdata(dev); \ + return show_cache_disable(ci, buf, slot); \ +} + +SHOW_CACHE_DISABLE(0) +SHOW_CACHE_DISABLE(1) + +static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu, + unsigned int slot, unsigned long idx) +{ + int i; + + idx |= BIT(30); + + /* + * disable index in all 4 subcaches + */ + for (i = 0; i < 4; i++) { + u32 reg = idx | (i << 20); + + if (!nb->l3_cache.subcaches[i]) + continue; + + pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); + + /* + * We need to WBINVD on a core on the node containing the L3 + * cache which indices we disable therefore a simple wbinvd() + * is not sufficient. 
+ */ + wbinvd_on_cpu(cpu); + + reg |= BIT(31); + pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); + } +} + +/* + * disable a L3 cache index by using a disable-slot + * + * @l3: L3 cache descriptor + * @cpu: A CPU on the node containing the L3 cache + * @slot: slot number (0..1) + * @index: index to disable + * + * @return: 0 on success, error status on failure + */ +static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, + unsigned int slot, unsigned long index) +{ + int ret = 0; + + /* check if @slot is already used or the index is already disabled */ + ret = amd_get_l3_disable_slot(nb, slot); + if (ret >= 0) + return -EEXIST; + + if (index > nb->l3_cache.indices) + return -EINVAL; + + /* check whether the other slot has disabled the same index already */ + if (index == amd_get_l3_disable_slot(nb, !slot)) + return -EEXIST; + + amd_l3_disable_index(nb, cpu, slot, index); + + return 0; +} + +static ssize_t store_cache_disable(struct cacheinfo *ci, const char *buf, + size_t count, unsigned int slot) +{ + struct amd_northbridge *nb = ci->priv; + unsigned long val = 0; + int cpu, err = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + cpu = cpumask_first(&ci->shared_cpu_map); + + if (kstrtoul(buf, 10, &val) < 0) + return -EINVAL; + + err = amd_set_l3_disable_slot(nb, cpu, slot, val); + if (err) { + if (err == -EEXIST) + pr_warn("L3 slot %d in use/index already disabled!\n", + slot); + return err; + } + return count; +} + +#define STORE_CACHE_DISABLE(slot) \ +static ssize_t \ +cache_disable_##slot##_store(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + struct cacheinfo *ci = dev_get_drvdata(dev); \ + return store_cache_disable(ci, buf, count, slot); \ +} + +STORE_CACHE_DISABLE(0) +STORE_CACHE_DISABLE(1) + +static ssize_t subcaches_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cacheinfo *ci = dev_get_drvdata(dev); + int cpu = cpumask_first(&ci->shared_cpu_map); + + return sysfs_emit(buf, "%x\n", amd_get_subcaches(cpu)); +} + +static ssize_t subcaches_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cacheinfo *ci = dev_get_drvdata(dev); + int cpu = cpumask_first(&ci->shared_cpu_map); + unsigned long val; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (kstrtoul(buf, 16, &val) < 0) + return -EINVAL; + + if (amd_set_subcaches(cpu, val)) + return -EINVAL; + + return count; +} + +static DEVICE_ATTR_RW(cache_disable_0); +static DEVICE_ATTR_RW(cache_disable_1); +static DEVICE_ATTR_RW(subcaches); + +static umode_t cache_private_attrs_is_visible(struct kobject *kobj, + struct attribute *attr, int unused) +{ + struct device *dev = kobj_to_dev(kobj); + struct cacheinfo *ci = dev_get_drvdata(dev); + umode_t mode = attr->mode; + + if (!ci->priv) + return 0; + + if ((attr == &dev_attr_subcaches.attr) && + amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + return mode; + + if ((attr == &dev_attr_cache_disable_0.attr || + attr == &dev_attr_cache_disable_1.attr) && + amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) + return mode; + + return 0; +} + +static struct attribute_group cache_private_group = { + .is_visible = cache_private_attrs_is_visible, +}; + +static void init_amd_l3_attrs(void) +{ + static struct attribute **amd_l3_attrs; + int n = 1; + + if (amd_l3_attrs) /* already initialized */ + return; + + if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) + n += 2; + if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + n += 1; + + amd_l3_attrs 
= kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL); + if (!amd_l3_attrs) + return; + + n = 0; + if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { + amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr; + amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr; + } + if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) + amd_l3_attrs[n++] = &dev_attr_subcaches.attr; + + cache_private_group.attrs = amd_l3_attrs; +} + +const struct attribute_group *cache_get_priv_group(struct cacheinfo *ci) +{ + struct amd_northbridge *nb = ci->priv; + + if (ci->level < 3 || !nb) + return NULL; + + if (nb && nb->l3_cache.indices) + init_amd_l3_attrs(); + + return &cache_private_group; +} + +struct amd_northbridge *amd_init_l3_cache(int index) +{ + struct amd_northbridge *nb; + int node; + + /* only for L3, and not in virtualized environments */ + if (index < 3) + return NULL; + + node = topology_amd_node_id(smp_processor_id()); + nb = node_to_amd_nb(node); + if (nb && !nb->l3_cache.indices) + amd_calc_l3_indices(nb); + + return nb; +} diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 6cf31a1649c4..a315b0627dfb 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -20,6 +20,7 @@ #include <asm/cpu.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "cpu.h" @@ -40,8 +41,8 @@ static void init_counter_refs(void) { u64 aperf, mperf; - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); this_cpu_write(cpu_samples.aperf, aperf); this_cpu_write(cpu_samples.mperf, mperf); @@ -99,7 +100,7 @@ static bool __init turbo_disabled(void) u64 misc_en; int err; - err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en); + err = rdmsrq_safe(MSR_IA32_MISC_ENABLE, &misc_en); if (err) return false; @@ -110,11 +111,11 @@ static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq) { int err; - err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq); + err = rdmsrq_safe(MSR_ATOM_CORE_RATIOS, base_freq); if (err) return false; - err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq); + err = rdmsrq_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq); if (err) return false; @@ -152,13 +153,13 @@ static bool __init knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int err, i; u64 msr; - err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq); + err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq); if (err) return false; *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */ - err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr); + err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr); if (err) return false; @@ -190,17 +191,17 @@ static bool __init skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int s u32 group_size; int err, i; - err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq); + err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq); if (err) return false; *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */ - err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios); + err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &ratios); if (err) return false; - err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts); + err = rdmsrq_safe(MSR_TURBO_RATIO_LIMIT1, &counts); if (err) return false; @@ -220,11 +221,11 @@ static bool __init core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq) u64 msr; int err; - err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq); + err = rdmsrq_safe(MSR_PLATFORM_INFO, base_freq); if (err) return false; - err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr); + err = 
rdmsrq_safe(MSR_TURBO_RATIO_LIMIT, &msr); if (err) return false; @@ -474,8 +475,8 @@ void arch_scale_freq_tick(void) if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF)) return; - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); acnt = aperf - s->aperf; mcnt = mperf - s->mperf; diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 8596ce85026c..7f94e6a5497d 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -34,22 +34,66 @@ #include "cpu.h" +/* + * Speculation Vulnerability Handling + * + * Each vulnerability is handled with the following functions: + * <vuln>_select_mitigation() -- Selects a mitigation to use. This should + * take into account all relevant command line + * options. + * <vuln>_update_mitigation() -- This is called after all vulnerabilities have + * selected a mitigation, in case the selection + * may want to change based on other choices + * made. This function is optional. + * <vuln>_apply_mitigation() -- Enable the selected mitigation. + * + * The compile-time mitigation in all cases should be AUTO. An explicit + * command-line option can override AUTO. If no such option is + * provided, <vuln>_select_mitigation() will override AUTO to the best + * mitigation option. + */ + static void __init spectre_v1_select_mitigation(void); +static void __init spectre_v1_apply_mitigation(void); static void __init spectre_v2_select_mitigation(void); +static void __init spectre_v2_update_mitigation(void); +static void __init spectre_v2_apply_mitigation(void); static void __init retbleed_select_mitigation(void); +static void __init retbleed_update_mitigation(void); +static void __init retbleed_apply_mitigation(void); static void __init spectre_v2_user_select_mitigation(void); +static void __init spectre_v2_user_update_mitigation(void); +static void __init spectre_v2_user_apply_mitigation(void); static void __init ssb_select_mitigation(void); +static void __init ssb_apply_mitigation(void); static void __init l1tf_select_mitigation(void); +static void __init l1tf_apply_mitigation(void); static void __init mds_select_mitigation(void); -static void __init md_clear_update_mitigation(void); -static void __init md_clear_select_mitigation(void); +static void __init mds_update_mitigation(void); +static void __init mds_apply_mitigation(void); static void __init taa_select_mitigation(void); +static void __init taa_update_mitigation(void); +static void __init taa_apply_mitigation(void); static void __init mmio_select_mitigation(void); +static void __init mmio_update_mitigation(void); +static void __init mmio_apply_mitigation(void); +static void __init rfds_select_mitigation(void); +static void __init rfds_update_mitigation(void); +static void __init rfds_apply_mitigation(void); static void __init srbds_select_mitigation(void); +static void __init srbds_apply_mitigation(void); static void __init l1d_flush_select_mitigation(void); static void __init srso_select_mitigation(void); +static void __init srso_update_mitigation(void); +static void __init srso_apply_mitigation(void); static void __init gds_select_mitigation(void); +static void __init gds_apply_mitigation(void); +static void __init bhi_select_mitigation(void); +static void __init bhi_update_mitigation(void); +static void __init bhi_apply_mitigation(void); static void __init its_select_mitigation(void); +static void __init its_update_mitigation(void); +static void __init its_apply_mitigation(void); /* The base value of 
the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -79,7 +123,7 @@ static void __init set_return_thunk(void *thunk) static void update_spec_ctrl(u64 val) { this_cpu_write(x86_spec_ctrl_current, val); - wrmsrl(MSR_IA32_SPEC_CTRL, val); + wrmsrq(MSR_IA32_SPEC_CTRL, val); } /* @@ -98,7 +142,7 @@ void update_spec_ctrl_cond(u64 val) * forced the update can be delayed until that time. */ if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) - wrmsrl(MSR_IA32_SPEC_CTRL, val); + wrmsrq(MSR_IA32_SPEC_CTRL, val); } noinstr u64 spec_ctrl_current(void) @@ -136,9 +180,13 @@ EXPORT_SYMBOL_GPL(mds_idle_clear); */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); -/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ -DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); -EXPORT_SYMBOL_GPL(mmio_stale_data_clear); +/* + * Controls CPU Fill buffer clear before VMenter. This is a subset of + * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only + * mitigation is required. + */ +DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear); +EXPORT_SYMBOL_GPL(cpu_buf_vm_clear); void __init cpu_select_mitigations(void) { @@ -148,7 +196,7 @@ void __init cpu_select_mitigations(void) * init code as it is not enumerated and depends on the family. */ if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { - rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); /* * Previously running kernel (kexec), may have some controls @@ -163,31 +211,67 @@ void __init cpu_select_mitigations(void) /* Select the proper CPU mitigations before patching alternatives: */ spectre_v1_select_mitigation(); spectre_v2_select_mitigation(); - /* - * retbleed_select_mitigation() relies on the state set by - * spectre_v2_select_mitigation(); specifically it wants to know about - * spectre_v2=ibrs. - */ retbleed_select_mitigation(); - /* - * spectre_v2_user_select_mitigation() relies on the state set by - * retbleed_select_mitigation(); specifically the STIBP selection is - * forced for UNRET or IBPB. - */ spectre_v2_user_select_mitigation(); ssb_select_mitigation(); l1tf_select_mitigation(); - md_clear_select_mitigation(); + mds_select_mitigation(); + taa_select_mitigation(); + mmio_select_mitigation(); + rfds_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); - - /* - * srso_select_mitigation() depends and must run after - * retbleed_select_mitigation(). - */ srso_select_mitigation(); gds_select_mitigation(); its_select_mitigation(); + bhi_select_mitigation(); + + /* + * After mitigations are selected, some may need to update their + * choices. + */ + spectre_v2_update_mitigation(); + /* + * retbleed_update_mitigation() relies on the state set by + * spectre_v2_update_mitigation(); specifically it wants to know about + * spectre_v2=ibrs. + */ + retbleed_update_mitigation(); + /* + * its_update_mitigation() depends on spectre_v2_update_mitigation() + * and retbleed_update_mitigation(). + */ + its_update_mitigation(); + + /* + * spectre_v2_user_update_mitigation() depends on + * retbleed_update_mitigation(), specifically the STIBP + * selection is forced for UNRET or IBPB. + */ + spectre_v2_user_update_mitigation(); + mds_update_mitigation(); + taa_update_mitigation(); + mmio_update_mitigation(); + rfds_update_mitigation(); + bhi_update_mitigation(); + /* srso_update_mitigation() depends on retbleed_update_mitigation(). 
*/ + srso_update_mitigation(); + + spectre_v1_apply_mitigation(); + spectre_v2_apply_mitigation(); + retbleed_apply_mitigation(); + spectre_v2_user_apply_mitigation(); + ssb_apply_mitigation(); + l1tf_apply_mitigation(); + mds_apply_mitigation(); + taa_apply_mitigation(); + mmio_apply_mitigation(); + rfds_apply_mitigation(); + srbds_apply_mitigation(); + srso_apply_mitigation(); + gds_apply_mitigation(); + its_apply_mitigation(); + bhi_apply_mitigation(); } /* @@ -237,9 +321,9 @@ static void x86_amd_ssb_disable(void) u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) - wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); + wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) - wrmsrl(MSR_AMD64_LS_CFG, msrval); + wrmsrq(MSR_AMD64_LS_CFG, msrval); } #undef pr_fmt @@ -290,6 +374,12 @@ enum rfds_mitigations { static enum rfds_mitigations rfds_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF; +/* + * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing + * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry. + */ +static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init; + static void __init mds_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { @@ -300,12 +390,34 @@ static void __init mds_select_mitigation(void) if (mds_mitigation == MDS_MITIGATION_AUTO) mds_mitigation = MDS_MITIGATION_FULL; + if (mds_mitigation == MDS_MITIGATION_OFF) + return; + + verw_clear_cpu_buf_mitigation_selected = true; +} + +static void __init mds_update_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) + return; + + /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */ + if (verw_clear_cpu_buf_mitigation_selected) + mds_mitigation = MDS_MITIGATION_FULL; + if (mds_mitigation == MDS_MITIGATION_FULL) { if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) mds_mitigation = MDS_MITIGATION_VMWERV; + } - setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + pr_info("%s\n", mds_strings[mds_mitigation]); +} +static void __init mds_apply_mitigation(void) +{ + if (mds_mitigation == MDS_MITIGATION_FULL || + mds_mitigation == MDS_MITIGATION_VMWERV) { + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && (mds_nosmt || cpu_mitigations_auto_nosmt())) cpu_smt_disable(false); @@ -345,6 +457,11 @@ static const char * const taa_strings[] = { [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", }; +static bool __init taa_vulnerable(void) +{ + return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM); +} + static void __init taa_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_TAA)) { @@ -358,48 +475,63 @@ static void __init taa_select_mitigation(void) return; } - if (cpu_mitigations_off()) { + if (cpu_mitigations_off()) taa_mitigation = TAA_MITIGATION_OFF; - return; - } - /* - * TAA mitigation via VERW is turned off if both - * tsx_async_abort=off and mds=off are specified. - */ - if (taa_mitigation == TAA_MITIGATION_OFF && - mds_mitigation == MDS_MITIGATION_OFF) + /* Microcode will be checked in taa_update_mitigation(). 
*/ + if (taa_mitigation == TAA_MITIGATION_AUTO) + taa_mitigation = TAA_MITIGATION_VERW; + + if (taa_mitigation != TAA_MITIGATION_OFF) + verw_clear_cpu_buf_mitigation_selected = true; +} + +static void __init taa_update_mitigation(void) +{ + if (!taa_vulnerable() || cpu_mitigations_off()) return; - if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) + if (verw_clear_cpu_buf_mitigation_selected) taa_mitigation = TAA_MITIGATION_VERW; - else - taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; - /* - * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. - * A microcode update fixes this behavior to clear CPU buffers. It also - * adds support for MSR_IA32_TSX_CTRL which is enumerated by the - * ARCH_CAP_TSX_CTRL_MSR bit. - * - * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode - * update is required. - */ - if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) && - !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)) - taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; + if (taa_mitigation == TAA_MITIGATION_VERW) { + /* Check if the requisite ucode is available. */ + if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) + taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; - /* - * TSX is enabled, select alternate mitigation for TAA which is - * the same as MDS. Enable MDS static branch to clear CPU buffers. - * - * For guests that can't determine whether the correct microcode is - * present on host, enable the mitigation for UCODE_NEEDED as well. - */ - setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + /* + * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. + * A microcode update fixes this behavior to clear CPU buffers. It also + * adds support for MSR_IA32_TSX_CTRL which is enumerated by the + * ARCH_CAP_TSX_CTRL_MSR bit. + * + * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode + * update is required. + */ + if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && + !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)) + taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; + } - if (taa_nosmt || cpu_mitigations_auto_nosmt()) - cpu_smt_disable(false); + pr_info("%s\n", taa_strings[taa_mitigation]); +} + +static void __init taa_apply_mitigation(void) +{ + if (taa_mitigation == TAA_MITIGATION_VERW || + taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) { + /* + * TSX is enabled, select alternate mitigation for TAA which is + * the same as MDS. Enable MDS static branch to clear CPU buffers. + * + * For guests that can't determine whether the correct microcode is + * present on host, enable the mitigation for UCODE_NEEDED as well. + */ + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + + if (taa_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); + } } static int __init tsx_async_abort_parse_cmdline(char *str) @@ -437,31 +569,67 @@ static const char * const mmio_strings[] = { static void __init mmio_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || - boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || cpu_mitigations_off()) { mmio_mitigation = MMIO_MITIGATION_OFF; return; } + /* Microcode will be checked in mmio_update_mitigation(). */ + if (mmio_mitigation == MMIO_MITIGATION_AUTO) + mmio_mitigation = MMIO_MITIGATION_VERW; + if (mmio_mitigation == MMIO_MITIGATION_OFF) return; /* * Enable CPU buffer clear mitigation for host and VMM, if also affected - * by MDS or TAA. Otherwise, enable mitigation for VMM only. + * by MDS or TAA. 
*/ - if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && - boot_cpu_has(X86_FEATURE_RTM))) - setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable()) + verw_clear_cpu_buf_mitigation_selected = true; +} + +static void __init mmio_update_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off()) + return; + + if (verw_clear_cpu_buf_mitigation_selected) + mmio_mitigation = MMIO_MITIGATION_VERW; + + if (mmio_mitigation == MMIO_MITIGATION_VERW) { + /* + * Check if the system has the right microcode. + * + * CPU Fill buffer clear mitigation is enumerated by either an explicit + * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS + * affected systems. + */ + if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) || + (boot_cpu_has(X86_FEATURE_MD_CLEAR) && + boot_cpu_has(X86_FEATURE_FLUSH_L1D) && + !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))) + mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; + } + + pr_info("%s\n", mmio_strings[mmio_mitigation]); +} + +static void __init mmio_apply_mitigation(void) +{ + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return; /* - * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based - * mitigations, disable KVM-only mitigation in that case. + * Only enable the VMM mitigation if the CPU buffer clear mitigation is + * not being used. */ - if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF)) - static_branch_disable(&mmio_stale_data_clear); - else - static_branch_enable(&mmio_stale_data_clear); + if (verw_clear_cpu_buf_mitigation_selected) { + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + static_branch_disable(&cpu_buf_vm_clear); + } else { + static_branch_enable(&cpu_buf_vm_clear); + } /* * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can @@ -471,21 +639,6 @@ static void __init mmio_select_mitigation(void) if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) static_branch_enable(&mds_idle_clear); - /* - * Check if the system has the right microcode. - * - * CPU Fill buffer clear mitigation is enumerated by either an explicit - * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS - * affected systems. 
- */ - if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) || - (boot_cpu_has(X86_FEATURE_MD_CLEAR) && - boot_cpu_has(X86_FEATURE_FLUSH_L1D) && - !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))) - mmio_mitigation = MMIO_MITIGATION_VERW; - else - mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; - if (mmio_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); } @@ -520,22 +673,48 @@ static const char * const rfds_strings[] = { [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", }; +static inline bool __init verw_clears_cpu_reg_file(void) +{ + return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR); +} + static void __init rfds_select_mitigation(void) { if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) { rfds_mitigation = RFDS_MITIGATION_OFF; return; } + + if (rfds_mitigation == RFDS_MITIGATION_AUTO) + rfds_mitigation = RFDS_MITIGATION_VERW; + if (rfds_mitigation == RFDS_MITIGATION_OFF) return; - if (rfds_mitigation == RFDS_MITIGATION_AUTO) + if (verw_clears_cpu_reg_file()) + verw_clear_cpu_buf_mitigation_selected = true; +} + +static void __init rfds_update_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) + return; + + if (verw_clear_cpu_buf_mitigation_selected) rfds_mitigation = RFDS_MITIGATION_VERW; - if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR) + if (rfds_mitigation == RFDS_MITIGATION_VERW) { + if (!verw_clears_cpu_reg_file()) + rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED; + } + + pr_info("%s\n", rfds_strings[rfds_mitigation]); +} + +static void __init rfds_apply_mitigation(void) +{ + if (rfds_mitigation == RFDS_MITIGATION_VERW) setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); - else - rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED; } static __init int rfds_parse_cmdline(char *str) @@ -556,76 +735,11 @@ static __init int rfds_parse_cmdline(char *str) early_param("reg_file_data_sampling", rfds_parse_cmdline); #undef pr_fmt -#define pr_fmt(fmt) "" fmt - -static void __init md_clear_update_mitigation(void) -{ - if (cpu_mitigations_off()) - return; - - if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF)) - goto out; - - /* - * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO - * Stale Data mitigation, if necessary. - */ - if (mds_mitigation == MDS_MITIGATION_OFF && - boot_cpu_has_bug(X86_BUG_MDS)) { - mds_mitigation = MDS_MITIGATION_FULL; - mds_select_mitigation(); - } - if (taa_mitigation == TAA_MITIGATION_OFF && - boot_cpu_has_bug(X86_BUG_TAA)) { - taa_mitigation = TAA_MITIGATION_VERW; - taa_select_mitigation(); - } - /* - * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear - * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state. 
- */ - if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { - mmio_mitigation = MMIO_MITIGATION_VERW; - mmio_select_mitigation(); - } - if (rfds_mitigation == RFDS_MITIGATION_OFF && - boot_cpu_has_bug(X86_BUG_RFDS)) { - rfds_mitigation = RFDS_MITIGATION_VERW; - rfds_select_mitigation(); - } -out: - if (boot_cpu_has_bug(X86_BUG_MDS)) - pr_info("MDS: %s\n", mds_strings[mds_mitigation]); - if (boot_cpu_has_bug(X86_BUG_TAA)) - pr_info("TAA: %s\n", taa_strings[taa_mitigation]); - if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) - pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); - else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) - pr_info("MMIO Stale Data: Unknown: No mitigations\n"); - if (boot_cpu_has_bug(X86_BUG_RFDS)) - pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]); -} - -static void __init md_clear_select_mitigation(void) -{ - mds_select_mitigation(); - taa_select_mitigation(); - mmio_select_mitigation(); - rfds_select_mitigation(); - - /* - * As these mitigations are inter-related and rely on VERW instruction - * to clear the microarchitural buffers, update and print their status - * after mitigation selection is done for each of these vulnerabilities. - */ - md_clear_update_mitigation(); -} - -#undef pr_fmt #define pr_fmt(fmt) "SRBDS: " fmt enum srbds_mitigations { SRBDS_MITIGATION_OFF, + SRBDS_MITIGATION_AUTO, SRBDS_MITIGATION_UCODE_NEEDED, SRBDS_MITIGATION_FULL, SRBDS_MITIGATION_TSX_OFF, @@ -633,7 +747,7 @@ enum srbds_mitigations { }; static enum srbds_mitigations srbds_mitigation __ro_after_init = - IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_FULL : SRBDS_MITIGATION_OFF; + IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF; static const char * const srbds_strings[] = { [SRBDS_MITIGATION_OFF] = "Vulnerable", @@ -665,7 +779,7 @@ void update_srbds_msr(void) if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) return; - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); switch (srbds_mitigation) { case SRBDS_MITIGATION_OFF: @@ -679,13 +793,18 @@ void update_srbds_msr(void) break; } - wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); } static void __init srbds_select_mitigation(void) { - if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) { + srbds_mitigation = SRBDS_MITIGATION_OFF; return; + } + + if (srbds_mitigation == SRBDS_MITIGATION_AUTO) + srbds_mitigation = SRBDS_MITIGATION_FULL; /* * Check to see if this is one of the MDS_NO systems supporting TSX that @@ -699,13 +818,17 @@ static void __init srbds_select_mitigation(void) srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; - else if (cpu_mitigations_off() || srbds_off) + else if (srbds_off) srbds_mitigation = SRBDS_MITIGATION_OFF; - update_srbds_msr(); pr_info("%s\n", srbds_strings[srbds_mitigation]); } +static void __init srbds_apply_mitigation(void) +{ + update_srbds_msr(); +} + static int __init srbds_parse_cmdline(char *str) { if (!str) @@ -752,6 +875,7 @@ early_param("l1d_flush", l1d_flush_parse_cmdline); enum gds_mitigations { GDS_MITIGATION_OFF, + GDS_MITIGATION_AUTO, GDS_MITIGATION_UCODE_NEEDED, GDS_MITIGATION_FORCE, GDS_MITIGATION_FULL, @@ -760,7 +884,7 @@ enum gds_mitigations { }; static enum gds_mitigations gds_mitigation __ro_after_init = - IS_ENABLED(CONFIG_MITIGATION_GDS) ? 
GDS_MITIGATION_FULL : GDS_MITIGATION_OFF; + IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF; static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", @@ -785,7 +909,7 @@ void update_gds_msr(void) switch (gds_mitigation) { case GDS_MITIGATION_OFF: - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl |= GDS_MITG_DIS; break; case GDS_MITIGATION_FULL_LOCKED: @@ -795,23 +919,24 @@ void update_gds_msr(void) * CPUs. */ case GDS_MITIGATION_FULL: - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl &= ~GDS_MITG_DIS; break; case GDS_MITIGATION_FORCE: case GDS_MITIGATION_UCODE_NEEDED: case GDS_MITIGATION_HYPERVISOR: + case GDS_MITIGATION_AUTO: return; } - wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); /* * Check to make sure that the WRMSR value was not ignored. Writes to * GDS_MITG_DIS will be ignored if this processor is locked but the boot * processor was not. */ - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); } @@ -824,33 +949,28 @@ static void __init gds_select_mitigation(void) if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { gds_mitigation = GDS_MITIGATION_HYPERVISOR; - goto out; + return; } if (cpu_mitigations_off()) gds_mitigation = GDS_MITIGATION_OFF; /* Will verify below that mitigation _can_ be disabled */ + if (gds_mitigation == GDS_MITIGATION_AUTO) + gds_mitigation = GDS_MITIGATION_FULL; + /* No microcode */ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) { - if (gds_mitigation == GDS_MITIGATION_FORCE) { - /* - * This only needs to be done on the boot CPU so do it - * here rather than in update_gds_msr() - */ - setup_clear_cpu_cap(X86_FEATURE_AVX); - pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); - } else { + if (gds_mitigation != GDS_MITIGATION_FORCE) gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; - } - goto out; + return; } /* Microcode has mitigation, use it */ if (gds_mitigation == GDS_MITIGATION_FORCE) gds_mitigation = GDS_MITIGATION_FULL; - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); if (mcu_ctrl & GDS_MITG_LOCKED) { if (gds_mitigation == GDS_MITIGATION_OFF) pr_warn("Mitigation locked. Disable failed.\n"); @@ -864,9 +984,25 @@ static void __init gds_select_mitigation(void) */ gds_mitigation = GDS_MITIGATION_FULL_LOCKED; } +} + +static void __init gds_apply_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return; + + /* Microcode is present */ + if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL) + update_gds_msr(); + else if (gds_mitigation == GDS_MITIGATION_FORCE) { + /* + * This only needs to be done on the boot CPU so do it + * here rather than in update_gds_msr() + */ + setup_clear_cpu_cap(X86_FEATURE_AVX); + pr_warn("Microcode update needed! 
Disabling AVX as mitigation.\n"); + } - update_gds_msr(); -out: pr_info("%s\n", gds_strings[gds_mitigation]); } @@ -927,10 +1063,14 @@ static bool smap_works_speculatively(void) static void __init spectre_v1_select_mitigation(void) { - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) { + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; +} + +static void __init spectre_v1_apply_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) return; - } if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { /* @@ -983,8 +1123,20 @@ enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; #undef pr_fmt #define pr_fmt(fmt) "RETBleed: " fmt +enum its_mitigation { + ITS_MITIGATION_OFF, + ITS_MITIGATION_AUTO, + ITS_MITIGATION_VMEXIT_ONLY, + ITS_MITIGATION_ALIGNED_THUNKS, + ITS_MITIGATION_RETPOLINE_STUFF, +}; + +static enum its_mitigation its_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF; + enum retbleed_mitigation { RETBLEED_MITIGATION_NONE, + RETBLEED_MITIGATION_AUTO, RETBLEED_MITIGATION_UNRET, RETBLEED_MITIGATION_IBPB, RETBLEED_MITIGATION_IBRS, @@ -992,14 +1144,6 @@ enum retbleed_mitigation { RETBLEED_MITIGATION_STUFF, }; -enum retbleed_mitigation_cmd { - RETBLEED_CMD_OFF, - RETBLEED_CMD_AUTO, - RETBLEED_CMD_UNRET, - RETBLEED_CMD_IBPB, - RETBLEED_CMD_STUFF, -}; - static const char * const retbleed_strings[] = { [RETBLEED_MITIGATION_NONE] = "Vulnerable", [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", @@ -1010,9 +1154,7 @@ static const char * const retbleed_strings[] = { }; static enum retbleed_mitigation retbleed_mitigation __ro_after_init = - RETBLEED_MITIGATION_NONE; -static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = - IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_CMD_AUTO : RETBLEED_CMD_OFF; + IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? 
RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE; static int __ro_after_init retbleed_nosmt = false; @@ -1029,15 +1171,15 @@ static int __init retbleed_parse_cmdline(char *str) } if (!strcmp(str, "off")) { - retbleed_cmd = RETBLEED_CMD_OFF; + retbleed_mitigation = RETBLEED_MITIGATION_NONE; } else if (!strcmp(str, "auto")) { - retbleed_cmd = RETBLEED_CMD_AUTO; + retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } else if (!strcmp(str, "unret")) { - retbleed_cmd = RETBLEED_CMD_UNRET; + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; } else if (!strcmp(str, "ibpb")) { - retbleed_cmd = RETBLEED_CMD_IBPB; + retbleed_mitigation = RETBLEED_MITIGATION_IBPB; } else if (!strcmp(str, "stuff")) { - retbleed_cmd = RETBLEED_CMD_STUFF; + retbleed_mitigation = RETBLEED_MITIGATION_STUFF; } else if (!strcmp(str, "nosmt")) { retbleed_nosmt = true; } else if (!strcmp(str, "force")) { @@ -1058,72 +1200,117 @@ early_param("retbleed", retbleed_parse_cmdline); static void __init retbleed_select_mitigation(void) { - bool mitigate_smt = false; - - if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) - return; - - switch (retbleed_cmd) { - case RETBLEED_CMD_OFF: + if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) { + retbleed_mitigation = RETBLEED_MITIGATION_NONE; return; + } - case RETBLEED_CMD_UNRET: - if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) { - retbleed_mitigation = RETBLEED_MITIGATION_UNRET; - } else { + switch (retbleed_mitigation) { + case RETBLEED_MITIGATION_UNRET: + if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) { + retbleed_mitigation = RETBLEED_MITIGATION_AUTO; pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n"); - goto do_cmd_auto; } break; - - case RETBLEED_CMD_IBPB: + case RETBLEED_MITIGATION_IBPB: if (!boot_cpu_has(X86_FEATURE_IBPB)) { pr_err("WARNING: CPU does not support IBPB.\n"); - goto do_cmd_auto; - } else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { - retbleed_mitigation = RETBLEED_MITIGATION_IBPB; - } else { + retbleed_mitigation = RETBLEED_MITIGATION_AUTO; + } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); - goto do_cmd_auto; + retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } break; + case RETBLEED_MITIGATION_STUFF: + if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { + pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n"); + retbleed_mitigation = RETBLEED_MITIGATION_AUTO; + } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { + pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n"); + retbleed_mitigation = RETBLEED_MITIGATION_AUTO; + } + break; + default: + break; + } - case RETBLEED_CMD_STUFF: - if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) && - spectre_v2_enabled == SPECTRE_V2_RETPOLINE) { - retbleed_mitigation = RETBLEED_MITIGATION_STUFF; + if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO) + return; + /* Intel mitigation selected in retbleed_update_mitigation() */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; + else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) && + boot_cpu_has(X86_FEATURE_IBPB)) + retbleed_mitigation = RETBLEED_MITIGATION_IBPB; + else + retbleed_mitigation = RETBLEED_MITIGATION_NONE; + } +} + +static void __init retbleed_update_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) + return; 
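	/*
	 * This is the "update" pass described in the header comment of this
	 * file: it runs only after every <vuln>_select_mitigation() has made
	 * its initial choice, so decisions that depend on other mitigations
	 * (here, on the final spectre_v2 and ITS choices) can be revised
	 * before anything is applied. A minimal sketch of the overall flow,
	 * using hypothetical X86_BUG_VULN / VULN_MITIGATION_* names purely
	 * for illustration (they are not real kernel symbols):
	 *
	 *	static void __init vuln_select_mitigation(void)
	 *	{
	 *		if (!boot_cpu_has_bug(X86_BUG_VULN) || cpu_mitigations_off())
	 *			vuln_mitigation = VULN_MITIGATION_OFF;
	 *		else if (vuln_mitigation == VULN_MITIGATION_AUTO)
	 *			vuln_mitigation = VULN_MITIGATION_FULL;
	 *	}
	 *
	 *	static void __init vuln_update_mitigation(void)
	 *	{
	 *		// Revise the choice based on what other vulnerabilities
	 *		// selected, then log the final mode.
	 *	}
	 *
	 *	static void __init vuln_apply_mitigation(void)
	 *	{
	 *		// Set feature bits or MSRs for the final choice.
	 *	}
	 *
	 * The compile-time default is AUTO; an explicit command-line option
	 * overrides it, select resolves AUTO to a concrete mode, update may
	 * revise that mode, and apply enables it.
	 */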
+ + if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) + goto out; + + /* + * retbleed=stuff is only allowed on Intel. If stuffing can't be used + * then a different mitigation will be selected below. + * + * its=stuff will also attempt to enable stuffing. + */ + if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF || + its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) { + if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) { + pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n"); + retbleed_mitigation = RETBLEED_MITIGATION_AUTO; } else { - if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) - pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n"); - else - pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n"); + if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) + pr_info("Retbleed mitigation updated to stuffing\n"); - goto do_cmd_auto; + retbleed_mitigation = RETBLEED_MITIGATION_STUFF; } - break; - -do_cmd_auto: - case RETBLEED_CMD_AUTO: - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) - retbleed_mitigation = RETBLEED_MITIGATION_UNRET; - else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) && - boot_cpu_has(X86_FEATURE_IBPB)) - retbleed_mitigation = RETBLEED_MITIGATION_IBPB; + } + /* + * Let IBRS trump all on Intel without affecting the effects of the + * retbleed= cmdline option except for call depth based stuffing + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { + switch (spectre_v2_enabled) { + case SPECTRE_V2_IBRS: + retbleed_mitigation = RETBLEED_MITIGATION_IBRS; + break; + case SPECTRE_V2_EIBRS: + case SPECTRE_V2_EIBRS_RETPOLINE: + case SPECTRE_V2_EIBRS_LFENCE: + retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; + break; + default: + if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) + pr_err(RETBLEED_INTEL_MSG); } + /* If nothing has set the mitigation yet, default to NONE. */ + if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO) + retbleed_mitigation = RETBLEED_MITIGATION_NONE; + } +out: + pr_info("%s\n", retbleed_strings[retbleed_mitigation]); +} - /* - * The Intel mitigation (IBRS or eIBRS) was already selected in - * spectre_v2_select_mitigation(). 'retbleed_mitigation' will - * be set accordingly below. 
- */ - break; - } +static void __init retbleed_apply_mitigation(void) +{ + bool mitigate_smt = false; switch (retbleed_mitigation) { + case RETBLEED_MITIGATION_NONE: + return; + case RETBLEED_MITIGATION_UNRET: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); @@ -1173,47 +1360,11 @@ do_cmd_auto: if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && (retbleed_nosmt || cpu_mitigations_auto_nosmt())) cpu_smt_disable(false); - - /* - * Let IBRS trump all on Intel without affecting the effects of the - * retbleed= cmdline option except for call depth based stuffing - */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { - switch (spectre_v2_enabled) { - case SPECTRE_V2_IBRS: - retbleed_mitigation = RETBLEED_MITIGATION_IBRS; - break; - case SPECTRE_V2_EIBRS: - case SPECTRE_V2_EIBRS_RETPOLINE: - case SPECTRE_V2_EIBRS_LFENCE: - retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; - break; - default: - if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) - pr_err(RETBLEED_INTEL_MSG); - } - } - - pr_info("%s\n", retbleed_strings[retbleed_mitigation]); } #undef pr_fmt #define pr_fmt(fmt) "ITS: " fmt -enum its_mitigation_cmd { - ITS_CMD_OFF, - ITS_CMD_ON, - ITS_CMD_VMEXIT, - ITS_CMD_RSB_STUFF, -}; - -enum its_mitigation { - ITS_MITIGATION_OFF, - ITS_MITIGATION_VMEXIT_ONLY, - ITS_MITIGATION_ALIGNED_THUNKS, - ITS_MITIGATION_RETPOLINE_STUFF, -}; - static const char * const its_strings[] = { [ITS_MITIGATION_OFF] = "Vulnerable", [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected", @@ -1221,11 +1372,6 @@ static const char * const its_strings[] = { [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB", }; -static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS; - -static enum its_mitigation_cmd its_cmd __ro_after_init = - IS_ENABLED(CONFIG_MITIGATION_ITS) ? 
ITS_CMD_ON : ITS_CMD_OFF; - static int __init its_parse_cmdline(char *str) { if (!str) @@ -1237,16 +1383,16 @@ static int __init its_parse_cmdline(char *str) } if (!strcmp(str, "off")) { - its_cmd = ITS_CMD_OFF; + its_mitigation = ITS_MITIGATION_OFF; } else if (!strcmp(str, "on")) { - its_cmd = ITS_CMD_ON; + its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; } else if (!strcmp(str, "force")) { - its_cmd = ITS_CMD_ON; + its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; setup_force_cpu_bug(X86_BUG_ITS); } else if (!strcmp(str, "vmexit")) { - its_cmd = ITS_CMD_VMEXIT; + its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; } else if (!strcmp(str, "stuff")) { - its_cmd = ITS_CMD_RSB_STUFF; + its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; } else { pr_err("Ignoring unknown indirect_target_selection option (%s).", str); } @@ -1257,85 +1403,90 @@ early_param("indirect_target_selection", its_parse_cmdline); static void __init its_select_mitigation(void) { - enum its_mitigation_cmd cmd = its_cmd; - if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) { its_mitigation = ITS_MITIGATION_OFF; return; } - /* Retpoline+CDT mitigates ITS, bail out */ - if (boot_cpu_has(X86_FEATURE_RETPOLINE) && - boot_cpu_has(X86_FEATURE_CALL_DEPTH)) { - its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; - goto out; - } + if (its_mitigation == ITS_MITIGATION_AUTO) + its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; + + if (its_mitigation == ITS_MITIGATION_OFF) + return; - /* Exit early to avoid irrelevant warnings */ - if (cmd == ITS_CMD_OFF) { - its_mitigation = ITS_MITIGATION_OFF; - goto out; - } - if (spectre_v2_enabled == SPECTRE_V2_NONE) { - pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); - its_mitigation = ITS_MITIGATION_OFF; - goto out; - } if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) { pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n"); its_mitigation = ITS_MITIGATION_OFF; - goto out; + return; } + if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n"); its_mitigation = ITS_MITIGATION_OFF; - goto out; - } - if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { - pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); - its_mitigation = ITS_MITIGATION_OFF; - goto out; + return; } - if (cmd == ITS_CMD_RSB_STUFF && - (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) { + if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && + !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { pr_err("RSB stuff mitigation not supported, using default\n"); - cmd = ITS_CMD_ON; + its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; } - switch (cmd) { - case ITS_CMD_OFF: + if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY && + !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) + its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; +} + +static void __init its_update_mitigation(void) +{ + if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) + return; + + switch (spectre_v2_enabled) { + case SPECTRE_V2_NONE: + pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); its_mitigation = ITS_MITIGATION_OFF; break; - case ITS_CMD_VMEXIT: - if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) { - its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; - goto out; - } - fallthrough; - case ITS_CMD_ON: - its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; - if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) - 
setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); - setup_force_cpu_cap(X86_FEATURE_RETHUNK); - set_return_thunk(its_return_thunk); + case SPECTRE_V2_RETPOLINE: + /* Retpoline+CDT mitigates ITS */ + if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) + its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; break; - case ITS_CMD_RSB_STUFF: - its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; - setup_force_cpu_cap(X86_FEATURE_RETHUNK); - setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); - set_return_thunk(call_depth_return_thunk); - if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) { - retbleed_mitigation = RETBLEED_MITIGATION_STUFF; - pr_info("Retbleed mitigation updated to stuffing\n"); - } + case SPECTRE_V2_LFENCE: + case SPECTRE_V2_EIBRS_LFENCE: + pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); + its_mitigation = ITS_MITIGATION_OFF; + break; + default: break; } -out: + + /* + * retbleed_update_mitigation() will try to do stuffing if its=stuff. + * If it can't, such as if spectre_v2!=retpoline, then fall back to + * aligned thunks. + */ + if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && + retbleed_mitigation != RETBLEED_MITIGATION_STUFF) + its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; + pr_info("%s\n", its_strings[its_mitigation]); } +static void __init its_apply_mitigation(void) +{ + /* its=stuff forces retbleed stuffing and is enabled there. */ + if (its_mitigation != ITS_MITIGATION_ALIGNED_THUNKS) + return; + + if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) + setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); + + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + set_return_thunk(its_return_thunk); +} + #undef pr_fmt #define pr_fmt(fmt) "Spectre V2 : " fmt @@ -1413,6 +1564,8 @@ enum spectre_v2_mitigation_cmd { SPECTRE_V2_CMD_IBRS, }; +static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO; + enum spectre_v2_user_cmd { SPECTRE_V2_USER_CMD_NONE, SPECTRE_V2_USER_CMD_AUTO, @@ -1451,31 +1604,18 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure) pr_info("spectre_v2_user=%s forced on command line.\n", reason); } -static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; - -static enum spectre_v2_user_cmd __init -spectre_v2_parse_user_cmdline(void) +static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void) { - enum spectre_v2_user_cmd mode; char arg[20]; int ret, i; - mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? - SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE; - - switch (spectre_v2_cmd) { - case SPECTRE_V2_CMD_NONE: + if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2)) return SPECTRE_V2_USER_CMD_NONE; - case SPECTRE_V2_CMD_FORCE: - return SPECTRE_V2_USER_CMD_FORCE; - default: - break; - } ret = cmdline_find_option(boot_command_line, "spectre_v2_user", arg, sizeof(arg)); if (ret < 0) - return mode; + return SPECTRE_V2_USER_CMD_AUTO; for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { if (match_option(arg, ret, v2_user_options[i].option)) { @@ -1486,7 +1626,7 @@ spectre_v2_parse_user_cmdline(void) } pr_err("Unknown user space protection option (%s). 
Switching to default\n", arg); - return mode; + return SPECTRE_V2_USER_CMD_AUTO; } static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) @@ -1494,60 +1634,72 @@ static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; } -static void __init -spectre_v2_user_select_mitigation(void) +static void __init spectre_v2_user_select_mitigation(void) { - enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; - enum spectre_v2_user_cmd cmd; - if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) return; - cmd = spectre_v2_parse_user_cmdline(); - switch (cmd) { + switch (spectre_v2_parse_user_cmdline()) { case SPECTRE_V2_USER_CMD_NONE: - goto set_mode; + return; case SPECTRE_V2_USER_CMD_FORCE: - mode = SPECTRE_V2_USER_STRICT; + spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; + spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; break; case SPECTRE_V2_USER_CMD_AUTO: case SPECTRE_V2_USER_CMD_PRCTL: + spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; + spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; + break; case SPECTRE_V2_USER_CMD_PRCTL_IBPB: - mode = SPECTRE_V2_USER_PRCTL; + spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; + spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; break; case SPECTRE_V2_USER_CMD_SECCOMP: + if (IS_ENABLED(CONFIG_SECCOMP)) + spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP; + else + spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; + spectre_v2_user_stibp = spectre_v2_user_ibpb; + break; case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; if (IS_ENABLED(CONFIG_SECCOMP)) - mode = SPECTRE_V2_USER_SECCOMP; + spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP; else - mode = SPECTRE_V2_USER_PRCTL; + spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; break; } - /* Initialize Indirect Branch Prediction Barrier */ - if (boot_cpu_has(X86_FEATURE_IBPB)) { - static_branch_enable(&switch_vcpu_ibpb); + /* + * At this point, an STIBP mode other than "off" has been set. + * If STIBP support is not being forced, check if STIBP always-on + * is preferred. + */ + if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) && + boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) + spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; - spectre_v2_user_ibpb = mode; - switch (cmd) { - case SPECTRE_V2_USER_CMD_NONE: - break; - case SPECTRE_V2_USER_CMD_FORCE: - case SPECTRE_V2_USER_CMD_PRCTL_IBPB: - case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: - static_branch_enable(&switch_mm_always_ibpb); - spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; - break; - case SPECTRE_V2_USER_CMD_PRCTL: - case SPECTRE_V2_USER_CMD_AUTO: - case SPECTRE_V2_USER_CMD_SECCOMP: - static_branch_enable(&switch_mm_cond_ibpb); - break; - } + if (!boot_cpu_has(X86_FEATURE_IBPB)) + spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; - pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", - static_key_enabled(&switch_mm_always_ibpb) ? 
- "always-on" : "conditional"); + if (!boot_cpu_has(X86_FEATURE_STIBP)) + spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; +} + +static void __init spectre_v2_user_update_mitigation(void) +{ + if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) + return; + + /* The spectre_v2 cmd line can override spectre_v2_user options */ + if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) { + spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; + spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; + } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) { + spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; + spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; } /* @@ -1565,30 +1717,44 @@ spectre_v2_user_select_mitigation(void) if (!boot_cpu_has(X86_FEATURE_STIBP) || !cpu_smt_possible() || (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && - !boot_cpu_has(X86_FEATURE_AUTOIBRS))) + !boot_cpu_has(X86_FEATURE_AUTOIBRS))) { + spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; return; + } - /* - * At this point, an STIBP mode other than "off" has been set. - * If STIBP support is not being forced, check if STIBP always-on - * is preferred. - */ - if (mode != SPECTRE_V2_USER_STRICT && - boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) - mode = SPECTRE_V2_USER_STRICT_PREFERRED; - - if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || - retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { - if (mode != SPECTRE_V2_USER_STRICT && - mode != SPECTRE_V2_USER_STRICT_PREFERRED) + if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE && + (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || + retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) { + if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT && + spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED) pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); - mode = SPECTRE_V2_USER_STRICT_PREFERRED; + spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; } + pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]); +} - spectre_v2_user_stibp = mode; +static void __init spectre_v2_user_apply_mitigation(void) +{ + /* Initialize Indirect Branch Prediction Barrier */ + if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) { + static_branch_enable(&switch_vcpu_ibpb); + + switch (spectre_v2_user_ibpb) { + case SPECTRE_V2_USER_STRICT: + static_branch_enable(&switch_mm_always_ibpb); + break; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + static_branch_enable(&switch_mm_cond_ibpb); + break; + default: + break; + } -set_mode: - pr_info("%s\n", spectre_v2_user_strings[mode]); + pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", + static_key_enabled(&switch_mm_always_ibpb) ? + "always-on" : "conditional"); + } } static const char * const spectre_v2_strings[] = { @@ -1808,12 +1974,13 @@ static bool __init spec_ctrl_bhi_dis(void) enum bhi_mitigations { BHI_MITIGATION_OFF, + BHI_MITIGATION_AUTO, BHI_MITIGATION_ON, BHI_MITIGATION_VMEXIT_ONLY, }; static enum bhi_mitigations bhi_mitigation __ro_after_init = - IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF; + IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? 
BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF; static int __init spectre_bhi_parse_cmdline(char *str) { @@ -1835,6 +2002,25 @@ early_param("spectre_bhi", spectre_bhi_parse_cmdline); static void __init bhi_select_mitigation(void) { + if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off()) + bhi_mitigation = BHI_MITIGATION_OFF; + + if (bhi_mitigation == BHI_MITIGATION_AUTO) + bhi_mitigation = BHI_MITIGATION_ON; +} + +static void __init bhi_update_mitigation(void) +{ + if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) + bhi_mitigation = BHI_MITIGATION_OFF; + + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && + spectre_v2_cmd == SPECTRE_V2_CMD_AUTO) + bhi_mitigation = BHI_MITIGATION_OFF; +} + +static void __init bhi_apply_mitigation(void) +{ if (bhi_mitigation == BHI_MITIGATION_OFF) return; @@ -1855,86 +2041,92 @@ static void __init bhi_select_mitigation(void) if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n"); - setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT); + setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); return; } pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n"); setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); - setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT); + setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); } static void __init spectre_v2_select_mitigation(void) { - enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); - enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; + spectre_v2_cmd = spectre_v2_parse_cmdline(); - /* - * If the CPU is not affected and the command line mode is NONE or AUTO - * then nothing to do. - */ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && - (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) + (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)) return; - switch (cmd) { + switch (spectre_v2_cmd) { case SPECTRE_V2_CMD_NONE: return; case SPECTRE_V2_CMD_FORCE: case SPECTRE_V2_CMD_AUTO: if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { - mode = SPECTRE_V2_EIBRS; - break; - } - - if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && - boot_cpu_has_bug(X86_BUG_RETBLEED) && - retbleed_cmd != RETBLEED_CMD_OFF && - retbleed_cmd != RETBLEED_CMD_STUFF && - boot_cpu_has(X86_FEATURE_IBRS) && - boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { - mode = SPECTRE_V2_IBRS; + spectre_v2_enabled = SPECTRE_V2_EIBRS; break; } - mode = spectre_v2_select_retpoline(); + spectre_v2_enabled = spectre_v2_select_retpoline(); break; case SPECTRE_V2_CMD_RETPOLINE_LFENCE: pr_err(SPECTRE_V2_LFENCE_MSG); - mode = SPECTRE_V2_LFENCE; + spectre_v2_enabled = SPECTRE_V2_LFENCE; break; case SPECTRE_V2_CMD_RETPOLINE_GENERIC: - mode = SPECTRE_V2_RETPOLINE; + spectre_v2_enabled = SPECTRE_V2_RETPOLINE; break; case SPECTRE_V2_CMD_RETPOLINE: - mode = spectre_v2_select_retpoline(); + spectre_v2_enabled = spectre_v2_select_retpoline(); break; case SPECTRE_V2_CMD_IBRS: - mode = SPECTRE_V2_IBRS; + spectre_v2_enabled = SPECTRE_V2_IBRS; break; case SPECTRE_V2_CMD_EIBRS: - mode = SPECTRE_V2_EIBRS; + spectre_v2_enabled = SPECTRE_V2_EIBRS; break; case SPECTRE_V2_CMD_EIBRS_LFENCE: - mode = SPECTRE_V2_EIBRS_LFENCE; + spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE; break; case SPECTRE_V2_CMD_EIBRS_RETPOLINE: - mode = SPECTRE_V2_EIBRS_RETPOLINE; + spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE; break; } +} + +static void __init spectre_v2_update_mitigation(void) +{ + if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO && + !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) { + if 
(IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && + boot_cpu_has_bug(X86_BUG_RETBLEED) && + retbleed_mitigation != RETBLEED_MITIGATION_NONE && + retbleed_mitigation != RETBLEED_MITIGATION_STUFF && + boot_cpu_has(X86_FEATURE_IBRS) && + boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { + spectre_v2_enabled = SPECTRE_V2_IBRS; + } + } - if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off()) + pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]); +} + +static void __init spectre_v2_apply_mitigation(void) +{ + if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); - if (spectre_v2_in_ibrs_mode(mode)) { + if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); } else { @@ -1943,8 +2135,10 @@ static void __init spectre_v2_select_mitigation(void) } } - switch (mode) { + switch (spectre_v2_enabled) { case SPECTRE_V2_NONE: + return; + case SPECTRE_V2_EIBRS: break; @@ -1970,18 +2164,12 @@ static void __init spectre_v2_select_mitigation(void) * JMPs gets protection against BHI and Intramode-BTI, but RET * prediction from a non-RSB predictor is still a risk. */ - if (mode == SPECTRE_V2_EIBRS_LFENCE || - mode == SPECTRE_V2_EIBRS_RETPOLINE || - mode == SPECTRE_V2_RETPOLINE) + if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE || + spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE || + spectre_v2_enabled == SPECTRE_V2_RETPOLINE) spec_ctrl_disable_kernel_rrsba(); - if (boot_cpu_has(X86_BUG_BHI)) - bhi_select_mitigation(); - - spectre_v2_enabled = mode; - pr_info("%s\n", spectre_v2_strings[mode]); - - spectre_v2_select_rsb_mitigation(mode); + spectre_v2_select_rsb_mitigation(spectre_v2_enabled); /* * Retpoline protects the kernel, but doesn't protect firmware. IBRS @@ -1989,28 +2177,26 @@ static void __init spectre_v2_select_mitigation(void) * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't * otherwise enabled. * - * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because - * the user might select retpoline on the kernel command line and if - * the CPU supports Enhanced IBRS, kernel might un-intentionally not - * enable IBRS around firmware calls. + * Use "spectre_v2_enabled" to check Enhanced IBRS instead of + * boot_cpu_has(), because the user might select retpoline on the kernel + * command line and if the CPU supports Enhanced IBRS, kernel might + * un-intentionally not enable IBRS around firmware calls. 
*/ if (boot_cpu_has_bug(X86_BUG_RETBLEED) && boot_cpu_has(X86_FEATURE_IBPB) && (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { - if (retbleed_cmd != RETBLEED_CMD_IBPB) { + if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); pr_info("Enabling Speculation Barrier for firmware calls\n"); } - } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { + } else if (boot_cpu_has(X86_FEATURE_IBRS) && + !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } - - /* Set up IBPB and STIBP depending on the general spectre V2 command */ - spectre_v2_cmd = cmd; } static void update_stibp_msr(void * __unused) @@ -2199,19 +2385,18 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) return cmd; } -static enum ssb_mitigation __init __ssb_select_mitigation(void) +static void __init ssb_select_mitigation(void) { - enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; enum ssb_mitigation_cmd cmd; if (!boot_cpu_has(X86_FEATURE_SSBD)) - return mode; + goto out; cmd = ssb_parse_cmdline(); if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && (cmd == SPEC_STORE_BYPASS_CMD_NONE || cmd == SPEC_STORE_BYPASS_CMD_AUTO)) - return mode; + return; switch (cmd) { case SPEC_STORE_BYPASS_CMD_SECCOMP: @@ -2220,28 +2405,35 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) * enabled. */ if (IS_ENABLED(CONFIG_SECCOMP)) - mode = SPEC_STORE_BYPASS_SECCOMP; + ssb_mode = SPEC_STORE_BYPASS_SECCOMP; else - mode = SPEC_STORE_BYPASS_PRCTL; + ssb_mode = SPEC_STORE_BYPASS_PRCTL; break; case SPEC_STORE_BYPASS_CMD_ON: - mode = SPEC_STORE_BYPASS_DISABLE; + ssb_mode = SPEC_STORE_BYPASS_DISABLE; break; case SPEC_STORE_BYPASS_CMD_AUTO: case SPEC_STORE_BYPASS_CMD_PRCTL: - mode = SPEC_STORE_BYPASS_PRCTL; + ssb_mode = SPEC_STORE_BYPASS_PRCTL; break; case SPEC_STORE_BYPASS_CMD_NONE: break; } +out: + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) + pr_info("%s\n", ssb_strings[ssb_mode]); +} + +static void __init ssb_apply_mitigation(void) +{ /* * We have three CPU feature flags that are in play here: * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation */ - if (mode == SPEC_STORE_BYPASS_DISABLE) { + if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); /* * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may @@ -2255,16 +2447,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) update_spec_ctrl(x86_spec_ctrl_base); } } - - return mode; -} - -static void ssb_select_mitigation(void) -{ - ssb_mode = __ssb_select_mitigation(); - - if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) - pr_info("%s\n", ssb_strings[ssb_mode]); } #undef pr_fmt @@ -2520,7 +2702,7 @@ EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); /* Default mitigation for L1TF-affected CPUs */ enum l1tf_mitigations l1tf_mitigation __ro_after_init = - IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_FLUSH : L1TF_MITIGATION_OFF; + IS_ENABLED(CONFIG_MITIGATION_L1TF) ? 
L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF; #if IS_ENABLED(CONFIG_KVM_INTEL) EXPORT_SYMBOL_GPL(l1tf_mitigation); #endif @@ -2568,22 +2750,33 @@ static void override_cache_bits(struct cpuinfo_x86 *c) static void __init l1tf_select_mitigation(void) { + if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) { + l1tf_mitigation = L1TF_MITIGATION_OFF; + return; + } + + if (l1tf_mitigation == L1TF_MITIGATION_AUTO) { + if (cpu_mitigations_auto_nosmt()) + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; + else + l1tf_mitigation = L1TF_MITIGATION_FLUSH; + } +} + +static void __init l1tf_apply_mitigation(void) +{ u64 half_pa; if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; - if (cpu_mitigations_off()) - l1tf_mitigation = L1TF_MITIGATION_OFF; - else if (cpu_mitigations_auto_nosmt()) - l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; - override_cache_bits(&boot_cpu_data); switch (l1tf_mitigation) { case L1TF_MITIGATION_OFF: case L1TF_MITIGATION_FLUSH_NOWARN: case L1TF_MITIGATION_FLUSH: + case L1TF_MITIGATION_AUTO: break; case L1TF_MITIGATION_FLUSH_NOSMT: case L1TF_MITIGATION_FULL: @@ -2643,6 +2836,7 @@ early_param("l1tf", l1tf_cmdline); enum srso_mitigation { SRSO_MITIGATION_NONE, + SRSO_MITIGATION_AUTO, SRSO_MITIGATION_UCODE_NEEDED, SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, SRSO_MITIGATION_MICROCODE, @@ -2652,14 +2846,6 @@ enum srso_mitigation { SRSO_MITIGATION_BP_SPEC_REDUCE, }; -enum srso_mitigation_cmd { - SRSO_CMD_OFF, - SRSO_CMD_MICROCODE, - SRSO_CMD_SAFE_RET, - SRSO_CMD_IBPB, - SRSO_CMD_IBPB_ON_VMEXIT, -}; - static const char * const srso_strings[] = { [SRSO_MITIGATION_NONE] = "Vulnerable", [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", @@ -2671,8 +2857,7 @@ static const char * const srso_strings[] = { [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" }; -static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; -static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; +static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO; static int __init srso_parse_cmdline(char *str) { @@ -2680,15 +2865,15 @@ static int __init srso_parse_cmdline(char *str) return -EINVAL; if (!strcmp(str, "off")) - srso_cmd = SRSO_CMD_OFF; + srso_mitigation = SRSO_MITIGATION_NONE; else if (!strcmp(str, "microcode")) - srso_cmd = SRSO_CMD_MICROCODE; + srso_mitigation = SRSO_MITIGATION_MICROCODE; else if (!strcmp(str, "safe-ret")) - srso_cmd = SRSO_CMD_SAFE_RET; + srso_mitigation = SRSO_MITIGATION_SAFE_RET; else if (!strcmp(str, "ibpb")) - srso_cmd = SRSO_CMD_IBPB; + srso_mitigation = SRSO_MITIGATION_IBPB; else if (!strcmp(str, "ibpb-vmexit")) - srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; + srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; else pr_err("Ignoring unknown SRSO option (%s).", str); @@ -2700,132 +2885,85 @@ early_param("spec_rstack_overflow", srso_parse_cmdline); static void __init srso_select_mitigation(void) { - bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE); + bool has_microcode; - if (!boot_cpu_has_bug(X86_BUG_SRSO) || - cpu_mitigations_off() || - srso_cmd == SRSO_CMD_OFF) { - if (boot_cpu_has(X86_FEATURE_SBPB)) - x86_pred_cmd = PRED_CMD_SBPB; - goto out; - } + if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) + srso_mitigation = SRSO_MITIGATION_NONE; + + if (srso_mitigation == SRSO_MITIGATION_NONE) + return; + + if (srso_mitigation == SRSO_MITIGATION_AUTO) + srso_mitigation = SRSO_MITIGATION_SAFE_RET; + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE); if (has_microcode) { /* * Zen1/2 
with SMT off aren't vulnerable after the right * IBPB microcode has been applied. - * - * Zen1/2 don't have SBPB, no need to try to enable it here. */ if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); - goto out; - } - - if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { - srso_mitigation = SRSO_MITIGATION_IBPB; - goto out; + srso_mitigation = SRSO_MITIGATION_NONE; + return; } } else { pr_warn("IBPB-extending microcode not applied!\n"); pr_warn(SRSO_NOTICE); - - /* may be overwritten by SRSO_CMD_SAFE_RET below */ - srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; } - switch (srso_cmd) { - case SRSO_CMD_MICROCODE: - if (has_microcode) { - srso_mitigation = SRSO_MITIGATION_MICROCODE; - pr_warn(SRSO_NOTICE); - } - break; - - case SRSO_CMD_SAFE_RET: - if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) + switch (srso_mitigation) { + case SRSO_MITIGATION_SAFE_RET: + if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { + srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; goto ibpb_on_vmexit; + } - if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) { - /* - * Enable the return thunk for generated code - * like ftrace, static_call, etc. - */ - setup_force_cpu_cap(X86_FEATURE_RETHUNK); - setup_force_cpu_cap(X86_FEATURE_UNRET); - - if (boot_cpu_data.x86 == 0x19) { - setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - set_return_thunk(srso_alias_return_thunk); - } else { - setup_force_cpu_cap(X86_FEATURE_SRSO); - set_return_thunk(srso_return_thunk); - } - if (has_microcode) - srso_mitigation = SRSO_MITIGATION_SAFE_RET; - else - srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; - } else { + if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); + srso_mitigation = SRSO_MITIGATION_NONE; } - break; - case SRSO_CMD_IBPB: - if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { - if (has_microcode) { - setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); - setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); - srso_mitigation = SRSO_MITIGATION_IBPB; - - /* - * IBPB on entry already obviates the need for - * software-based untraining so clear those in case some - * other mitigation like Retbleed has selected them. - */ - setup_clear_cpu_cap(X86_FEATURE_UNRET); - setup_clear_cpu_cap(X86_FEATURE_RETHUNK); - - /* - * There is no need for RSB filling: write_ibpb() ensures - * all predictions, including the RSB, are invalidated, - * regardless of IBPB implementation. - */ - setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); - } - } else { - pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); - } + if (!has_microcode) + srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; break; - ibpb_on_vmexit: - case SRSO_CMD_IBPB_ON_VMEXIT: + case SRSO_MITIGATION_IBPB_ON_VMEXIT: if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; break; } - - if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { - if (has_microcode) { - setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); - srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; - - /* - * There is no need for RSB filling: write_ibpb() ensures - * all predictions, including the RSB, are invalidated, - * regardless of IBPB implementation. 
- */ - setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); - } - } else { + fallthrough; + case SRSO_MITIGATION_IBPB: + if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); + srso_mitigation = SRSO_MITIGATION_NONE; } + + if (!has_microcode) + srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; break; default: break; } +} -out: +static void __init srso_update_mitigation(void) +{ + /* If retbleed is using IBPB, that works for SRSO as well */ + if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && + boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) + srso_mitigation = SRSO_MITIGATION_IBPB; + + if (boot_cpu_has_bug(X86_BUG_SRSO) && + !cpu_mitigations_off() && + !boot_cpu_has(X86_FEATURE_SRSO_NO)) + pr_info("%s\n", srso_strings[srso_mitigation]); +} + +static void __init srso_apply_mitigation(void) +{ /* * Clear the feature flag if this mitigation is not selected as that * feature flag controls the BpSpecReduce MSR bit toggling in KVM. @@ -2833,8 +2971,52 @@ out: if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); - if (srso_mitigation != SRSO_MITIGATION_NONE) - pr_info("%s\n", srso_strings[srso_mitigation]); + if (srso_mitigation == SRSO_MITIGATION_NONE) { + if (boot_cpu_has(X86_FEATURE_SBPB)) + x86_pred_cmd = PRED_CMD_SBPB; + return; + } + + switch (srso_mitigation) { + case SRSO_MITIGATION_SAFE_RET: + case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: + /* + * Enable the return thunk for generated code + * like ftrace, static_call, etc. + */ + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); + + if (boot_cpu_data.x86 == 0x19) { + setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); + set_return_thunk(srso_alias_return_thunk); + } else { + setup_force_cpu_cap(X86_FEATURE_SRSO); + set_return_thunk(srso_return_thunk); + } + break; + case SRSO_MITIGATION_IBPB: + setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + /* + * IBPB on entry already obviates the need for + * software-based untraining so clear those in case some + * other mitigation like Retbleed has selected them. + */ + setup_clear_cpu_cap(X86_FEATURE_UNRET); + setup_clear_cpu_cap(X86_FEATURE_RETHUNK); + fallthrough; + case SRSO_MITIGATION_IBPB_ON_VMEXIT: + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); + /* + * There is no need for RSB filling: entry_ibpb() ensures + * all predictions, including the RSB, are invalidated, + * regardless of IBPB implementation. 
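The rewritten command-line handling above maps spec_rstack_overflow={off,microcode,safe-ret,ibpb,ibpb-vmexit} straight onto srso_mitigation values instead of a separate srso_cmd, and srso_update_mitigation() prints the resulting state. For illustration only (not part of the patch), a minimal userspace sketch that reads that state back; it assumes nothing beyond the standard sysfs vulnerabilities file for SRSO:

    /* Print the SRSO mitigation state the kernel selected at boot. */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow", "r");

        if (!f) {
            perror("spec_rstack_overflow");
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);    /* e.g. "Mitigation: Safe RET" */
        fclose(f);
        return 0;
    }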
+ */ + setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); + break; + default: + break; + } } #undef pr_fmt @@ -2929,9 +3111,6 @@ static ssize_t tsx_async_abort_show_state(char *buf) static ssize_t mmio_stale_data_show_state(char *buf) { - if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) - return sysfs_emit(buf, "Unknown: No mitigations\n"); - if (mmio_mitigation == MMIO_MITIGATION_OFF) return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); @@ -2949,6 +3128,14 @@ static ssize_t rfds_show_state(char *buf) return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); } +static ssize_t old_microcode_show_state(char *buf) +{ + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return sysfs_emit(buf, "Unknown: running under hypervisor"); + + return sysfs_emit(buf, "Vulnerable\n"); +} + static ssize_t its_show_state(char *buf) { return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); @@ -3012,7 +3199,7 @@ static const char *spectre_bhi_state(void) !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && rrsba_disabled) return "; BHI: Retpoline"; - else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT)) + else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) return "; BHI: Vulnerable, KVM: SW loop"; return "; BHI: Vulnerable"; @@ -3121,7 +3308,6 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr return srbds_show_state(buf); case X86_BUG_MMIO_STALE_DATA: - case X86_BUG_MMIO_UNKNOWN: return mmio_stale_data_show_state(buf); case X86_BUG_RETBLEED: @@ -3136,6 +3322,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_RFDS: return rfds_show_state(buf); + case X86_BUG_OLD_MICROCODE: + return old_microcode_show_state(buf); + case X86_BUG_ITS: return its_show_state(buf); @@ -3193,10 +3382,7 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) { - if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) - return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); - else - return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); } ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) @@ -3219,6 +3405,11 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); } +ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE); +} + ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_ITS); diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c index 237faf7e700c..981f8b1f0792 100644 --- a/arch/x86/kernel/cpu/bus_lock.c +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -10,6 +10,7 @@ #include <asm/cmdline.h> #include <asm/traps.h> #include <asm/cpu.h> +#include <asm/msr.h> enum split_lock_detect_state { sld_off = 0, @@ -95,15 +96,15 @@ static bool split_lock_verify_msr(bool on) { u64 ctrl, tmp; - if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl)) + if (rdmsrq_safe(MSR_TEST_CTRL, &ctrl)) return false; if (on) ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; else ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT; - if (wrmsrl_safe(MSR_TEST_CTRL, ctrl)) + if (wrmsrq_safe(MSR_TEST_CTRL, ctrl)) return false; - rdmsrl(MSR_TEST_CTRL, tmp); + rdmsrq(MSR_TEST_CTRL, tmp); 
return ctrl == tmp; } @@ -137,7 +138,7 @@ static void __init __split_lock_setup(void) return; } - rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); + rdmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache); if (!split_lock_verify_msr(true)) { pr_info("MSR access failed: Disabled\n"); @@ -145,7 +146,7 @@ static void __init __split_lock_setup(void) } /* Restore the MSR to its cached value. */ - wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); + wrmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache); setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); } @@ -162,7 +163,7 @@ static void sld_update_msr(bool on) if (on) test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; - wrmsrl(MSR_TEST_CTRL, test_ctrl_val); + wrmsrq(MSR_TEST_CTRL, test_ctrl_val); } void split_lock_init(void) @@ -297,7 +298,7 @@ void bus_lock_init(void) if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) return; - rdmsrl(MSR_IA32_DEBUGCTLMSR, val); + rdmsrq(MSR_IA32_DEBUGCTLMSR, val); if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && (sld_state == sld_warn || sld_state == sld_fatal)) || @@ -311,7 +312,7 @@ void bus_lock_init(void) val |= DEBUGCTLMSR_BUS_LOCK_DETECT; } - wrmsrl(MSR_IA32_DEBUGCTLMSR, val); + wrmsrq(MSR_IA32_DEBUGCTLMSR, val); } bool handle_user_split_lock(struct pt_regs *regs, long error_code) @@ -375,7 +376,7 @@ static void __init split_lock_setup(struct cpuinfo_x86 *c) * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set * it have split lock detection. */ - rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); + rdmsrq(MSR_IA32_CORE_CAPS, ia32_core_caps); if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT) goto supported; diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index b3a520959b51..adfa7e8bb865 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -1,35 +1,28 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Routines to identify caches on Intel CPU. + * x86 CPU caches detection and configuration * - * Changes: - * Venkatesh Pallipadi : Adding cache identification through cpuid(4) - * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. - * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. + * Previous changes + * - Venkatesh Pallipadi: Cache identification through CPUID(0x4) + * - Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure + * - Andi Kleen / Andreas Herrmann: CPUID(0x4) emulation on AMD */ #include <linux/cacheinfo.h> -#include <linux/capability.h> #include <linux/cpu.h> #include <linux/cpuhotplug.h> -#include <linux/pci.h> #include <linux/stop_machine.h> -#include <linux/sysfs.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/cacheinfo.h> #include <asm/cpufeature.h> +#include <asm/cpuid/api.h> #include <asm/mtrr.h> #include <asm/smp.h> #include <asm/tlbflush.h> #include "cpu.h" -#define LVL_1_INST 1 -#define LVL_1_DATA 2 -#define LVL_2 3 -#define LVL_3 4 - /* Shared last level cache maps */ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); @@ -41,208 +34,127 @@ static cpumask_var_t cpu_cacheinfo_mask; /* Kernel controls MTRR and/or PAT MSRs. 
*/ unsigned int memory_caching_control __ro_after_init; -struct _cache_table { - unsigned char descriptor; - char cache_type; - short size; -}; - -#define MB(x) ((x) * 1024) - -/* All the cache descriptor types we care about (no TLB or - trace cache entries) */ - -static const struct _cache_table cache_table[] = -{ - { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ - { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ - { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ - { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ - { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ - { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ - { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ - { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ - { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ - { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ - { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */ - { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ - { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ - { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */ - { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ - { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ - { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ - { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */ - { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ - { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ - { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ - { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */ - { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ - { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ - { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ - { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */ - { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */ - { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */ - { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ - { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */ - { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ - { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ 
- { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ - { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ - { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ - { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ - { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ - { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ - { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */ - { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ - { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */ - { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ - { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */ - { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */ - { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */ - { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */ - { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ - { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */ - { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ - { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */ - { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */ - { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ - { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ - { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */ - { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */ - { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */ - { 0x00, 0, 0} -}; - - enum _cache_type { - CTYPE_NULL = 0, - CTYPE_DATA = 1, - CTYPE_INST = 2, - CTYPE_UNIFIED = 3 + CTYPE_NULL = 0, + CTYPE_DATA = 1, + CTYPE_INST = 2, + CTYPE_UNIFIED = 3 }; union _cpuid4_leaf_eax { struct { - enum _cache_type type:5; - unsigned int level:3; - unsigned int is_self_initializing:1; - unsigned int is_fully_associative:1; - unsigned int reserved:4; - unsigned int num_threads_sharing:12; - unsigned int num_cores_on_die:6; + enum _cache_type type :5; + unsigned int level :3; + unsigned int is_self_initializing :1; + unsigned int is_fully_associative :1; + unsigned int reserved :4; + unsigned int num_threads_sharing :12; + unsigned int num_cores_on_die :6; } split; u32 full; }; union _cpuid4_leaf_ebx { struct { - unsigned int coherency_line_size:12; - unsigned int physical_line_partition:10; - unsigned int ways_of_associativity:10; + unsigned int coherency_line_size :12; + unsigned int physical_line_partition :10; + unsigned int ways_of_associativity :10; } split; u32 full; }; union _cpuid4_leaf_ecx { struct { - unsigned int number_of_sets:32; + unsigned int number_of_sets :32; } split; u32 full; }; -struct _cpuid4_info_regs { +struct _cpuid4_info { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned int id; unsigned long size; - struct amd_northbridge *nb; }; -/* AMD doesn't have CPUID4. Emulate it here to report the same - information to the user. This makes some assumptions about the machine: - L2 not shared, no SMT etc. that is currently true on AMD CPUs. 
+/* Map CPUID(0x4) EAX.cache_type to <linux/cacheinfo.h> types */ +static const enum cache_type cache_type_map[] = { + [CTYPE_NULL] = CACHE_TYPE_NOCACHE, + [CTYPE_DATA] = CACHE_TYPE_DATA, + [CTYPE_INST] = CACHE_TYPE_INST, + [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED, +}; + +/* + * Fallback AMD CPUID(0x4) emulation + * AMD CPUs with TOPOEXT can just use CPUID(0x8000001d) + * + * @AMD_L2_L3_INVALID_ASSOC: cache info for the respective L2/L3 cache should + * be determined from CPUID(0x8000001d) instead of CPUID(0x80000006). + */ + +#define AMD_CPUID4_FULLY_ASSOCIATIVE 0xffff +#define AMD_L2_L3_INVALID_ASSOC 0x9 - In theory the TLBs could be reported as fake type (they are in "dummy"). - Maybe later */ union l1_cache { struct { - unsigned line_size:8; - unsigned lines_per_tag:8; - unsigned assoc:8; - unsigned size_in_kb:8; + unsigned line_size :8; + unsigned lines_per_tag :8; + unsigned assoc :8; + unsigned size_in_kb :8; }; - unsigned val; + unsigned int val; }; union l2_cache { struct { - unsigned line_size:8; - unsigned lines_per_tag:4; - unsigned assoc:4; - unsigned size_in_kb:16; + unsigned line_size :8; + unsigned lines_per_tag :4; + unsigned assoc :4; + unsigned size_in_kb :16; }; - unsigned val; + unsigned int val; }; union l3_cache { struct { - unsigned line_size:8; - unsigned lines_per_tag:4; - unsigned assoc:4; - unsigned res:2; - unsigned size_encoded:14; + unsigned line_size :8; + unsigned lines_per_tag :4; + unsigned assoc :4; + unsigned res :2; + unsigned size_encoded :14; }; - unsigned val; + unsigned int val; }; +/* L2/L3 associativity mapping */ static const unsigned short assocs[] = { - [1] = 1, - [2] = 2, - [4] = 4, - [6] = 8, - [8] = 16, - [0xa] = 32, - [0xb] = 48, - [0xc] = 64, - [0xd] = 96, - [0xe] = 128, - [0xf] = 0xffff /* fully associative - no way to show this currently */ + [1] = 1, + [2] = 2, + [3] = 3, + [4] = 4, + [5] = 6, + [6] = 8, + [8] = 16, + [0xa] = 32, + [0xb] = 48, + [0xc] = 64, + [0xd] = 96, + [0xe] = 128, + [0xf] = AMD_CPUID4_FULLY_ASSOCIATIVE }; static const unsigned char levels[] = { 1, 1, 2, 3 }; -static const unsigned char types[] = { 1, 2, 3, 3 }; +static const unsigned char types[] = { 1, 2, 3, 3 }; -static const enum cache_type cache_type_map[] = { - [CTYPE_NULL] = CACHE_TYPE_NOCACHE, - [CTYPE_DATA] = CACHE_TYPE_DATA, - [CTYPE_INST] = CACHE_TYPE_INST, - [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED, -}; - -static void -amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, - union _cpuid4_leaf_ebx *ebx, - union _cpuid4_leaf_ecx *ecx) +static void legacy_amd_cpuid4(int index, union _cpuid4_leaf_eax *eax, + union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx) { - unsigned dummy; - unsigned line_size, lines_per_tag, assoc, size_in_kb; - union l1_cache l1i, l1d; + unsigned int dummy, line_size, lines_per_tag, assoc, size_in_kb; + union l1_cache l1i, l1d, *l1; union l2_cache l2; union l3_cache l3; - union l1_cache *l1 = &l1d; eax->full = 0; ebx->full = 0; @@ -251,430 +163,155 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val); - switch (leaf) { + l1 = &l1d; + switch (index) { case 1: l1 = &l1i; fallthrough; case 0: if (!l1->val) return; - assoc = assocs[l1->assoc]; - line_size = l1->line_size; - lines_per_tag = l1->lines_per_tag; - size_in_kb = l1->size_in_kb; + + assoc = (l1->assoc == 0xff) ? 
AMD_CPUID4_FULLY_ASSOCIATIVE : l1->assoc; + line_size = l1->line_size; + lines_per_tag = l1->lines_per_tag; + size_in_kb = l1->size_in_kb; break; case 2: - if (!l2.val) + if (!l2.assoc || l2.assoc == AMD_L2_L3_INVALID_ASSOC) return; - assoc = assocs[l2.assoc]; - line_size = l2.line_size; - lines_per_tag = l2.lines_per_tag; - /* cpu_data has errata corrections for K7 applied */ - size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); + + /* Use x86_cache_size as it might have K7 errata fixes */ + assoc = assocs[l2.assoc]; + line_size = l2.line_size; + lines_per_tag = l2.lines_per_tag; + size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); break; case 3: - if (!l3.val) + if (!l3.assoc || l3.assoc == AMD_L2_L3_INVALID_ASSOC) return; - assoc = assocs[l3.assoc]; - line_size = l3.line_size; - lines_per_tag = l3.lines_per_tag; - size_in_kb = l3.size_encoded * 512; + + assoc = assocs[l3.assoc]; + line_size = l3.line_size; + lines_per_tag = l3.lines_per_tag; + size_in_kb = l3.size_encoded * 512; if (boot_cpu_has(X86_FEATURE_AMD_DCM)) { - size_in_kb = size_in_kb >> 1; - assoc = assoc >> 1; + size_in_kb = size_in_kb >> 1; + assoc = assoc >> 1; } break; default: return; } - eax->split.is_self_initializing = 1; - eax->split.type = types[leaf]; - eax->split.level = levels[leaf]; - eax->split.num_threads_sharing = 0; - eax->split.num_cores_on_die = topology_num_cores_per_package(); + eax->split.is_self_initializing = 1; + eax->split.type = types[index]; + eax->split.level = levels[index]; + eax->split.num_threads_sharing = 0; + eax->split.num_cores_on_die = topology_num_cores_per_package(); - - if (assoc == 0xffff) + if (assoc == AMD_CPUID4_FULLY_ASSOCIATIVE) eax->split.is_fully_associative = 1; - ebx->split.coherency_line_size = line_size - 1; - ebx->split.ways_of_associativity = assoc - 1; - ebx->split.physical_line_partition = lines_per_tag - 1; - ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / - (ebx->split.ways_of_associativity + 1) - 1; -} - -#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) - -/* - * L3 cache descriptors - */ -static void amd_calc_l3_indices(struct amd_northbridge *nb) -{ - struct amd_l3_cache *l3 = &nb->l3_cache; - unsigned int sc0, sc1, sc2, sc3; - u32 val = 0; - - pci_read_config_dword(nb->misc, 0x1C4, &val); - - /* calculate subcache sizes */ - l3->subcaches[0] = sc0 = !(val & BIT(0)); - l3->subcaches[1] = sc1 = !(val & BIT(4)); - - if (boot_cpu_data.x86 == 0x15) { - l3->subcaches[0] = sc0 += !(val & BIT(1)); - l3->subcaches[1] = sc1 += !(val & BIT(5)); - } - - l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); - l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); - - l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; -} - -/* - * check whether a slot used for disabling an L3 index is occupied. - * @l3: L3 cache descriptor - * @slot: slot number (0..1) - * - * @returns: the disabled index if used or negative value if slot free. 
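The emulated leaf encodes geometry the same "value minus one" way real CPUID(0x4) does, so the number_of_sets computation in legacy_amd_cpuid4() and the size recovery in cpuid4_info_fill_done() below are exact inverses. A standalone sketch with illustrative numbers only (512 KiB, 8-way, 64-byte lines, one line partition):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int size_in_kb = 512, assoc = 8, line_size = 64;
        unsigned int ways_enc  = assoc - 1;      /* ebx.ways_of_associativity   */
        unsigned int line_enc  = line_size - 1;  /* ebx.coherency_line_size     */
        unsigned int parts_enc = 0;              /* ebx.physical_line_partition */
        unsigned int sets_enc  = (size_in_kb * 1024) / line_size / (ways_enc + 1) - 1;

        /* size recovery, cpuid4_info_fill_done() style */
        unsigned long size = (unsigned long)(sets_enc + 1) * (line_enc + 1) *
                             (parts_enc + 1) * (ways_enc + 1);

        assert(size == size_in_kb * 1024UL);
        printf("sets=%u size=%lu bytes\n", sets_enc + 1, size);
        return 0;
    }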
- */ -static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot) -{ - unsigned int reg = 0; - - pci_read_config_dword(nb->misc, 0x1BC + slot * 4, ®); - - /* check whether this slot is activated already */ - if (reg & (3UL << 30)) - return reg & 0xfff; - - return -1; -} - -static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf, - unsigned int slot) -{ - int index; - struct amd_northbridge *nb = this_leaf->priv; - - index = amd_get_l3_disable_slot(nb, slot); - if (index >= 0) - return sprintf(buf, "%d\n", index); - - return sprintf(buf, "FREE\n"); -} - -#define SHOW_CACHE_DISABLE(slot) \ -static ssize_t \ -cache_disable_##slot##_show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ - return show_cache_disable(this_leaf, buf, slot); \ -} -SHOW_CACHE_DISABLE(0) -SHOW_CACHE_DISABLE(1) - -static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu, - unsigned slot, unsigned long idx) -{ - int i; - idx |= BIT(30); - - /* - * disable index in all 4 subcaches - */ - for (i = 0; i < 4; i++) { - u32 reg = idx | (i << 20); - - if (!nb->l3_cache.subcaches[i]) - continue; - - pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); - - /* - * We need to WBINVD on a core on the node containing the L3 - * cache which indices we disable therefore a simple wbinvd() - * is not sufficient. - */ - wbinvd_on_cpu(cpu); - - reg |= BIT(31); - pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); - } -} - -/* - * disable a L3 cache index by using a disable-slot - * - * @l3: L3 cache descriptor - * @cpu: A CPU on the node containing the L3 cache - * @slot: slot number (0..1) - * @index: index to disable - * - * @return: 0 on success, error status on failure - */ -static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, - unsigned slot, unsigned long index) -{ - int ret = 0; - - /* check if @slot is already used or the index is already disabled */ - ret = amd_get_l3_disable_slot(nb, slot); - if (ret >= 0) - return -EEXIST; - - if (index > nb->l3_cache.indices) - return -EINVAL; - - /* check whether the other slot has disabled the same index already */ - if (index == amd_get_l3_disable_slot(nb, !slot)) - return -EEXIST; - - amd_l3_disable_index(nb, cpu, slot, index); - - return 0; -} - -static ssize_t store_cache_disable(struct cacheinfo *this_leaf, - const char *buf, size_t count, - unsigned int slot) -{ - unsigned long val = 0; - int cpu, err = 0; - struct amd_northbridge *nb = this_leaf->priv; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - cpu = cpumask_first(&this_leaf->shared_cpu_map); - - if (kstrtoul(buf, 10, &val) < 0) - return -EINVAL; - - err = amd_set_l3_disable_slot(nb, cpu, slot, val); - if (err) { - if (err == -EEXIST) - pr_warn("L3 slot %d in use/index already disabled!\n", - slot); - return err; - } - return count; -} - -#define STORE_CACHE_DISABLE(slot) \ -static ssize_t \ -cache_disable_##slot##_store(struct device *dev, \ - struct device_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ - return store_cache_disable(this_leaf, buf, count, slot); \ -} -STORE_CACHE_DISABLE(0) -STORE_CACHE_DISABLE(1) - -static ssize_t subcaches_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cacheinfo *this_leaf = dev_get_drvdata(dev); - int cpu = cpumask_first(&this_leaf->shared_cpu_map); - - return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); -} - -static ssize_t 
subcaches_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cacheinfo *this_leaf = dev_get_drvdata(dev); - int cpu = cpumask_first(&this_leaf->shared_cpu_map); - unsigned long val; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (kstrtoul(buf, 16, &val) < 0) - return -EINVAL; - - if (amd_set_subcaches(cpu, val)) - return -EINVAL; - - return count; + ebx->split.coherency_line_size = line_size - 1; + ebx->split.ways_of_associativity = assoc - 1; + ebx->split.physical_line_partition = lines_per_tag - 1; + ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / + (ebx->split.ways_of_associativity + 1) - 1; } -static DEVICE_ATTR_RW(cache_disable_0); -static DEVICE_ATTR_RW(cache_disable_1); -static DEVICE_ATTR_RW(subcaches); - -static umode_t -cache_private_attrs_is_visible(struct kobject *kobj, - struct attribute *attr, int unused) +static int cpuid4_info_fill_done(struct _cpuid4_info *id4, union _cpuid4_leaf_eax eax, + union _cpuid4_leaf_ebx ebx, union _cpuid4_leaf_ecx ecx) { - struct device *dev = kobj_to_dev(kobj); - struct cacheinfo *this_leaf = dev_get_drvdata(dev); - umode_t mode = attr->mode; - - if (!this_leaf->priv) - return 0; - - if ((attr == &dev_attr_subcaches.attr) && - amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) - return mode; + if (eax.split.type == CTYPE_NULL) + return -EIO; - if ((attr == &dev_attr_cache_disable_0.attr || - attr == &dev_attr_cache_disable_1.attr) && - amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) - return mode; + id4->eax = eax; + id4->ebx = ebx; + id4->ecx = ecx; + id4->size = (ecx.split.number_of_sets + 1) * + (ebx.split.coherency_line_size + 1) * + (ebx.split.physical_line_partition + 1) * + (ebx.split.ways_of_associativity + 1); return 0; } -static struct attribute_group cache_private_group = { - .is_visible = cache_private_attrs_is_visible, -}; - -static void init_amd_l3_attrs(void) -{ - int n = 1; - static struct attribute **amd_l3_attrs; - - if (amd_l3_attrs) /* already initialized */ - return; - - if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) - n += 2; - if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) - n += 1; - - amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL); - if (!amd_l3_attrs) - return; - - n = 0; - if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { - amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr; - amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr; - } - if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) - amd_l3_attrs[n++] = &dev_attr_subcaches.attr; - - cache_private_group.attrs = amd_l3_attrs; -} - -const struct attribute_group * -cache_get_priv_group(struct cacheinfo *this_leaf) +static int amd_fill_cpuid4_info(int index, struct _cpuid4_info *id4) { - struct amd_northbridge *nb = this_leaf->priv; - - if (this_leaf->level < 3 || !nb) - return NULL; + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; + u32 ignored; - if (nb && nb->l3_cache.indices) - init_amd_l3_attrs(); + if (boot_cpu_has(X86_FEATURE_TOPOEXT) || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &ignored); + else + legacy_amd_cpuid4(index, &eax, &ebx, &ecx); - return &cache_private_group; + return cpuid4_info_fill_done(id4, eax, ebx, ecx); } -static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) +static int intel_fill_cpuid4_info(int index, struct _cpuid4_info *id4) { - int node; + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; 
+ u32 ignored; - /* only for L3, and not in virtualized environments */ - if (index < 3) - return; + cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &ignored); - node = topology_amd_node_id(smp_processor_id()); - this_leaf->nb = node_to_amd_nb(node); - if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) - amd_calc_l3_indices(this_leaf->nb); + return cpuid4_info_fill_done(id4, eax, ebx, ecx); } -#else -#define amd_init_l3_cache(x, y) -#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ -static int -cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) +static int fill_cpuid4_info(int index, struct _cpuid4_info *id4) { - union _cpuid4_leaf_eax eax; - union _cpuid4_leaf_ebx ebx; - union _cpuid4_leaf_ecx ecx; - unsigned edx; - - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { - if (boot_cpu_has(X86_FEATURE_TOPOEXT)) - cpuid_count(0x8000001d, index, &eax.full, - &ebx.full, &ecx.full, &edx); - else - amd_cpuid4(index, &eax, &ebx, &ecx); - amd_init_l3_cache(this_leaf, index); - } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - cpuid_count(0x8000001d, index, &eax.full, - &ebx.full, &ecx.full, &edx); - amd_init_l3_cache(this_leaf, index); - } else { - cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); - } + u8 cpu_vendor = boot_cpu_data.x86_vendor; - if (eax.split.type == CTYPE_NULL) - return -EIO; /* better error ? */ - - this_leaf->eax = eax; - this_leaf->ebx = ebx; - this_leaf->ecx = ecx; - this_leaf->size = (ecx.split.number_of_sets + 1) * - (ebx.split.coherency_line_size + 1) * - (ebx.split.physical_line_partition + 1) * - (ebx.split.ways_of_associativity + 1); - return 0; + return (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON) ? + amd_fill_cpuid4_info(index, id4) : + intel_fill_cpuid4_info(index, id4); } static int find_num_cache_leaves(struct cpuinfo_x86 *c) { - unsigned int eax, ebx, ecx, edx, op; - union _cpuid4_leaf_eax cache_eax; - int i = -1; - - if (c->x86_vendor == X86_VENDOR_AMD || - c->x86_vendor == X86_VENDOR_HYGON) - op = 0x8000001d; - else - op = 4; + unsigned int eax, ebx, ecx, edx, op; + union _cpuid4_leaf_eax cache_eax; + int i = -1; + /* Do a CPUID(op) loop to calculate num_cache_leaves */ + op = (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) ? 0x8000001d : 4; do { ++i; - /* Do cpuid(op) loop to find out num_cache_leaves */ cpuid_count(op, i, &eax, &ebx, &ecx, &edx); cache_eax.full = eax; } while (cache_eax.split.type != CTYPE_NULL); return i; } +/* + * AMD/Hygon CPUs may have multiple LLCs if L3 caches exist. + */ + void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id) { - /* - * We may have multiple LLCs if L3 caches exist, so check if we - * have an L3 cache by looking at the L3 cache CPUID leaf. - */ - if (!cpuid_edx(0x80000006)) + if (!cpuid_amd_hygon_has_l3_cache()) return; if (c->x86 < 0x17) { - /* LLC is at the node level. */ + /* Pre-Zen: LLC is at the node level */ c->topo.llc_id = die_id; } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) { /* - * LLC is at the core complex level. - * Core complex ID is ApicId[3] for these processors. + * Family 17h up to 1F models: LLC is at the core + * complex level. Core complex ID is ApicId[3]. */ c->topo.llc_id = c->topo.apicid >> 3; } else { /* - * LLC ID is calculated from the number of threads sharing the - * cache. - * */ + * Newer families: LLC ID is calculated from the number + * of threads sharing the L3 cache. 
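find_num_cache_leaves() counts leaves by re-issuing the deterministic cache parameters leaf until the type field reads CTYPE_NULL, and fill_cpuid4_info() picks the Intel (0x4) or AMD/Hygon (0x8000001d) flavour. The same walk can be done from userspace; a rough sketch for the Intel leaf, rebuilding the size the way cpuid4_info_fill_done() does (compiler cpuid.h helpers assumed):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        for (unsigned int i = 0; ; i++) {
            if (!__get_cpuid_count(4, i, &eax, &ebx, &ecx, &edx))
                break;

            unsigned int type = eax & 0x1f;          /* 0 == CTYPE_NULL, no more caches */
            if (!type)
                break;

            unsigned int level  = (eax >> 5) & 0x7;
            unsigned long line  = (ebx & 0xfff) + 1;
            unsigned long parts = ((ebx >> 12) & 0x3ff) + 1;
            unsigned long ways  = ((ebx >> 22) & 0x3ff) + 1;
            unsigned long sets  = (unsigned long)ecx + 1;

            printf("index %u: L%u type %u, %lu KiB\n", i, level, type,
                   ways * parts * line * sets / 1024);
        }
        return 0;
    }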
+ */ u32 eax, ebx, ecx, edx, num_sharing_cache = 0; u32 llc_index = find_num_cache_leaves(c) - 1; @@ -683,25 +320,21 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id) num_sharing_cache = ((eax >> 14) & 0xfff) + 1; if (num_sharing_cache) { - int bits = get_count_order(num_sharing_cache); + int index_msb = get_count_order(num_sharing_cache); - c->topo.llc_id = c->topo.apicid >> bits; + c->topo.llc_id = c->topo.apicid >> index_msb; } } } void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c) { - /* - * We may have multiple LLCs if L3 caches exist, so check if we - * have an L3 cache by looking at the L3 cache CPUID leaf. - */ - if (!cpuid_edx(0x80000006)) + if (!cpuid_amd_hygon_has_l3_cache()) return; /* - * LLC is at the core complex level. - * Core complex ID is ApicId[3] for these processors. + * Hygons are similar to AMD Family 17h up to 1F models: LLC is + * at the core complex level. Core complex ID is ApicId[3]. */ c->topo.llc_id = c->topo.apicid >> 3; } @@ -710,14 +343,10 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c) { struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index); - if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) ci->num_leaves = find_num_cache_leaves(c); - } else if (c->extended_cpuid_level >= 0x80000006) { - if (cpuid_edx(0x80000006) & 0xf000) - ci->num_leaves = 4; - else - ci->num_leaves = 3; - } + else if (c->extended_cpuid_level >= 0x80000006) + ci->num_leaves = (cpuid_edx(0x80000006) & 0xf000) ? 4 : 3; } void init_hygon_cacheinfo(struct cpuinfo_x86 *c) @@ -727,148 +356,131 @@ void init_hygon_cacheinfo(struct cpuinfo_x86 *c) ci->num_leaves = find_num_cache_leaves(c); } -void init_intel_cacheinfo(struct cpuinfo_x86 *c) +static void intel_cacheinfo_done(struct cpuinfo_x86 *c, unsigned int l3, + unsigned int l2, unsigned int l1i, unsigned int l1d) { - /* Cache sizes */ - unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; - unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ - unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ - unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; - struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index); + /* + * If llc_id is still unset, then cpuid_level < 4, which implies + * that the only possibility left is SMT. Since CPUID(0x2) doesn't + * specify any shared caches and SMT shares all caches, we can + * unconditionally set LLC ID to the package ID so that all + * threads share it. + */ + if (c->topo.llc_id == BAD_APICID) + c->topo.llc_id = c->topo.pkg_id; - if (c->cpuid_level > 3) { - /* - * There should be at least one leaf. A non-zero value means - * that the number of leaves has been initialized. - */ - if (!ci->num_leaves) - ci->num_leaves = find_num_cache_leaves(c); + c->x86_cache_size = l3 ? l3 : (l2 ? l2 : l1i + l1d); - /* - * Whenever possible use cpuid(4), deterministic cache - * parameters cpuid leaf to find the cache details - */ - for (i = 0; i < ci->num_leaves; i++) { - struct _cpuid4_info_regs this_leaf = {}; - int retval; + if (!l2) + cpu_detect_cache_sizes(c); +} - retval = cpuid4_cache_lookup_regs(i, &this_leaf); - if (retval < 0) - continue; +/* + * Legacy Intel CPUID(0x2) path if CPUID(0x4) is not available. 
+ */ +static void intel_cacheinfo_0x2(struct cpuinfo_x86 *c) +{ + unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; + const struct leaf_0x2_table *desc; + union leaf_0x2_regs regs; + u8 *ptr; - switch (this_leaf.eax.split.level) { - case 1: - if (this_leaf.eax.split.type == CTYPE_DATA) - new_l1d = this_leaf.size/1024; - else if (this_leaf.eax.split.type == CTYPE_INST) - new_l1i = this_leaf.size/1024; - break; - case 2: - new_l2 = this_leaf.size/1024; - num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; - index_msb = get_count_order(num_threads_sharing); - l2_id = c->topo.apicid & ~((1 << index_msb) - 1); - break; - case 3: - new_l3 = this_leaf.size/1024; - num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; - index_msb = get_count_order(num_threads_sharing); - l3_id = c->topo.apicid & ~((1 << index_msb) - 1); - break; - default: - break; - } - } - } + if (c->cpuid_level < 2) + return; - /* Don't use CPUID(2) if CPUID(4) is supported. */ - if (!ci->num_leaves && c->cpuid_level > 1) { - /* supports eax=2 call */ - int j, n; - unsigned int regs[4]; - unsigned char *dp = (unsigned char *)regs; - - /* Number of times to iterate */ - n = cpuid_eax(2) & 0xFF; - - for (i = 0 ; i < n ; i++) { - cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); - - /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 4 ; j++) - if (regs[j] & (1 << 31)) - regs[j] = 0; - - /* Byte 0 is level count, not a descriptor */ - for (j = 1 ; j < 16 ; j++) { - unsigned char des = dp[j]; - unsigned char k = 0; - - /* look up this descriptor in the table */ - while (cache_table[k].descriptor != 0) { - if (cache_table[k].descriptor == des) { - switch (cache_table[k].cache_type) { - case LVL_1_INST: - l1i += cache_table[k].size; - break; - case LVL_1_DATA: - l1d += cache_table[k].size; - break; - case LVL_2: - l2 += cache_table[k].size; - break; - case LVL_3: - l3 += cache_table[k].size; - break; - } - - break; - } - - k++; - } - } + cpuid_leaf_0x2(®s); + for_each_cpuid_0x2_desc(regs, ptr, desc) { + switch (desc->c_type) { + case CACHE_L1_INST: l1i += desc->c_size; break; + case CACHE_L1_DATA: l1d += desc->c_size; break; + case CACHE_L2: l2 += desc->c_size; break; + case CACHE_L3: l3 += desc->c_size; break; } } - if (new_l1d) - l1d = new_l1d; + intel_cacheinfo_done(c, l3, l2, l1i, l1d); +} - if (new_l1i) - l1i = new_l1i; +static unsigned int calc_cache_topo_id(struct cpuinfo_x86 *c, const struct _cpuid4_info *id4) +{ + unsigned int num_threads_sharing; + int index_msb; - if (new_l2) { - l2 = new_l2; - c->topo.llc_id = l2_id; - c->topo.l2c_id = l2_id; - } + num_threads_sharing = 1 + id4->eax.split.num_threads_sharing; + index_msb = get_count_order(num_threads_sharing); + return c->topo.apicid & ~((1 << index_msb) - 1); +} - if (new_l3) { - l3 = new_l3; - c->topo.llc_id = l3_id; - } +static bool intel_cacheinfo_0x4(struct cpuinfo_x86 *c) +{ + struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index); + unsigned int l2_id = BAD_APICID, l3_id = BAD_APICID; + unsigned int l1d = 0, l1i = 0, l2 = 0, l3 = 0; + + if (c->cpuid_level < 4) + return false; /* - * If llc_id is not yet set, this means cpuid_level < 4 which in - * turns means that the only possibility is SMT (as indicated in - * cpuid1). Since cpuid2 doesn't specify shared caches, and we know - * that SMT shares all caches, we can unconditionally set cpu_llc_id to - * c->topo.pkg_id. + * There should be at least one leaf. A non-zero value means + * that the number of leaves has been previously initialized. 
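intel_cacheinfo_0x2() above sums the per-descriptor sizes that the new for_each_cpuid_0x2_desc() iterator yields; mapping a descriptor byte to a size is what the cpuid_0x2_table.c added elsewhere in this patch provides. As a rough userspace illustration of the raw leaf (a single CPUID invocation, which is enough on current CPUs; descriptor decoding omitted):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int regs[4];

        __cpuid(2, regs[0], regs[1], regs[2], regs[3]);
        regs[0] &= 0xffffff00;          /* low byte of EAX is an iteration count, not a descriptor */

        for (int r = 0; r < 4; r++) {
            if (regs[r] & 0x80000000u)  /* bit 31 set: register holds no valid descriptors */
                continue;
            for (int b = 0; b < 4; b++) {
                unsigned char desc = (regs[r] >> (8 * b)) & 0xff;
                if (desc)
                    printf("descriptor 0x%02x\n", desc);
            }
        }
        return 0;
    }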
*/ - if (c->topo.llc_id == BAD_APICID) - c->topo.llc_id = c->topo.pkg_id; + if (!ci->num_leaves) + ci->num_leaves = find_num_cache_leaves(c); + + if (!ci->num_leaves) + return false; - c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); + for (int i = 0; i < ci->num_leaves; i++) { + struct _cpuid4_info id4 = {}; + int ret; - if (!l2) - cpu_detect_cache_sizes(c); + ret = intel_fill_cpuid4_info(i, &id4); + if (ret < 0) + continue; + + switch (id4.eax.split.level) { + case 1: + if (id4.eax.split.type == CTYPE_DATA) + l1d = id4.size / 1024; + else if (id4.eax.split.type == CTYPE_INST) + l1i = id4.size / 1024; + break; + case 2: + l2 = id4.size / 1024; + l2_id = calc_cache_topo_id(c, &id4); + break; + case 3: + l3 = id4.size / 1024; + l3_id = calc_cache_topo_id(c, &id4); + break; + default: + break; + } + } + + c->topo.l2c_id = l2_id; + c->topo.llc_id = (l3_id == BAD_APICID) ? l2_id : l3_id; + intel_cacheinfo_done(c, l3, l2, l1i, l1d); + return true; +} + +void init_intel_cacheinfo(struct cpuinfo_x86 *c) +{ + /* Don't use CPUID(0x2) if CPUID(0x4) is supported. */ + if (intel_cacheinfo_0x4(c)) + return; + + intel_cacheinfo_0x2(c); } +/* + * <linux/cacheinfo.h> shared_cpu_map setup, AMD/Hygon + */ static int __cache_amd_cpumap_setup(unsigned int cpu, int index, - struct _cpuid4_info_regs *base) + const struct _cpuid4_info *id4) { struct cpu_cacheinfo *this_cpu_ci; - struct cacheinfo *this_leaf; + struct cacheinfo *ci; int i, sibling; /* @@ -880,18 +492,18 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index, this_cpu_ci = get_cpu_cacheinfo(i); if (!this_cpu_ci->info_list) continue; - this_leaf = this_cpu_ci->info_list + index; + + ci = this_cpu_ci->info_list + index; for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { if (!cpu_online(sibling)) continue; - cpumask_set_cpu(sibling, - &this_leaf->shared_cpu_map); + cpumask_set_cpu(sibling, &ci->shared_cpu_map); } } } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { unsigned int apicid, nshared, first, last; - nshared = base->eax.split.num_threads_sharing + 1; + nshared = id4->eax.split.num_threads_sharing + 1; apicid = cpu_data(cpu).topo.apicid; first = apicid - (apicid % nshared); last = first + nshared - 1; @@ -905,14 +517,13 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index, if ((apicid < first) || (apicid > last)) continue; - this_leaf = this_cpu_ci->info_list + index; + ci = this_cpu_ci->info_list + index; for_each_online_cpu(sibling) { apicid = cpu_data(sibling).topo.apicid; if ((apicid < first) || (apicid > last)) continue; - cpumask_set_cpu(sibling, - &this_leaf->shared_cpu_map); + cpumask_set_cpu(sibling, &ci->shared_cpu_map); } } } else @@ -921,25 +532,27 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index, return 1; } +/* + * <linux/cacheinfo.h> shared_cpu_map setup, Intel + fallback AMD/Hygon + */ static void __cache_cpumap_setup(unsigned int cpu, int index, - struct _cpuid4_info_regs *base) + const struct _cpuid4_info *id4) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct cacheinfo *this_leaf, *sibling_leaf; + struct cpuinfo_x86 *c = &cpu_data(cpu); + struct cacheinfo *ci, *sibling_ci; unsigned long num_threads_sharing; int index_msb, i; - struct cpuinfo_x86 *c = &cpu_data(cpu); - if (c->x86_vendor == X86_VENDOR_AMD || - c->x86_vendor == X86_VENDOR_HYGON) { - if (__cache_amd_cpumap_setup(cpu, index, base)) + if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { + if (__cache_amd_cpumap_setup(cpu, index, id4)) return; } - this_leaf = 
this_cpu_ci->info_list + index; - num_threads_sharing = 1 + base->eax.split.num_threads_sharing; + ci = this_cpu_ci->info_list + index; + num_threads_sharing = 1 + id4->eax.split.num_threads_sharing; - cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + cpumask_set_cpu(cpu, &ci->shared_cpu_map); if (num_threads_sharing == 1) return; @@ -949,30 +562,29 @@ static void __cache_cpumap_setup(unsigned int cpu, int index, if (cpu_data(i).topo.apicid >> index_msb == c->topo.apicid >> index_msb) { struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); + /* Skip if itself or no cacheinfo */ if (i == cpu || !sib_cpu_ci->info_list) - continue;/* skip if itself or no cacheinfo */ - sibling_leaf = sib_cpu_ci->info_list + index; - cpumask_set_cpu(i, &this_leaf->shared_cpu_map); - cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map); + continue; + + sibling_ci = sib_cpu_ci->info_list + index; + cpumask_set_cpu(i, &ci->shared_cpu_map); + cpumask_set_cpu(cpu, &sibling_ci->shared_cpu_map); } } -static void ci_leaf_init(struct cacheinfo *this_leaf, - struct _cpuid4_info_regs *base) +static void ci_info_init(struct cacheinfo *ci, const struct _cpuid4_info *id4, + struct amd_northbridge *nb) { - this_leaf->id = base->id; - this_leaf->attributes = CACHE_ID; - this_leaf->level = base->eax.split.level; - this_leaf->type = cache_type_map[base->eax.split.type]; - this_leaf->coherency_line_size = - base->ebx.split.coherency_line_size + 1; - this_leaf->ways_of_associativity = - base->ebx.split.ways_of_associativity + 1; - this_leaf->size = base->size; - this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1; - this_leaf->physical_line_partition = - base->ebx.split.physical_line_partition + 1; - this_leaf->priv = base->nb; + ci->id = id4->id; + ci->attributes = CACHE_ID; + ci->level = id4->eax.split.level; + ci->type = cache_type_map[id4->eax.split.type]; + ci->coherency_line_size = id4->ebx.split.coherency_line_size + 1; + ci->ways_of_associativity = id4->ebx.split.ways_of_associativity + 1; + ci->size = id4->size; + ci->number_of_sets = id4->ecx.split.number_of_sets + 1; + ci->physical_line_partition = id4->ebx.split.physical_line_partition + 1; + ci->priv = nb; } int init_cache_level(unsigned int cpu) @@ -987,38 +599,45 @@ int init_cache_level(unsigned int cpu) } /* - * The max shared threads number comes from CPUID.4:EAX[25-14] with input + * The max shared threads number comes from CPUID(0x4) EAX[25-14] with input * ECX as cache index. Then right shift apicid by the number's order to get * cache id for this cache node. 
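The derivation described in this comment (also used by calc_cache_topo_id() above) is easy to check by hand: the shared-thread count is rounded up to a power of two and the APIC ID is shifted right by that order, so every thread sharing the cache computes the same ID. A tiny sketch, with count_order() standing in for the kernel's get_count_order():

    #include <stdio.h>

    static int count_order(unsigned long n)     /* smallest order with (1UL << order) >= n */
    {
        int order = 0;

        while ((1UL << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int eax_25_14 = 1;             /* example CPUID(0x4) EAX[25:14]: 2 threads share */
        unsigned int num_threads_sharing = 1 + eax_25_14;
        int shift = count_order(num_threads_sharing);

        for (unsigned int apicid = 0; apicid < 4; apicid++)
            printf("apicid %u -> cache id %u\n", apicid, apicid >> shift);
        return 0;
    }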
*/ -static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs) +static void get_cache_id(int cpu, struct _cpuid4_info *id4) { struct cpuinfo_x86 *c = &cpu_data(cpu); unsigned long num_threads_sharing; int index_msb; - num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing; + num_threads_sharing = 1 + id4->eax.split.num_threads_sharing; index_msb = get_count_order(num_threads_sharing); - id4_regs->id = c->topo.apicid >> index_msb; + id4->id = c->topo.apicid >> index_msb; } int populate_cache_leaves(unsigned int cpu) { - unsigned int idx, ret; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct cacheinfo *this_leaf = this_cpu_ci->info_list; - struct _cpuid4_info_regs id4_regs = {}; + struct cacheinfo *ci = this_cpu_ci->info_list; + u8 cpu_vendor = boot_cpu_data.x86_vendor; + struct amd_northbridge *nb = NULL; + struct _cpuid4_info id4 = {}; + int idx, ret; for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { - ret = cpuid4_cache_lookup_regs(idx, &id4_regs); + ret = fill_cpuid4_info(idx, &id4); if (ret) return ret; - get_cache_id(cpu, &id4_regs); - ci_leaf_init(this_leaf++, &id4_regs); - __cache_cpumap_setup(cpu, idx, &id4_regs); + + get_cache_id(cpu, &id4); + + if (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON) + nb = amd_init_l3_cache(idx); + + ci_info_init(ci++, &id4, nb); + __cache_cpumap_setup(cpu, idx, &id4); } - this_cpu_ci->cpu_map_populated = true; + this_cpu_ci->cpu_map_populated = true; return 0; } @@ -1034,31 +653,33 @@ int populate_cache_leaves(unsigned int cpu) static unsigned long saved_cr4; static DEFINE_RAW_SPINLOCK(cache_disable_lock); +/* + * Cache flushing is the most time-consuming step when programming the + * MTRRs. On many Intel CPUs without known erratas, it can be skipped + * if the CPU declares cache self-snooping support. + */ +static void maybe_flush_caches(void) +{ + if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) + wbinvd(); +} + void cache_disable(void) __acquires(cache_disable_lock) { unsigned long cr0; /* - * Note that this is not ideal - * since the cache is only flushed/disabled for this CPU while the - * MTRRs are changed, but changing this requires more invasive - * changes to the way the kernel boots + * This is not ideal since the cache is only flushed/disabled + * for this CPU while the MTRRs are changed, but changing this + * requires more invasive changes to the way the kernel boots. */ - raw_spin_lock(&cache_disable_lock); /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ cr0 = read_cr0() | X86_CR0_CD; write_cr0(cr0); - /* - * Cache flushing is the most time-consuming step when programming - * the MTRRs. Fortunately, as per the Intel Software Development - * Manual, we can skip it if the processor supports cache self- - * snooping. - */ - if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) - wbinvd(); + maybe_flush_caches(); /* Save value of CR4 and clear Page Global Enable (bit 7) */ if (cpu_feature_enabled(X86_FEATURE_PGE)) { @@ -1073,9 +694,7 @@ void cache_disable(void) __acquires(cache_disable_lock) if (cpu_feature_enabled(X86_FEATURE_MTRR)) mtrr_disable(); - /* Again, only flush caches if we have to. 
*/ - if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) - wbinvd(); + maybe_flush_caches(); } void cache_enable(void) __releases(cache_disable_lock) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0ff057ff11ce..8feb8fd2957a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -29,7 +29,7 @@ #include <asm/alternative.h> #include <asm/cmdline.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/perf_event.h> #include <asm/mmu_context.h> #include <asm/doublefault.h> @@ -148,7 +148,7 @@ static void ppin_init(struct cpuinfo_x86 *c) */ info = (struct ppin_info *)id->driver_data; - if (rdmsrl_safe(info->msr_ppin_ctl, &val)) + if (rdmsrq_safe(info->msr_ppin_ctl, &val)) goto clear_ppin; if ((val & 3UL) == 1UL) { @@ -158,13 +158,13 @@ static void ppin_init(struct cpuinfo_x86 *c) /* If PPIN is disabled, try to enable */ if (!(val & 2UL)) { - wrmsrl_safe(info->msr_ppin_ctl, val | 2UL); - rdmsrl_safe(info->msr_ppin_ctl, &val); + wrmsrq_safe(info->msr_ppin_ctl, val | 2UL); + rdmsrq_safe(info->msr_ppin_ctl, &val); } /* Is the enable bit set? */ if (val & 2UL) { - c->ppin = __rdmsr(info->msr_ppin); + c->ppin = native_rdmsrq(info->msr_ppin); set_cpu_cap(c, info->feature); return; } @@ -242,6 +242,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { #endif } }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); +SYM_PIC_ALIAS(gdt_page); #ifdef CONFIG_X86_64 static int __init x86_nopcid_setup(char *s) @@ -321,7 +322,7 @@ static int __init cachesize_setup(char *str) __setup("cachesize=", cachesize_setup); /* Probe for the CPUID instruction */ -bool have_cpuid_p(void) +bool cpuid_feature(void) { return flag_is_changeable_p(X86_EFLAGS_ID); } @@ -562,9 +563,9 @@ __noendbr u64 ibt_save(bool disable) u64 msr = 0; if (cpu_feature_enabled(X86_FEATURE_IBT)) { - rdmsrl(MSR_IA32_S_CET, msr); + rdmsrq(MSR_IA32_S_CET, msr); if (disable) - wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN); + wrmsrq(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN); } return msr; @@ -575,10 +576,10 @@ __noendbr void ibt_restore(u64 save) u64 msr; if (cpu_feature_enabled(X86_FEATURE_IBT)) { - rdmsrl(MSR_IA32_S_CET, msr); + rdmsrq(MSR_IA32_S_CET, msr); msr &= ~CET_ENDBR_EN; msr |= (save & CET_ENDBR_EN); - wrmsrl(MSR_IA32_S_CET, msr); + wrmsrq(MSR_IA32_S_CET, msr); } } @@ -602,15 +603,15 @@ static __always_inline void setup_cet(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_USER_SHSTK); if (kernel_ibt) - wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN); + wrmsrq(MSR_IA32_S_CET, CET_ENDBR_EN); else - wrmsrl(MSR_IA32_S_CET, 0); + wrmsrq(MSR_IA32_S_CET, 0); cr4_set_bits(X86_CR4_CET); if (kernel_ibt && ibt_selftest()) { pr_err("IBT selftest: Failed!\n"); - wrmsrl(MSR_IA32_S_CET, 0); + wrmsrq(MSR_IA32_S_CET, 0); setup_clear_cpu_cap(X86_FEATURE_IBT); } } @@ -621,8 +622,8 @@ __noendbr void cet_disable(void) cpu_feature_enabled(X86_FEATURE_SHSTK))) return; - wrmsrl(MSR_IA32_S_CET, 0); - wrmsrl(MSR_IA32_U_CET, 0); + wrmsrq(MSR_IA32_S_CET, 0); + wrmsrq(MSR_IA32_U_CET, 0); } /* @@ -751,9 +752,9 @@ void __init switch_gdt_and_percpu_base(int cpu) * No need to load %gs. It is already correct. * * Writing %gs on 64bit would zero GSBASE which would make any per - * CPU operation up to the point of the wrmsrl() fault. + * CPU operation up to the point of the wrmsrq() fault. * - * Set GSBASE to the new offset. Until the wrmsrl() happens the + * Set GSBASE to the new offset. Until the wrmsrq() happens the * early mapping is still valid. 
That means the GSBASE update will * lose any prior per CPU data which was not copied over in * setup_per_cpu_areas(). @@ -761,7 +762,7 @@ void __init switch_gdt_and_percpu_base(int cpu) * This works even with stackprotector enabled because the * per CPU stack canary is 0 in both per CPU areas. */ - wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); + wrmsrq(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); #else /* * %fs is already set to __KERNEL_PERCPU, but after switching GDT @@ -1005,17 +1006,18 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_D_1_EAX] = eax; } - /* AMD-defined flags: level 0x80000001 */ + /* + * Check if extended CPUID leaves are implemented: Max extended + * CPUID leaf must be in the 0x80000001-0x8000ffff range. + */ eax = cpuid_eax(0x80000000); - c->extended_cpuid_level = eax; + c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0; - if ((eax & 0xffff0000) == 0x80000000) { - if (eax >= 0x80000001) { - cpuid(0x80000001, &eax, &ebx, &ecx, &edx); + if (c->extended_cpuid_level >= 0x80000001) { + cpuid(0x80000001, &eax, &ebx, &ecx, &edx); - c->x86_capability[CPUID_8000_0001_ECX] = ecx; - c->x86_capability[CPUID_8000_0001_EDX] = edx; - } + c->x86_capability[CPUID_8000_0001_ECX] = ecx; + c->x86_capability[CPUID_8000_0001_EDX] = edx; } if (c->extended_cpuid_level >= 0x80000007) { @@ -1295,7 +1297,7 @@ u64 x86_read_arch_cap_msr(void) u64 x86_arch_cap_msr = 0; if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr); + rdmsrq(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr); return x86_arch_cap_msr; } @@ -1351,10 +1353,52 @@ static bool __init vulnerable_to_its(u64 x86_arch_cap_msr) return false; } +static struct x86_cpu_id cpu_latest_microcode[] = { +#include "microcode/intel-ucode-defs.h" + {} +}; + +static bool __init cpu_has_old_microcode(void) +{ + const struct x86_cpu_id *m = x86_match_cpu(cpu_latest_microcode); + + /* Give unknown CPUs a pass: */ + if (!m) { + /* Intel CPUs should be in the list. Warn if not: */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + pr_info("x86/CPU: Model not found in latest microcode list\n"); + return false; + } + + /* + * Hosts usually lie to guests with a super high microcode + * version. Just ignore what hosts tell guests: + */ + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return false; + + /* Consider all debug microcode to be old: */ + if (boot_cpu_data.microcode & BIT(31)) + return true; + + /* Give new microcode a pass: */ + if (boot_cpu_data.microcode >= m->driver_data) + return false; + + /* Uh oh, too old: */ + return true; +} + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 x86_arch_cap_msr = x86_read_arch_cap_msr(); + if (cpu_has_old_microcode()) { + pr_warn("x86/CPU: Running old microcode\n"); + setup_force_cpu_bug(X86_BUG_OLD_MICROCODE); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + } + /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO)) @@ -1435,15 +1479,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) * Affected CPU list is generally enough to enumerate the vulnerability, * but for virtualization case check for ARCH_CAP MSR bits also, VMM may * not want the guest to enumerate the bug. - * - * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist, - * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits. 
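cpu_has_old_microcode() below compares boot_cpu_data.microcode against the newest revision known for the model (taken from the generated intel-ucode-defs.h table), treats debug microcode (bit 31 set) as old, and skips the check under a hypervisor; the result is what the old_microcode vulnerabilities attribute shown earlier reports. A standalone sketch of just that predicate, with made-up revision numbers:

    #include <stdbool.h>
    #include <stdio.h>

    static bool microcode_is_old(unsigned int cur_rev, unsigned int latest_known_rev,
                                 bool on_hypervisor)
    {
        if (on_hypervisor)
            return false;               /* hosts often report fake revisions */
        if (cur_rev & 0x80000000u)
            return true;                /* bit 31: debug microcode, always considered old */
        return cur_rev < latest_known_rev;
    }

    int main(void)
    {
        printf("%d\n", microcode_is_old(0x100, 0x200, false)); /* 1: too old  */
        printf("%d\n", microcode_is_old(0x200, 0x200, false)); /* 0: current  */
        printf("%d\n", microcode_is_old(0x100, 0x200, true));  /* 0: ignored under a hypervisor */
        return 0;
    }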
*/ if (!arch_cap_mmio_immune(x86_arch_cap_msr)) { if (cpu_matches(cpu_vuln_blacklist, MMIO)) setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); - else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) - setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); } if (!cpu_has(c, X86_FEATURE_BTC_NO)) { @@ -1672,11 +1711,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) memset(&c->x86_capability, 0, sizeof(c->x86_capability)); c->extended_cpuid_level = 0; - if (!have_cpuid_p()) + if (!cpuid_feature()) identify_cpu_without_cpuid(c); /* cyrix could have cpuid enabled via c_identify()*/ - if (have_cpuid_p()) { + if (cpuid_feature()) { cpu_detect(c); get_cpu_vendor(c); intel_unlock_cpuid_leafs(c); @@ -1791,11 +1830,11 @@ static bool detect_null_seg_behavior(void) */ unsigned long old_base, tmp; - rdmsrl(MSR_FS_BASE, old_base); - wrmsrl(MSR_FS_BASE, 1); + rdmsrq(MSR_FS_BASE, old_base); + wrmsrq(MSR_FS_BASE, 1); loadsegment(fs, 0); - rdmsrl(MSR_FS_BASE, tmp); - wrmsrl(MSR_FS_BASE, old_base); + rdmsrq(MSR_FS_BASE, tmp); + wrmsrq(MSR_FS_BASE, old_base); return tmp == 0; } @@ -1836,11 +1875,11 @@ static void generic_identify(struct cpuinfo_x86 *c) { c->extended_cpuid_level = 0; - if (!have_cpuid_p()) + if (!cpuid_feature()) identify_cpu_without_cpuid(c); /* cyrix could have cpuid enabled via c_identify()*/ - if (!have_cpuid_p()) + if (!cpuid_feature()) return; cpu_detect(c); @@ -2024,9 +2063,9 @@ void enable_sep_cpu(void) */ tss->x86_tss.ss1 = __KERNEL_CS; - wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); - wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); - wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); + wrmsrq(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1); + wrmsrq(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1)); + wrmsrq(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32); put_cpu(); } @@ -2133,7 +2172,7 @@ DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_ DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth); EXPORT_PER_CPU_SYMBOL(__x86_call_depth); -static void wrmsrl_cstar(unsigned long val) +static void wrmsrq_cstar(unsigned long val) { /* * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR @@ -2141,37 +2180,37 @@ static void wrmsrl_cstar(unsigned long val) * guest. Avoid the pointless write on all Intel CPUs. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) - wrmsrl(MSR_CSTAR, val); + wrmsrq(MSR_CSTAR, val); } static inline void idt_syscall_init(void) { - wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); + wrmsrq(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); if (ia32_enabled()) { - wrmsrl_cstar((unsigned long)entry_SYSCALL_compat); + wrmsrq_cstar((unsigned long)entry_SYSCALL_compat); /* * This only works on Intel CPUs. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. * This does not cause SYSENTER to jump to the wrong location, because * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 
*/ - wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, + wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); + wrmsrq_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); - wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); + wrmsrq_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); } else { - wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore); - wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); - wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); + wrmsrq_cstar((unsigned long)entry_SYSCALL32_ignore); + wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); + wrmsrq_safe(MSR_IA32_SYSENTER_ESP, 0ULL); + wrmsrq_safe(MSR_IA32_SYSENTER_EIP, 0ULL); } /* * Flags to clear on syscall; clear as much as possible * to minimize user space-kernel interference. */ - wrmsrl(MSR_SYSCALL_MASK, + wrmsrq(MSR_SYSCALL_MASK, X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF| X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF| X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF| @@ -2240,7 +2279,7 @@ static inline void setup_getcpu(int cpu) struct desc_struct d = { }; if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID)) - wrmsr(MSR_TSC_AUX, cpudata, 0); + wrmsrq(MSR_TSC_AUX, cpudata); /* Store CPU and node number in limit. */ d.limit0 = cpudata; @@ -2355,8 +2394,8 @@ void cpu_init(void) memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); syscall_init(); - wrmsrl(MSR_FS_BASE, 0); - wrmsrl(MSR_KERNEL_GS_BASE, 0); + wrmsrq(MSR_FS_BASE, 0); + wrmsrq(MSR_KERNEL_GS_BASE, 0); barrier(); x2apic_setup(); diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 51deb60a9d26..bc38b2d56f26 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -75,6 +75,15 @@ extern void check_null_seg_clears_base(struct cpuinfo_x86 *c); void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id); void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c); +#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) +struct amd_northbridge *amd_init_l3_cache(int index); +#else +static inline struct amd_northbridge *amd_init_l3_cache(int index) +{ + return NULL; +} +#endif + unsigned int aperfmperf_get_khz(int cpu); void cpu_select_mitigations(void); diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index a2fbea0be535..46efcbd6afa4 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -28,6 +28,7 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, + { X86_FEATURE_APX, X86_FEATURE_XSAVE }, { X86_FEATURE_CMOV, X86_FEATURE_FXSR }, { X86_FEATURE_MMX, X86_FEATURE_FXSR }, { X86_FEATURE_MMXEXT, X86_FEATURE_MMX }, @@ -82,8 +83,12 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_XFD, X86_FEATURE_XSAVES }, { X86_FEATURE_XFD, X86_FEATURE_XGETBV1 }, { X86_FEATURE_AMX_TILE, X86_FEATURE_XFD }, + { X86_FEATURE_AMX_FP16, X86_FEATURE_AMX_TILE }, + { X86_FEATURE_AMX_BF16, X86_FEATURE_AMX_TILE }, + { X86_FEATURE_AMX_INT8, X86_FEATURE_AMX_TILE }, { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES }, { X86_FEATURE_FRED, X86_FEATURE_LKGS }, + { X86_FEATURE_SPEC_CTRL_SSBD, X86_FEATURE_SPEC_CTRL }, {} }; diff --git a/arch/x86/kernel/cpu/cpuid_0x2_table.c b/arch/x86/kernel/cpu/cpuid_0x2_table.c new file mode 100644 index 000000000000..89bc8db5e9c6 --- /dev/null +++ 
b/arch/x86/kernel/cpu/cpuid_0x2_table.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/sizes.h> + +#include <asm/cpuid/types.h> + +#include "cpu.h" + +#define CACHE_ENTRY(_desc, _type, _size) \ + [_desc] = { \ + .c_type = (_type), \ + .c_size = (_size) / SZ_1K, \ + } + +#define TLB_ENTRY(_desc, _type, _entries) \ + [_desc] = { \ + .t_type = (_type), \ + .entries = (_entries), \ + } + +const struct leaf_0x2_table cpuid_0x2_table[256] = { + CACHE_ENTRY(0x06, CACHE_L1_INST, SZ_8K ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x08, CACHE_L1_INST, SZ_16K ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x09, CACHE_L1_INST, SZ_32K ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x0a, CACHE_L1_DATA, SZ_8K ), /* 2 way set assoc, 32 byte line size */ + CACHE_ENTRY(0x0c, CACHE_L1_DATA, SZ_16K ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x0d, CACHE_L1_DATA, SZ_16K ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x0e, CACHE_L1_DATA, SZ_24K ), /* 6-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x21, CACHE_L2, SZ_256K ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x22, CACHE_L3, SZ_512K ), /* 4-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x23, CACHE_L3, SZ_1M ), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x25, CACHE_L3, SZ_2M ), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x29, CACHE_L3, SZ_4M ), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x2c, CACHE_L1_DATA, SZ_32K ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x30, CACHE_L1_INST, SZ_32K ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x39, CACHE_L2, SZ_128K ), /* 4-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x3a, CACHE_L2, SZ_192K ), /* 6-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x3b, CACHE_L2, SZ_128K ), /* 2-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x3c, CACHE_L2, SZ_256K ), /* 4-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x3d, CACHE_L2, SZ_384K ), /* 6-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x3e, CACHE_L2, SZ_512K ), /* 4-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x3f, CACHE_L2, SZ_256K ), /* 2-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x41, CACHE_L2, SZ_128K ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x42, CACHE_L2, SZ_256K ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x43, CACHE_L2, SZ_512K ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x44, CACHE_L2, SZ_1M ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x45, CACHE_L2, SZ_2M ), /* 4-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x46, CACHE_L3, SZ_4M ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x47, CACHE_L3, SZ_8M ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x48, CACHE_L2, SZ_3M ), /* 12-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x49, CACHE_L3, SZ_4M ), /* 16-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x4a, CACHE_L3, SZ_6M ), /* 12-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x4b, CACHE_L3, SZ_8M ), /* 16-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x4c, CACHE_L3, SZ_12M ), /* 12-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x4d, CACHE_L3, SZ_16M ), /* 16-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x4e, CACHE_L2, SZ_6M ), /* 24-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x60, CACHE_L1_DATA, SZ_16K 
), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x66, CACHE_L1_DATA, SZ_8K ), /* 4-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x67, CACHE_L1_DATA, SZ_16K ), /* 4-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x68, CACHE_L1_DATA, SZ_32K ), /* 4-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x78, CACHE_L2, SZ_1M ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x79, CACHE_L2, SZ_128K ), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x7a, CACHE_L2, SZ_256K ), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x7b, CACHE_L2, SZ_512K ), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x7c, CACHE_L2, SZ_1M ), /* 8-way set assoc, sectored cache, 64 byte line size */ + CACHE_ENTRY(0x7d, CACHE_L2, SZ_2M ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x7f, CACHE_L2, SZ_512K ), /* 2-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x80, CACHE_L2, SZ_512K ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x82, CACHE_L2, SZ_256K ), /* 8-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x83, CACHE_L2, SZ_512K ), /* 8-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x84, CACHE_L2, SZ_1M ), /* 8-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x85, CACHE_L2, SZ_2M ), /* 8-way set assoc, 32 byte line size */ + CACHE_ENTRY(0x86, CACHE_L2, SZ_512K ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0x87, CACHE_L2, SZ_1M ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xd0, CACHE_L3, SZ_512K ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xd1, CACHE_L3, SZ_1M ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xd2, CACHE_L3, SZ_2M ), /* 4-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xd6, CACHE_L3, SZ_1M ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xd7, CACHE_L3, SZ_2M ), /* 8-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xd8, CACHE_L3, SZ_4M ), /* 12-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xdc, CACHE_L3, SZ_2M ), /* 12-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xdd, CACHE_L3, SZ_4M ), /* 12-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xde, CACHE_L3, SZ_8M ), /* 12-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xe2, CACHE_L3, SZ_2M ), /* 16-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xe3, CACHE_L3, SZ_4M ), /* 16-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xe4, CACHE_L3, SZ_8M ), /* 16-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xea, CACHE_L3, SZ_12M ), /* 24-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xeb, CACHE_L3, SZ_18M ), /* 24-way set assoc, 64 byte line size */ + CACHE_ENTRY(0xec, CACHE_L3, SZ_24M ), /* 24-way set assoc, 64 byte line size */ + + TLB_ENTRY( 0x01, TLB_INST_4K, 32 ), /* TLB_INST 4 KByte pages, 4-way set associative */ + TLB_ENTRY( 0x02, TLB_INST_4M, 2 ), /* TLB_INST 4 MByte pages, full associative */ + TLB_ENTRY( 0x03, TLB_DATA_4K, 64 ), /* TLB_DATA 4 KByte pages, 4-way set associative */ + TLB_ENTRY( 0x04, TLB_DATA_4M, 8 ), /* TLB_DATA 4 MByte pages, 4-way set associative */ + TLB_ENTRY( 0x05, TLB_DATA_4M, 32 ), /* TLB_DATA 4 MByte pages, 4-way set associative */ + TLB_ENTRY( 0x0b, TLB_INST_4M, 4 ), /* TLB_INST 4 MByte pages, 4-way set associative */ + TLB_ENTRY( 0x4f, TLB_INST_4K, 32 ), /* TLB_INST 4 KByte pages */ + TLB_ENTRY( 0x50, TLB_INST_ALL, 64 ), /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */ + TLB_ENTRY( 0x51, TLB_INST_ALL, 128 ), /* TLB_INST 4 KByte and 2-MByte or 4-MByte 
pages */ + TLB_ENTRY( 0x52, TLB_INST_ALL, 256 ), /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */ + TLB_ENTRY( 0x55, TLB_INST_2M_4M, 7 ), /* TLB_INST 2-MByte or 4-MByte pages, fully associative */ + TLB_ENTRY( 0x56, TLB_DATA0_4M, 16 ), /* TLB_DATA0 4 MByte pages, 4-way set associative */ + TLB_ENTRY( 0x57, TLB_DATA0_4K, 16 ), /* TLB_DATA0 4 KByte pages, 4-way associative */ + TLB_ENTRY( 0x59, TLB_DATA0_4K, 16 ), /* TLB_DATA0 4 KByte pages, fully associative */ + TLB_ENTRY( 0x5a, TLB_DATA0_2M_4M, 32 ), /* TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative */ + TLB_ENTRY( 0x5b, TLB_DATA_4K_4M, 64 ), /* TLB_DATA 4 KByte and 4 MByte pages */ + TLB_ENTRY( 0x5c, TLB_DATA_4K_4M, 128 ), /* TLB_DATA 4 KByte and 4 MByte pages */ + TLB_ENTRY( 0x5d, TLB_DATA_4K_4M, 256 ), /* TLB_DATA 4 KByte and 4 MByte pages */ + TLB_ENTRY( 0x61, TLB_INST_4K, 48 ), /* TLB_INST 4 KByte pages, full associative */ + TLB_ENTRY( 0x63, TLB_DATA_1G_2M_4M, 4 ), /* TLB_DATA 1 GByte pages, 4-way set associative + * (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here) */ + TLB_ENTRY( 0x6b, TLB_DATA_4K, 256 ), /* TLB_DATA 4 KByte pages, 8-way associative */ + TLB_ENTRY( 0x6c, TLB_DATA_2M_4M, 128 ), /* TLB_DATA 2 MByte or 4 MByte pages, 8-way associative */ + TLB_ENTRY( 0x6d, TLB_DATA_1G, 16 ), /* TLB_DATA 1 GByte pages, fully associative */ + TLB_ENTRY( 0x76, TLB_INST_2M_4M, 8 ), /* TLB_INST 2-MByte or 4-MByte pages, fully associative */ + TLB_ENTRY( 0xb0, TLB_INST_4K, 128 ), /* TLB_INST 4 KByte pages, 4-way set associative */ + TLB_ENTRY( 0xb1, TLB_INST_2M_4M, 4 ), /* TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries */ + TLB_ENTRY( 0xb2, TLB_INST_4K, 64 ), /* TLB_INST 4KByte pages, 4-way set associative */ + TLB_ENTRY( 0xb3, TLB_DATA_4K, 128 ), /* TLB_DATA 4 KByte pages, 4-way set associative */ + TLB_ENTRY( 0xb4, TLB_DATA_4K, 256 ), /* TLB_DATA 4 KByte pages, 4-way associative */ + TLB_ENTRY( 0xb5, TLB_INST_4K, 64 ), /* TLB_INST 4 KByte pages, 8-way set associative */ + TLB_ENTRY( 0xb6, TLB_INST_4K, 128 ), /* TLB_INST 4 KByte pages, 8-way set associative */ + TLB_ENTRY( 0xba, TLB_DATA_4K, 64 ), /* TLB_DATA 4 KByte pages, 4-way associative */ + TLB_ENTRY( 0xc0, TLB_DATA_4K_4M, 8 ), /* TLB_DATA 4 KByte and 4 MByte pages, 4-way associative */ + TLB_ENTRY( 0xc1, STLB_4K_2M, 1024 ), /* STLB 4 KByte and 2 MByte pages, 8-way associative */ + TLB_ENTRY( 0xc2, TLB_DATA_2M_4M, 16 ), /* TLB_DATA 2 MByte/4MByte pages, 4-way associative */ + TLB_ENTRY( 0xca, STLB_4K, 512 ), /* STLB 4 KByte pages, 4-way associative */ +}; diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 4a4118784c13..d69757246bde 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -4,6 +4,7 @@ #include <asm/cpu.h> #include <asm/cpufeature.h> #include <asm/msr-index.h> +#include <asm/msr.h> #include <asm/processor.h> #include <asm/vmx.h> @@ -118,7 +119,7 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c) bool enable_vmx; u64 msr; - if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) { + if (rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr)) { clear_cpu_cap(c, X86_FEATURE_VMX); clear_cpu_cap(c, X86_FEATURE_SGX); return; @@ -165,7 +166,7 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c) msr |= FEAT_CTL_SGX_LC_ENABLED; } - wrmsrl(MSR_IA32_FEAT_CTL, msr); + wrmsrq(MSR_IA32_FEAT_CTL, msr); update_caps: set_cpu_cap(c, X86_FEATURE_MSR_IA32_FEAT_CTL); diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 6af4a4a90a52..2154f12766fb 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ 
b/arch/x86/kernel/cpu/hygon.c @@ -15,6 +15,7 @@ #include <asm/cacheinfo.h> #include <asm/spec-ctrl.h> #include <asm/delay.h> +#include <asm/msr.h> #include "cpu.h" @@ -96,7 +97,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { u64 val; - rdmsrl(MSR_K7_HWCR, val); + rdmsrq(MSR_K7_HWCR, val); if (!(val & BIT(24))) pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); } @@ -110,7 +111,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) * Try to cache the base value so further operations can * avoid RMW. If that faults, do not enable SSBD. */ - if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { + if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); setup_force_cpu_cap(X86_FEATURE_SSBD); x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; @@ -194,7 +195,7 @@ static void init_hygon(struct cpuinfo_x86 *c) init_hygon_cacheinfo(c); if (cpu_has(c, X86_FEATURE_SVM)) { - rdmsrl(MSR_VM_CR, vm_cr); + rdmsrq(MSR_VM_CR, vm_cr); if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) { pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n"); clear_cpu_cap(c, X86_FEATURE_SVM); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index cdc9813871ef..076eaa41b8c8 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -6,6 +6,7 @@ #include <linux/minmax.h> #include <linux/smp.h> #include <linux/string.h> +#include <linux/types.h> #ifdef CONFIG_X86_64 #include <linux/topology.h> @@ -15,6 +16,7 @@ #include <asm/cpu_device_id.h> #include <asm/cpufeature.h> #include <asm/cpu.h> +#include <asm/cpuid/api.h> #include <asm/hwcap2.h> #include <asm/intel-family.h> #include <asm/microcode.h> @@ -157,7 +159,7 @@ static void detect_tme_early(struct cpuinfo_x86 *c) u64 tme_activate; int keyid_bits; - rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); + rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate); if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { pr_info_once("x86/tme: not enabled by BIOS\n"); @@ -299,7 +301,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) * string flag and enhanced fast string capabilities accordingly. 
*/ if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) { - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { /* X86_FEATURE_ERMS is set based on CPUID */ set_cpu_cap(c, X86_FEATURE_REP_GOOD); @@ -488,7 +490,7 @@ static void init_cpuid_fault(struct cpuinfo_x86 *c) { u64 msr; - if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) { + if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) { if (msr & MSR_PLATFORM_INFO_CPUID_FAULT) set_cpu_cap(c, X86_FEATURE_CPUID_FAULT); } @@ -498,7 +500,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c) { u64 msr; - if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr)) + if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr)) return; /* Clear all MISC features */ @@ -509,7 +511,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c) probe_xeon_phi_r3mwait(c); msr = this_cpu_read(msr_misc_features_shadow); - wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); + wrmsrq(MSR_MISC_FEATURES_ENABLES, msr); } /* @@ -646,103 +648,11 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) } #endif -#define TLB_INST_4K 0x01 -#define TLB_INST_4M 0x02 -#define TLB_INST_2M_4M 0x03 - -#define TLB_INST_ALL 0x05 -#define TLB_INST_1G 0x06 - -#define TLB_DATA_4K 0x11 -#define TLB_DATA_4M 0x12 -#define TLB_DATA_2M_4M 0x13 -#define TLB_DATA_4K_4M 0x14 - -#define TLB_DATA_1G 0x16 -#define TLB_DATA_1G_2M_4M 0x17 - -#define TLB_DATA0_4K 0x21 -#define TLB_DATA0_4M 0x22 -#define TLB_DATA0_2M_4M 0x23 - -#define STLB_4K 0x41 -#define STLB_4K_2M 0x42 - -/* - * All of leaf 0x2's one-byte TLB descriptors implies the same number of - * entries for their respective TLB types. The 0x63 descriptor is an - * exception: it implies 4 dTLB entries for 1GB pages 32 dTLB entries - * for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for - * 2MB/4MB pages here, as its count for dTLB 1GB pages is already at the - * intel_tlb_table[] mapping. 
- */ -#define TLB_0x63_2M_4M_ENTRIES 32 - -struct _tlb_table { - unsigned char descriptor; - char tlb_type; - unsigned int entries; -}; - -static const struct _tlb_table intel_tlb_table[] = { - { 0x01, TLB_INST_4K, 32}, /* TLB_INST 4 KByte pages, 4-way set associative */ - { 0x02, TLB_INST_4M, 2}, /* TLB_INST 4 MByte pages, full associative */ - { 0x03, TLB_DATA_4K, 64}, /* TLB_DATA 4 KByte pages, 4-way set associative */ - { 0x04, TLB_DATA_4M, 8}, /* TLB_DATA 4 MByte pages, 4-way set associative */ - { 0x05, TLB_DATA_4M, 32}, /* TLB_DATA 4 MByte pages, 4-way set associative */ - { 0x0b, TLB_INST_4M, 4}, /* TLB_INST 4 MByte pages, 4-way set associative */ - { 0x4f, TLB_INST_4K, 32}, /* TLB_INST 4 KByte pages */ - { 0x50, TLB_INST_ALL, 64}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */ - { 0x51, TLB_INST_ALL, 128}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */ - { 0x52, TLB_INST_ALL, 256}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */ - { 0x55, TLB_INST_2M_4M, 7}, /* TLB_INST 2-MByte or 4-MByte pages, fully associative */ - { 0x56, TLB_DATA0_4M, 16}, /* TLB_DATA0 4 MByte pages, 4-way set associative */ - { 0x57, TLB_DATA0_4K, 16}, /* TLB_DATA0 4 KByte pages, 4-way associative */ - { 0x59, TLB_DATA0_4K, 16}, /* TLB_DATA0 4 KByte pages, fully associative */ - { 0x5a, TLB_DATA0_2M_4M, 32}, /* TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative */ - { 0x5b, TLB_DATA_4K_4M, 64}, /* TLB_DATA 4 KByte and 4 MByte pages */ - { 0x5c, TLB_DATA_4K_4M, 128}, /* TLB_DATA 4 KByte and 4 MByte pages */ - { 0x5d, TLB_DATA_4K_4M, 256}, /* TLB_DATA 4 KByte and 4 MByte pages */ - { 0x61, TLB_INST_4K, 48}, /* TLB_INST 4 KByte pages, full associative */ - { 0x63, TLB_DATA_1G_2M_4M, 4}, /* TLB_DATA 1 GByte pages, 4-way set associative - * (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here) */ - { 0x6b, TLB_DATA_4K, 256}, /* TLB_DATA 4 KByte pages, 8-way associative */ - { 0x6c, TLB_DATA_2M_4M, 128}, /* TLB_DATA 2 MByte or 4 MByte pages, 8-way associative */ - { 0x6d, TLB_DATA_1G, 16}, /* TLB_DATA 1 GByte pages, fully associative */ - { 0x76, TLB_INST_2M_4M, 8}, /* TLB_INST 2-MByte or 4-MByte pages, fully associative */ - { 0xb0, TLB_INST_4K, 128}, /* TLB_INST 4 KByte pages, 4-way set associative */ - { 0xb1, TLB_INST_2M_4M, 4}, /* TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries */ - { 0xb2, TLB_INST_4K, 64}, /* TLB_INST 4KByte pages, 4-way set associative */ - { 0xb3, TLB_DATA_4K, 128}, /* TLB_DATA 4 KByte pages, 4-way set associative */ - { 0xb4, TLB_DATA_4K, 256}, /* TLB_DATA 4 KByte pages, 4-way associative */ - { 0xb5, TLB_INST_4K, 64}, /* TLB_INST 4 KByte pages, 8-way set associative */ - { 0xb6, TLB_INST_4K, 128}, /* TLB_INST 4 KByte pages, 8-way set associative */ - { 0xba, TLB_DATA_4K, 64}, /* TLB_DATA 4 KByte pages, 4-way associative */ - { 0xc0, TLB_DATA_4K_4M, 8}, /* TLB_DATA 4 KByte and 4 MByte pages, 4-way associative */ - { 0xc1, STLB_4K_2M, 1024}, /* STLB 4 KByte and 2 MByte pages, 8-way associative */ - { 0xc2, TLB_DATA_2M_4M, 16}, /* TLB_DATA 2 MByte/4MByte pages, 4-way associative */ - { 0xca, STLB_4K, 512}, /* STLB 4 KByte pages, 4-way associative */ - { 0x00, 0, 0 } -}; - -static void intel_tlb_lookup(const unsigned char desc) +static void intel_tlb_lookup(const struct leaf_0x2_table *desc) { - unsigned int entries; - unsigned char k; - - if (desc == 0) - return; - - /* look up this descriptor in the table */ - for (k = 0; intel_tlb_table[k].descriptor != desc && - intel_tlb_table[k].descriptor != 0; k++) - ; + short entries = 
desc->entries; - if (intel_tlb_table[k].tlb_type == 0) - return; - - entries = intel_tlb_table[k].entries; - switch (intel_tlb_table[k].tlb_type) { + switch (desc->t_type) { case STLB_4K: tlb_lli_4k = max(tlb_lli_4k, entries); tlb_lld_4k = max(tlb_lld_4k, entries); @@ -799,28 +709,16 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c) { - int i, j, n; - unsigned int regs[4]; - unsigned char *desc = (unsigned char *)regs; + const struct leaf_0x2_table *desc; + union leaf_0x2_regs regs; + u8 *ptr; if (c->cpuid_level < 2) return; - /* Number of times to iterate */ - n = cpuid_eax(2) & 0xFF; - - for (i = 0 ; i < n ; i++) { - cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); - - /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 4 ; j++) - if (regs[j] & (1 << 31)) - regs[j] = 0; - - /* Byte 0 is level count, not a descriptor */ - for (j = 1 ; j < 16 ; j++) - intel_tlb_lookup(desc[j]); - } + cpuid_leaf_0x2(&regs); + for_each_cpuid_0x2_desc(regs, ptr, desc) + intel_tlb_lookup(desc); } static const struct cpu_dev intel_cpu_dev = { diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c index 30b1d63b97f3..bc7671f920a7 100644 --- a/arch/x86/kernel/cpu/intel_epb.c +++ b/arch/x86/kernel/cpu/intel_epb.c @@ -79,7 +79,7 @@ static int intel_epb_save(void) { u64 epb; - rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); + rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb); /* * Ensure that saved_epb will always be nonzero after this write even if * the EPB value read from the MSR is 0. @@ -94,7 +94,7 @@ static void intel_epb_restore(void) u64 val = this_cpu_read(saved_epb); u64 epb; - rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); + rdmsrq(MSR_IA32_ENERGY_PERF_BIAS, epb); if (val) { val &= EPB_MASK; } else { @@ -111,7 +111,7 @@ static void intel_epb_restore(void) pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); } } - wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); + wrmsrq(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); } static struct syscore_ops intel_epb_syscore_ops = { @@ -135,7 +135,7 @@ static ssize_t energy_perf_bias_show(struct device *dev, u64 epb; int ret; - ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret < 0) return ret; @@ -157,11 +157,11 @@ static ssize_t energy_perf_bias_store(struct device *dev, else if (kstrtou64(buf, 0, &val) || val > MAX_EPB) return -EINVAL; - ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret < 0) return ret; - ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, + ret = wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val); if (ret < 0) return ret; diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 1075a90141da..9d852c3b2cb5 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -662,12 +662,12 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank) return; } - rdmsrl(MSR_K7_HWCR, hwcr); + rdmsrq(MSR_K7_HWCR, hwcr); /* McStatusWrEn has to be set */ need_toggle = !(hwcr & BIT(18)); if (need_toggle) - wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); + wrmsrq(MSR_K7_HWCR, hwcr | BIT(18)); /* Clear CntP bit safely */ for (i = 0; i < num_msrs; i++) @@ -675,7 +675,7 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank) /* restore old settings */ if (need_toggle) - wrmsrl(MSR_K7_HWCR, hwcr); + wrmsrq(MSR_K7_HWCR, 
hwcr); } /* cpu init entry point, called from mce.c with preempt off */ @@ -805,12 +805,12 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) } if (mce_flags.smca) { - rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid); + rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), m->ipid); if (m->status & MCI_STATUS_SYNDV) { - rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd); - rdmsrl(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1); - rdmsrl(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2); + rdmsrq(MSR_AMD64_SMCA_MCx_SYND(bank), m->synd); + rdmsrq(MSR_AMD64_SMCA_MCx_SYND1(bank), err.vendor.amd.synd1); + rdmsrq(MSR_AMD64_SMCA_MCx_SYND2(bank), err.vendor.amd.synd2); } } @@ -834,16 +834,16 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) { u64 status, addr = 0; - rdmsrl(msr_stat, status); + rdmsrq(msr_stat, status); if (!(status & MCI_STATUS_VAL)) return false; if (status & MCI_STATUS_ADDRV) - rdmsrl(msr_addr, addr); + rdmsrq(msr_addr, addr); __log_error(bank, status, addr, misc); - wrmsrl(msr_stat, 0); + wrmsrq(msr_stat, 0); return status & MCI_STATUS_DEFERRED; } @@ -862,7 +862,7 @@ static bool _log_error_deferred(unsigned int bank, u32 misc) return true; /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */ - wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); + wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); return true; } diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index f6fd71b64b66..e9b3c5d4a52e 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -121,7 +121,7 @@ void mce_prep_record_common(struct mce *m) { m->cpuid = cpuid_eax(1); m->cpuvendor = boot_cpu_data.x86_vendor; - m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP); + m->mcgcap = native_rdmsrq(MSR_IA32_MCG_CAP); /* need the internal __ version to avoid deadlocks */ m->time = __ktime_get_real_seconds(); } @@ -388,9 +388,9 @@ void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) } /* MSR access wrappers used for error injection */ -noinstr u64 mce_rdmsrl(u32 msr) +noinstr u64 mce_rdmsrq(u32 msr) { - DECLARE_ARGS(val, low, high); + EAX_EDX_DECLARE_ARGS(val, low, high); if (__this_cpu_read(injectm.finished)) { int offset; @@ -423,7 +423,7 @@ noinstr u64 mce_rdmsrl(u32 msr) return EAX_EDX_VAL(val, low, high); } -static noinstr void mce_wrmsrl(u32 msr, u64 v) +static noinstr void mce_wrmsrq(u32 msr, u64 v) { u32 low, high; @@ -444,7 +444,7 @@ static noinstr void mce_wrmsrl(u32 msr, u64 v) low = (u32)v; high = (u32)(v >> 32); - /* See comment in mce_rdmsrl() */ + /* See comment in mce_rdmsrq() */ asm volatile("1: wrmsr\n" "2:\n" _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE) @@ -468,7 +468,7 @@ static noinstr void mce_gather_info(struct mce_hw_err *err, struct pt_regs *regs instrumentation_end(); m = &err->m; - m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); + m->mcgstatus = mce_rdmsrq(MSR_IA32_MCG_STATUS); if (regs) { /* * Get the address of the instruction at the time of @@ -488,7 +488,7 @@ static noinstr void mce_gather_info(struct mce_hw_err *err, struct pt_regs *regs } /* Use accurate RIP reporting if available. 
*/ if (mca_cfg.rip_msr) - m->ip = mce_rdmsrl(mca_cfg.rip_msr); + m->ip = mce_rdmsrq(mca_cfg.rip_msr); } } @@ -684,10 +684,10 @@ static noinstr void mce_read_aux(struct mce_hw_err *err, int i) struct mce *m = &err->m; if (m->status & MCI_STATUS_MISCV) - m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC)); + m->misc = mce_rdmsrq(mca_msr_reg(i, MCA_MISC)); if (m->status & MCI_STATUS_ADDRV) { - m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR)); + m->addr = mce_rdmsrq(mca_msr_reg(i, MCA_ADDR)); /* * Mask the reported address by the reported granularity. @@ -702,12 +702,12 @@ static noinstr void mce_read_aux(struct mce_hw_err *err, int i) } if (mce_flags.smca) { - m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i)); + m->ipid = mce_rdmsrq(MSR_AMD64_SMCA_MCx_IPID(i)); if (m->status & MCI_STATUS_SYNDV) { - m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i)); - err->vendor.amd.synd1 = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND1(i)); - err->vendor.amd.synd2 = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND2(i)); + m->synd = mce_rdmsrq(MSR_AMD64_SMCA_MCx_SYND(i)); + err->vendor.amd.synd1 = mce_rdmsrq(MSR_AMD64_SMCA_MCx_SYND1(i)); + err->vendor.amd.synd2 = mce_rdmsrq(MSR_AMD64_SMCA_MCx_SYND2(i)); } } } @@ -753,7 +753,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) m->bank = i; barrier(); - m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); + m->status = mce_rdmsrq(mca_msr_reg(i, MCA_STATUS)); /* * Update storm tracking here, before checking for the @@ -829,7 +829,7 @@ clear_it: /* * Clear state for this bank. */ - mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); + mce_wrmsrq(mca_msr_reg(i, MCA_STATUS), 0); } /* @@ -887,8 +887,8 @@ quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) */ static noinstr bool quirk_skylake_repmov(void) { - u64 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); - u64 misc_enable = mce_rdmsrl(MSR_IA32_MISC_ENABLE); + u64 mcgstatus = mce_rdmsrq(MSR_IA32_MCG_STATUS); + u64 misc_enable = mce_rdmsrq(MSR_IA32_MISC_ENABLE); u64 mc1_status; /* @@ -899,7 +899,7 @@ static noinstr bool quirk_skylake_repmov(void) !(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) return false; - mc1_status = mce_rdmsrl(MSR_IA32_MCx_STATUS(1)); + mc1_status = mce_rdmsrq(MSR_IA32_MCx_STATUS(1)); /* Check for a software-recoverable data fetch error. 
*/ if ((mc1_status & @@ -910,8 +910,8 @@ static noinstr bool quirk_skylake_repmov(void) MCI_STATUS_ADDRV | MCI_STATUS_MISCV | MCI_STATUS_AR | MCI_STATUS_S)) { misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING; - mce_wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); - mce_wrmsrl(MSR_IA32_MCx_STATUS(1), 0); + mce_wrmsrq(MSR_IA32_MISC_ENABLE, misc_enable); + mce_wrmsrq(MSR_IA32_MCx_STATUS(1), 0); instrumentation_begin(); pr_err_once("Erratum detected, disable fast string copy instructions.\n"); @@ -955,7 +955,7 @@ static __always_inline int mce_no_way_out(struct mce_hw_err *err, char **msg, un int i; for (i = 0; i < this_cpu_read(mce_num_banks); i++) { - m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); + m->status = mce_rdmsrq(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; @@ -1274,7 +1274,7 @@ static __always_inline void mce_clear_state(unsigned long *toclear) for (i = 0; i < this_cpu_read(mce_num_banks); i++) { if (arch_test_bit(i, toclear)) - mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); + mce_wrmsrq(mca_msr_reg(i, MCA_STATUS), 0); } } @@ -1298,7 +1298,7 @@ static noinstr bool mce_check_crashing_cpu(void) (crashing_cpu != -1 && crashing_cpu != cpu)) { u64 mcgstatus; - mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); + mcgstatus = native_rdmsrq(MSR_IA32_MCG_STATUS); if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { if (mcgstatus & MCG_STATUS_LMCES) @@ -1306,7 +1306,7 @@ static noinstr bool mce_check_crashing_cpu(void) } if (mcgstatus & MCG_STATUS_RIPV) { - __wrmsr(MSR_IA32_MCG_STATUS, 0, 0); + native_wrmsrq(MSR_IA32_MCG_STATUS, 0); return true; } } @@ -1335,7 +1335,7 @@ __mc_scan_banks(struct mce_hw_err *err, struct pt_regs *regs, m->addr = 0; m->bank = i; - m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); + m->status = mce_rdmsrq(mca_msr_reg(i, MCA_STATUS)); if (!(m->status & MCI_STATUS_VAL)) continue; @@ -1693,7 +1693,7 @@ out: instrumentation_end(); clear: - mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); + mce_wrmsrq(MSR_IA32_MCG_STATUS, 0); } EXPORT_SYMBOL_GPL(do_machine_check); @@ -1822,7 +1822,7 @@ static void __mcheck_cpu_cap_init(void) u64 cap; u8 b; - rdmsrl(MSR_IA32_MCG_CAP, cap); + rdmsrq(MSR_IA32_MCG_CAP, cap); b = cap & MCG_BANKCNT_MASK; @@ -1863,7 +1863,7 @@ static void __mcheck_cpu_init_generic(void) cr4_set_bits(X86_CR4_MCE); - rdmsrl(MSR_IA32_MCG_CAP, cap); + rdmsrq(MSR_IA32_MCG_CAP, cap); if (cap & MCG_CTL_P) wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); } @@ -1878,8 +1878,8 @@ static void __mcheck_cpu_init_clear_banks(void) if (!b->init) continue; - wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); - wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); + wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl); + wrmsrq(mca_msr_reg(i, MCA_STATUS), 0); } } @@ -1905,7 +1905,7 @@ static void __mcheck_cpu_check_banks(void) if (!b->init) continue; - rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); + rdmsrq(mca_msr_reg(i, MCA_CTL), msrval); b->init = !!msrval; } } @@ -2436,7 +2436,7 @@ static void mce_disable_error_reporting(void) struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(mca_msr_reg(i, MCA_CTL), 0); + wrmsrq(mca_msr_reg(i, MCA_CTL), 0); } return; } @@ -2786,7 +2786,7 @@ static void mce_reenable_cpu(void) struct mce_bank *b = &mce_banks[i]; if (b->init) - wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); + wrmsrq(mca_msr_reg(i, MCA_CTL), b->ctl); } } diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 06e3cf7229ce..d02c4f556cd0 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -24,10 +24,11 @@ #include <linux/pci.h> #include 
<linux/uaccess.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/apic.h> #include <asm/irq_vectors.h> #include <asm/mce.h> +#include <asm/msr.h> #include <asm/nmi.h> #include <asm/smp.h> @@ -475,27 +476,27 @@ static void prepare_msrs(void *info) struct mce m = *(struct mce *)info; u8 b = m.bank; - wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); + wrmsrq(MSR_IA32_MCG_STATUS, m.mcgstatus); if (boot_cpu_has(X86_FEATURE_SMCA)) { if (m.inject_flags == DFR_INT_INJ) { - wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status); - wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr); + wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status); + wrmsrq(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr); } else { - wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status); - wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr); + wrmsrq(MSR_AMD64_SMCA_MCx_STATUS(b), m.status); + wrmsrq(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr); } - wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd); + wrmsrq(MSR_AMD64_SMCA_MCx_SYND(b), m.synd); if (m.misc) - wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc); + wrmsrq(MSR_AMD64_SMCA_MCx_MISC(b), m.misc); } else { - wrmsrl(MSR_IA32_MCx_STATUS(b), m.status); - wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr); + wrmsrq(MSR_IA32_MCx_STATUS(b), m.status); + wrmsrq(MSR_IA32_MCx_ADDR(b), m.addr); if (m.misc) - wrmsrl(MSR_IA32_MCx_MISC(b), m.misc); + wrmsrq(MSR_IA32_MCx_MISC(b), m.misc); } } @@ -589,7 +590,7 @@ static int inj_bank_set(void *data, u64 val) u64 cap; /* Get bank count on target CPU so we can handle non-uniform values. */ - rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap); + rdmsrq_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap); n_banks = cap & MCG_BANKCNT_MASK; if (val >= n_banks) { @@ -613,7 +614,7 @@ static int inj_bank_set(void *data, u64 val) if (cpu_feature_enabled(X86_FEATURE_SMCA)) { u64 ipid; - if (rdmsrl_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) { + if (rdmsrq_on_cpu(m->extcpu, MSR_AMD64_SMCA_MCx_IPID(val), &ipid)) { pr_err("Error reading IPID on CPU%d\n", m->extcpu); return -EINVAL; } @@ -741,15 +742,15 @@ static void check_hw_inj_possible(void) u64 status = MCI_STATUS_VAL, ipid; /* Check whether bank is populated */ - rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), ipid); + rdmsrq(MSR_AMD64_SMCA_MCx_IPID(bank), ipid); if (!ipid) continue; toggle_hw_mce_inject(cpu, true); - wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status); - rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status); - wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0); + wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), status); + rdmsrq_safe(mca_msr_reg(bank, MCA_STATUS), &status); + wrmsrq_safe(mca_msr_reg(bank, MCA_STATUS), 0); if (!status) { hw_injection_possible = false; diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index f863df0ff42c..efcf21e9552e 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -94,7 +94,7 @@ static bool cmci_supported(int *banks) if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) return false; - rdmsrl(MSR_IA32_MCG_CAP, cap); + rdmsrq(MSR_IA32_MCG_CAP, cap); *banks = min_t(unsigned, MAX_NR_BANKS, cap & MCG_BANKCNT_MASK); return !!(cap & MCG_CMCI_P); } @@ -106,7 +106,7 @@ static bool lmce_supported(void) if (mca_cfg.lmce_disabled) return false; - rdmsrl(MSR_IA32_MCG_CAP, tmp); + rdmsrq(MSR_IA32_MCG_CAP, tmp); /* * LMCE depends on recovery support in the processor. Hence both @@ -123,7 +123,7 @@ static bool lmce_supported(void) * WARN if the MSR isn't locked as init_ia32_feat_ctl() unconditionally * locks the MSR in the event that it wasn't already locked by BIOS. 
*/ - rdmsrl(MSR_IA32_FEAT_CTL, tmp); + rdmsrq(MSR_IA32_FEAT_CTL, tmp); if (WARN_ON_ONCE(!(tmp & FEAT_CTL_LOCKED))) return false; @@ -141,9 +141,9 @@ static void cmci_set_threshold(int bank, int thresh) u64 val; raw_spin_lock_irqsave(&cmci_discover_lock, flags); - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); + rdmsrq(MSR_IA32_MCx_CTL2(bank), val); val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; - wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh); + wrmsrq(MSR_IA32_MCx_CTL2(bank), val | thresh); raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } @@ -184,7 +184,7 @@ static bool cmci_skip_bank(int bank, u64 *val) if (test_bit(bank, mce_banks_ce_disabled)) return true; - rdmsrl(MSR_IA32_MCx_CTL2(bank), *val); + rdmsrq(MSR_IA32_MCx_CTL2(bank), *val); /* Already owned by someone else? */ if (*val & MCI_CTL2_CMCI_EN) { @@ -232,8 +232,8 @@ static void cmci_claim_bank(int bank, u64 val, int bios_zero_thresh, int *bios_w struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc); val |= MCI_CTL2_CMCI_EN; - wrmsrl(MSR_IA32_MCx_CTL2(bank), val); - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); + wrmsrq(MSR_IA32_MCx_CTL2(bank), val); + rdmsrq(MSR_IA32_MCx_CTL2(bank), val); /* If the enable bit did not stick, this bank should be polled. */ if (!(val & MCI_CTL2_CMCI_EN)) { @@ -324,9 +324,9 @@ static void __cmci_disable_bank(int bank) if (!test_bit(bank, this_cpu_ptr(mce_banks_owned))) return; - rdmsrl(MSR_IA32_MCx_CTL2(bank), val); + rdmsrq(MSR_IA32_MCx_CTL2(bank), val); val &= ~MCI_CTL2_CMCI_EN; - wrmsrl(MSR_IA32_MCx_CTL2(bank), val); + wrmsrq(MSR_IA32_MCx_CTL2(bank), val); __clear_bit(bank, this_cpu_ptr(mce_banks_owned)); if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD) @@ -430,10 +430,10 @@ void intel_init_lmce(void) if (!lmce_supported()) return; - rdmsrl(MSR_IA32_MCG_EXT_CTL, val); + rdmsrq(MSR_IA32_MCG_EXT_CTL, val); if (!(val & MCG_EXT_CTL_LMCE_EN)) - wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); + wrmsrq(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN); } void intel_clear_lmce(void) @@ -443,9 +443,9 @@ void intel_clear_lmce(void) if (!lmce_supported()) return; - rdmsrl(MSR_IA32_MCG_EXT_CTL, val); + rdmsrq(MSR_IA32_MCG_EXT_CTL, val); val &= ~MCG_EXT_CTL_LMCE_EN; - wrmsrl(MSR_IA32_MCG_EXT_CTL, val); + wrmsrq(MSR_IA32_MCG_EXT_CTL, val); } /* @@ -460,10 +460,10 @@ static void intel_imc_init(struct cpuinfo_x86 *c) case INTEL_SANDYBRIDGE_X: case INTEL_IVYBRIDGE_X: case INTEL_HASWELL_X: - if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control)) + if (rdmsrq_safe(MSR_ERROR_CONTROL, &error_control)) return; error_control |= 2; - wrmsrl_safe(MSR_ERROR_CONTROL, error_control); + wrmsrq_safe(MSR_ERROR_CONTROL, error_control); break; } } diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h index 95a504ece43e..b5ba598e54cb 100644 --- a/arch/x86/kernel/cpu/mce/internal.h +++ b/arch/x86/kernel/cpu/mce/internal.h @@ -312,7 +312,7 @@ static __always_inline void pentium_machine_check(struct pt_regs *regs) {} static __always_inline void winchip_machine_check(struct pt_regs *regs) {} #endif -noinstr u64 mce_rdmsrl(u32 msr); +noinstr u64 mce_rdmsrq(u32 msr); static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg) { diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 96cb992d50ef..097e39327942 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -217,7 +217,6 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi { struct patch_digest *pd = NULL; u8 
digest[SHA256_DIGEST_SIZE]; - struct sha256_state s; int i; if (x86_family(bsp_cpuid_1_eax) < 0x17) @@ -235,9 +234,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi return false; } - sha256_init(&s); - sha256_update(&s, data, len); - sha256_final(&s, digest); + sha256(data, len, digest); if (memcmp(digest, pd->sha256, sizeof(digest))) { pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id); @@ -607,7 +604,7 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev, if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize)) return false; - native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr); + native_wrmsrq(MSR_AMD64_PATCH_LOADER, p_addr); if (x86_family(bsp_cpuid_1_eax) == 0x17) { unsigned long p_addr_end = p_addr + psize - 1; @@ -1178,11 +1175,18 @@ static void microcode_fini_cpu_amd(int cpu) uci->mc = NULL; } +static void finalize_late_load_amd(int result) +{ + if (result) + cleanup(); +} + static struct microcode_ops microcode_amd_ops = { .request_microcode_fw = request_microcode_amd, .collect_cpu_info = collect_cpu_info_amd, .apply_microcode = apply_microcode_amd, .microcode_fini_cpu = microcode_fini_cpu_amd, + .finalize_late_load = finalize_late_load_amd, .nmi_safe = true, }; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 079f046ee26d..fe50eb5b7c4a 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -37,6 +37,7 @@ #include <asm/perf_event.h> #include <asm/processor.h> #include <asm/cmdline.h> +#include <asm/msr.h> #include <asm/setup.h> #include "internal.h" @@ -117,7 +118,7 @@ bool __init microcode_loader_disabled(void) * 3) Certain AMD patch levels are not allowed to be * overwritten. 
*/ - if (!have_cpuid_p() || + if (!cpuid_feature() || native_cpuid_ecx(1) & BIT(31) || amd_check_current_patch_level()) dis_ucode_ldr = true; @@ -696,6 +697,8 @@ static int load_late_locked(void) return load_late_stop_cpus(true); case UCODE_NFOUND: return -ENOENT; + case UCODE_OK: + return 0; default: return -EBADFD; } diff --git a/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h b/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h new file mode 100644 index 000000000000..cb6e601701ab --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h @@ -0,0 +1,150 @@ +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x03, .steppings = 0x0004, .driver_data = 0x2 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0001, .driver_data = 0x45 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0002, .driver_data = 0x40 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0004, .driver_data = 0x2c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0008, .driver_data = 0x10 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0001, .driver_data = 0xa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0020, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0400, .driver_data = 0xd }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x2000, .driver_data = 0x7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0002, .driver_data = 0x14 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0004, .driver_data = 0x38 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0008, .driver_data = 0x2e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0002, .driver_data = 0x11 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0008, .driver_data = 0x8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0040, .driver_data = 0xc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0400, .driver_data = 0x5 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x09, .steppings = 0x0020, .driver_data = 0x47 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0a, .steppings = 0x0001, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0a, .steppings = 0x0002, .driver_data = 0x1 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0b, .steppings = 0x0002, .driver_data = 0x1d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0b, .steppings = 0x0010, 
.driver_data = 0x2 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0d, .steppings = 0x0040, .driver_data = 0x18 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0e, .steppings = 0x0100, .driver_data = 0x39 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0e, .steppings = 0x1000, .driver_data = 0x59 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0004, .driver_data = 0x5d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0040, .driver_data = 0xd2 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0080, .driver_data = 0x6b }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0400, .driver_data = 0x95 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0800, .driver_data = 0xbc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x2000, .driver_data = 0xa4 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x16, .steppings = 0x0002, .driver_data = 0x44 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0040, .driver_data = 0x60f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0080, .driver_data = 0x70a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0400, .driver_data = 0xa0b }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1a, .steppings = 0x0010, .driver_data = 0x12 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1a, .steppings = 0x0020, .driver_data = 0x1d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1c, .steppings = 0x0004, .driver_data = 0x219 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1c, .steppings = 0x0400, .driver_data = 0x107 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1d, .steppings = 0x0002, .driver_data = 0x29 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1e, .steppings = 0x0020, .driver_data = 0xa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x25, .steppings = 0x0004, .driver_data = 0x11 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x25, .steppings = 0x0020, .driver_data = 0x7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x26, .steppings = 0x0002, .driver_data = 0x105 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2a, .steppings = 0x0080, .driver_data = 0x2f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2c, .steppings = 0x0004, .driver_data = 0x1f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 
0x2d, .steppings = 0x0040, .driver_data = 0x621 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2d, .steppings = 0x0080, .driver_data = 0x71a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2e, .steppings = 0x0040, .driver_data = 0xd }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2f, .steppings = 0x0004, .driver_data = 0x3b }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x37, .steppings = 0x0100, .driver_data = 0x838 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x37, .steppings = 0x0200, .driver_data = 0x90d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3a, .steppings = 0x0200, .driver_data = 0x21 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3c, .steppings = 0x0008, .driver_data = 0x28 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3d, .steppings = 0x0010, .driver_data = 0x2f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0010, .driver_data = 0x42e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0040, .driver_data = 0x600 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0080, .driver_data = 0x715 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3f, .steppings = 0x0004, .driver_data = 0x49 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3f, .steppings = 0x0010, .driver_data = 0x1a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x45, .steppings = 0x0002, .driver_data = 0x26 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x46, .steppings = 0x0002, .driver_data = 0x1c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x47, .steppings = 0x0002, .driver_data = 0x22 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4c, .steppings = 0x0008, .driver_data = 0x368 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4c, .steppings = 0x0010, .driver_data = 0x411 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4d, .steppings = 0x0100, .driver_data = 0x12d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4e, .steppings = 0x0008, .driver_data = 0xf0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0008, .driver_data = 0x1000191 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0010, .driver_data = 0x2007006 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0020, .driver_data = 0x3000010 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0040, .driver_data = 0x4003605 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, 
.vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0080, .driver_data = 0x5003707 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0800, .driver_data = 0x7002904 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0004, .driver_data = 0x1c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0008, .driver_data = 0x700001c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0010, .driver_data = 0xf00001a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0020, .driver_data = 0xe000015 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0004, .driver_data = 0x14 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0200, .driver_data = 0x48 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0400, .driver_data = 0x28 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5e, .steppings = 0x0008, .driver_data = 0xf0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5f, .steppings = 0x0002, .driver_data = 0x3e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x66, .steppings = 0x0008, .driver_data = 0x2a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6a, .steppings = 0x0020, .driver_data = 0xc0002f0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6a, .steppings = 0x0040, .driver_data = 0xd0003e7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6c, .steppings = 0x0002, .driver_data = 0x10002b0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7a, .steppings = 0x0002, .driver_data = 0x42 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7a, .steppings = 0x0100, .driver_data = 0x24 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7e, .steppings = 0x0020, .driver_data = 0xc6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8a, .steppings = 0x0002, .driver_data = 0x33 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8c, .steppings = 0x0002, .driver_data = 0xb8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8c, .steppings = 0x0004, .driver_data = 0x38 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8d, .steppings = 0x0002, .driver_data = 0x52 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0200, .driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0400, .driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0800, 
.driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x1000, .driver_data = 0xfc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0100, .driver_data = 0x2c000390 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0080, .driver_data = 0x2b000603 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0040, .driver_data = 0x2c000390 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0020, .driver_data = 0x2c000390 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0010, .driver_data = 0x2c000390 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x96, .steppings = 0x0002, .driver_data = 0x1a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x97, .steppings = 0x0004, .driver_data = 0x37 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x97, .steppings = 0x0020, .driver_data = 0x37 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0004, .driver_data = 0x37 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0020, .driver_data = 0x37 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9a, .steppings = 0x0008, .driver_data = 0x435 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9a, .steppings = 0x0010, .driver_data = 0x435 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9c, .steppings = 0x0001, .driver_data = 0x24000026 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0200, .driver_data = 0xf8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0400, .driver_data = 0xf8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0800, .driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x1000, .driver_data = 0xf8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x2000, .driver_data = 0x100 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0004, .driver_data = 0xfc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0008, .driver_data = 0xfc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0020, .driver_data = 0xfc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa6, .steppings = 0x0001, .driver_data = 0xfe }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa6, .steppings = 0x0002, .driver_data = 0xfc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = 
X86_VENDOR_INTEL, .family = 0x6, .model = 0xa7, .steppings = 0x0002, .driver_data = 0x62 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xaa, .steppings = 0x0010, .driver_data = 0x20 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xb7, .steppings = 0x0002, .driver_data = 0x12b }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0004, .driver_data = 0x4123 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0008, .driver_data = 0x4123 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0100, .driver_data = 0x4123 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbe, .steppings = 0x0001, .driver_data = 0x1a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xcf, .steppings = 0x0004, .driver_data = 0x21000283 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xcf, .steppings = 0x0002, .driver_data = 0x21000283 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x00, .steppings = 0x0080, .driver_data = 0x12 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x00, .steppings = 0x0400, .driver_data = 0x15 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x01, .steppings = 0x0004, .driver_data = 0x2e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0010, .driver_data = 0x21 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0020, .driver_data = 0x2c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0040, .driver_data = 0x10 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0080, .driver_data = 0x39 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0200, .driver_data = 0x2f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0004, .driver_data = 0xa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0008, .driver_data = 0xc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0010, .driver_data = 0x17 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0002, .driver_data = 0x17 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0008, .driver_data = 0x5 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0010, .driver_data = 0x6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0080, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0100, .driver_data = 0xe }, +{ .flags = 
X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0200, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0400, .driver_data = 0x4 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0004, .driver_data = 0xf }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0010, .driver_data = 0x4 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0020, .driver_data = 0x8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0100, .driver_data = 0x9 }, diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 2a397da43923..371ca6eac00e 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -320,7 +320,7 @@ static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci, } /* write microcode via MSR 0x79 */ - native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); + native_wrmsrq(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); rev = intel_get_microcode_revision(); if (rev != mc->hdr.rev) diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 3e2533954675..c78f860419d6 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -30,6 +30,7 @@ #include <asm/reboot.h> #include <asm/nmi.h> #include <clocksource/hyperv_timer.h> +#include <asm/msr.h> #include <asm/numa.h> #include <asm/svm.h> @@ -70,7 +71,7 @@ u64 hv_get_non_nested_msr(unsigned int reg) if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present) hv_ivm_msr_read(reg, &value); else - rdmsrl(reg, value); + rdmsrq(reg, value); return value; } EXPORT_SYMBOL_GPL(hv_get_non_nested_msr); @@ -82,9 +83,9 @@ void hv_set_non_nested_msr(unsigned int reg, u64 value) /* Write proxy bit via wrmsl instruction */ if (hv_is_sint_msr(reg)) - wrmsrl(reg, value | 1 << 20); + wrmsrq(reg, value | 1 << 20); } else { - wrmsrl(reg, value); + wrmsrq(reg, value); } } EXPORT_SYMBOL_GPL(hv_set_non_nested_msr); @@ -345,7 +346,7 @@ static unsigned long hv_get_tsc_khz(void) { unsigned long freq; - rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq); + rdmsrq(HV_X64_MSR_TSC_FREQUENCY, freq); return freq / 1000; } @@ -541,7 +542,7 @@ static void __init ms_hyperv_init_platform(void) */ u64 hv_lapic_frequency; - rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); + rdmsrq(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ); lapic_timer_period = hv_lapic_frequency; pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n", @@ -574,7 +575,7 @@ static void __init ms_hyperv_init_platform(void) * setting of this MSR bit should happen before init_intel() * is called. 
*/ - wrmsrl(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC); + wrmsrq(HV_X64_MSR_TSC_INVARIANT_CONTROL, HV_EXPOSE_INVARIANT_TSC); setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE); } diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index cf29681d01e0..d987b11c168c 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -22,6 +22,7 @@ #include <linux/cpuhotplug.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include <asm/resctrl.h> #include "internal.h" @@ -145,10 +146,10 @@ static inline void cache_alloc_hsw_probe(void) struct rdt_resource *r = &hw_res->r_resctrl; u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0; - if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm)) + if (wrmsrq_safe(MSR_IA32_L3_CBM_BASE, max_cbm)) return; - rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0); + rdmsrq(MSR_IA32_L3_CBM_BASE, l3_cbm_0); /* If all the bits were set in MSR, return success */ if (l3_cbm_0 != max_cbm) @@ -309,7 +310,7 @@ static void mba_wrmsr_amd(struct msr_param *m) unsigned int i; for (i = m->low; i < m->high; i++) - wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); + wrmsrq(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } /* @@ -334,7 +335,7 @@ static void mba_wrmsr_intel(struct msr_param *m) /* Write the delay values for mba. */ for (i = m->low; i < m->high; i++) - wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res)); + wrmsrq(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res)); } static void cat_wrmsr(struct msr_param *m) @@ -344,7 +345,7 @@ static void cat_wrmsr(struct msr_param *m) unsigned int i; for (i = m->low; i < m->high; i++) - wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); + wrmsrq(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } u32 resctrl_arch_get_num_closid(struct rdt_resource *r) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index a93ed7d2a160..591b0b44d260 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include <asm/resctrl.h> #include "internal.h" @@ -238,7 +239,7 @@ static int __rmid_read_phys(u32 prmid, enum resctrl_event_id eventid, u64 *val) * are error bits. */ wrmsr(MSR_IA32_QM_EVTSEL, eventid, prmid); - rdmsrl(MSR_IA32_QM_CTR, msr_val); + rdmsrq(MSR_IA32_QM_CTR, msr_val); if (msr_val & RMID_VAL_ERROR) return -EIO; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 92ea1472bde9..1190c48a16b2 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -25,6 +25,7 @@ #include <asm/cpu_device_id.h> #include <asm/resctrl.h> #include <asm/perf_event.h> +#include <asm/msr.h> #include "../../events/perf_event.h" /* For X86_CONFIG() */ #include "internal.h" @@ -481,8 +482,8 @@ int resctrl_arch_pseudo_lock_fn(void *_plr) * the buffer and evict pseudo-locked memory read earlier from the * cache. */ - saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL); - __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); + saved_msr = native_rdmsrq(MSR_MISC_FEATURE_CONTROL); + native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits); closid_p = this_cpu_read(pqr_state.cur_closid); rmid_p = this_cpu_read(pqr_state.cur_rmid); mem_r = plr->kmem; @@ -494,7 +495,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr) * pseudo-locked followed by reading of kernel memory to load it * into the cache. 
*/ - __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid); + native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid); /* * Cache was flushed earlier. Now access kernel memory to read it @@ -531,10 +532,10 @@ int resctrl_arch_pseudo_lock_fn(void *_plr) * Critical section end: restore closid with capacity bitmask that * does not overlap with pseudo-locked region. */ - __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p); + native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p); /* Re-enable the hardware prefetcher(s) */ - wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr); + wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr); local_irq_enable(); plr->thread_done = 1; @@ -904,7 +905,7 @@ int resctrl_arch_measure_cycles_lat_fn(void *_plr) * Disable hardware prefetchers. */ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); - wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); + wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits); mem_r = READ_ONCE(plr->kmem); /* * Dummy execute of the time measurement to load the needed @@ -1000,7 +1001,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, * Disable hardware prefetchers. */ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); - wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); + wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits); /* Initialize rest of local variables */ /* @@ -1018,8 +1019,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, * used in L1 cache, second to capture accurate value that does not * include cache misses incurred because of instruction loads. */ - rdpmcl(hit_pmcnum, hits_before); - rdpmcl(miss_pmcnum, miss_before); + hits_before = rdpmc(hit_pmcnum); + miss_before = rdpmc(miss_pmcnum); /* * From SDM: Performing back-to-back fast reads are not guaranteed * to be monotonic. @@ -1027,8 +1028,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, * before proceeding. */ rmb(); - rdpmcl(hit_pmcnum, hits_before); - rdpmcl(miss_pmcnum, miss_before); + hits_before = rdpmc(hit_pmcnum); + miss_before = rdpmc(miss_pmcnum); /* * Use LFENCE to ensure all previous instructions are retired * before proceeding. @@ -1050,8 +1051,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, * before proceeding. */ rmb(); - rdpmcl(hit_pmcnum, hits_after); - rdpmcl(miss_pmcnum, miss_after); + hits_after = rdpmc(hit_pmcnum); + miss_after = rdpmc(miss_pmcnum); /* * Use LFENCE to ensure all previous instructions are retired * before proceeding. 
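The pseudo_lock.c hunks above show, in one place, all of the accessor conversions that recur through this series: the raw __rdmsr()/__wrmsr() helpers become native_rdmsrq()/native_wrmsrq()/native_wrmsr(), the lo/hi split wrmsr(msr, val, 0x0) calls collapse into a single-u64 wrmsrq(), and the two-argument rdpmcl(counter, var) macro is replaced by a value-returning rdpmc(counter). A minimal sketch of the new forms, with hypothetical function names (only the MSR/PMC accessors and the MSR constant are taken from the patch):

	#include <asm/msr.h>

	static u64 sample_pmc(unsigned int counter)
	{
		u64 count;

		/* Previously: rdpmcl(counter, count); */
		count = rdpmc(counter);
		return count;
	}

	static void quiesce_prefetchers(u64 prefetch_disable_bits)
	{
		/* Previously: wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); */
		wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
	}
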
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index cc4a54145c83..c85ace29ea3a 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -28,6 +28,7 @@ #include <uapi/linux/magic.h> +#include <asm/msr.h> #include <asm/resctrl.h> #include "internal.h" @@ -1635,7 +1636,7 @@ void resctrl_arch_mon_event_config_read(void *_config_info) pr_warn_once("Invalid event id %d\n", config_info->evtid); return; } - rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); + rdmsrq(MSR_IA32_EVT_CFG_BASE + index, msrval); /* Report only the valid event configuration bits */ config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; @@ -1707,7 +1708,7 @@ void resctrl_arch_mon_event_config_write(void *_config_info) pr_warn_once("Invalid event id %d\n", config_info->evtid); return; } - wrmsr(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config, 0); + wrmsrq(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config); } static void mbm_config_write_domain(struct rdt_resource *r, @@ -2326,14 +2327,14 @@ static void l3_qos_cfg_update(void *arg) { bool *enable = arg; - wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); + wrmsrq(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); } static void l2_qos_cfg_update(void *arg) { bool *enable = arg; - wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); + wrmsrq(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); } static inline bool is_mba_linear(void) diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 16f3ca30626a..dbf6d71bdf18 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -27,6 +27,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, { X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 }, + { X86_FEATURE_APX, CPUID_EDX, 21, 0x00000007, 1 }, { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 }, { X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 }, { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 }, @@ -53,7 +54,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 }, - { X86_FEATURE_AMD_HETEROGENEOUS_CORES, CPUID_EAX, 30, 0x80000026, 0 }, + { X86_FEATURE_AMD_HTR_CORES, CPUID_EAX, 30, 0x80000026, 0 }, { 0, 0, 0, 0, 0 } }; diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 8ce352fc72ac..6722b2fc82cf 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/vmalloc.h> +#include <asm/msr.h> #include <asm/sgx.h> #include "driver.h" #include "encl.h" @@ -871,7 +872,7 @@ void sgx_update_lepubkeyhash(u64 *lepubkeyhash) WARN_ON_ONCE(preemptible()); for (i = 0; i < 4; i++) - wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]); + wrmsrq(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]); } const struct file_operations sgx_provision_fops = { diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 01456236a6dd..e35ccdc84910 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -30,6 +30,7 @@ #include <asm/hypervisor.h> #include <asm/io_apic.h> #include <asm/mpspec.h> +#include <asm/msr.h> #include <asm/smp.h> #include "cpu.h" 
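Most of the arch/x86 churn in this series is the same mechanical rename: the legacy "l"-suffixed MSR helpers rdmsrl()/wrmsrl() become the "q"-suffixed rdmsrq()/wrmsrq() (the old suffix presumably read as "long", the new one as a 64-bit quadword), and each converted file gains an explicit #include <asm/msr.h> rather than relying on indirect includes, as in the topology.c include hunk above. The calling convention is otherwise unchanged; a hedged sketch mirroring the check_for_real_bsp() hunk just below (the helper name here is illustrative, not part of the patch):

	#include <linux/types.h>
	#include <asm/msr.h>

	static bool cpu_is_bsp(void)
	{
		u64 msr;

		/* Previously: rdmsrl(MSR_IA32_APICBASE, msr); */
		rdmsrq(MSR_IA32_APICBASE, msr);
		return !!(msr & MSR_IA32_APICBASE_BSP);
	}
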
@@ -154,7 +155,7 @@ static __init bool check_for_real_bsp(u32 apic_id) * kernel must rely on the firmware enumeration order. */ if (has_apic_base) { - rdmsrl(MSR_IA32_APICBASE, msr); + rdmsrq(MSR_IA32_APICBASE, msr); is_bsp = !!(msr & MSR_IA32_APICBASE_BSP); } diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c index 03b3c9c3a45e..843b1655ab45 100644 --- a/arch/x86/kernel/cpu/topology_amd.c +++ b/arch/x86/kernel/cpu/topology_amd.c @@ -3,6 +3,7 @@ #include <asm/apic.h> #include <asm/memtype.h> +#include <asm/msr.h> #include <asm/processor.h> #include "cpu.h" @@ -133,7 +134,7 @@ static void parse_fam10h_node_id(struct topo_scan *tscan) if (!boot_cpu_has(X86_FEATURE_NODEID_MSR)) return; - rdmsrl(MSR_FAM10H_NODE_ID, nid.msr); + rdmsrq(MSR_FAM10H_NODE_ID, nid.msr); store_node(tscan, nid.nodes_per_pkg + 1, nid.node_id); tscan->c->topo.llc_id = nid.node_id; } @@ -160,7 +161,7 @@ static void topoext_fixup(struct topo_scan *tscan) if (msr_set_bit(0xc0011005, 54) <= 0) return; - rdmsrl(0xc0011005, msrval); + rdmsrq(0xc0011005, msrval); if (msrval & BIT_64(54)) { set_cpu_cap(c, X86_FEATURE_TOPOEXT); pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); @@ -182,7 +183,7 @@ static void parse_topology_amd(struct topo_scan *tscan) if (cpu_feature_enabled(X86_FEATURE_TOPOEXT)) has_topoext = cpu_parse_topology_ext(tscan); - if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) + if (cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES)) tscan->c->topo.cpu_type = cpuid_ebx(0x80000026); if (!has_topoext && !parse_8000_0008(tscan)) diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c index b31ee4f1657a..49782724a943 100644 --- a/arch/x86/kernel/cpu/tsx.c +++ b/arch/x86/kernel/cpu/tsx.c @@ -12,6 +12,7 @@ #include <asm/cmdline.h> #include <asm/cpu.h> +#include <asm/msr.h> #include "cpu.h" @@ -24,7 +25,7 @@ static void tsx_disable(void) { u64 tsx; - rdmsrl(MSR_IA32_TSX_CTRL, tsx); + rdmsrq(MSR_IA32_TSX_CTRL, tsx); /* Force all transactions to immediately abort */ tsx |= TSX_CTRL_RTM_DISABLE; @@ -37,14 +38,14 @@ static void tsx_disable(void) */ tsx |= TSX_CTRL_CPUID_CLEAR; - wrmsrl(MSR_IA32_TSX_CTRL, tsx); + wrmsrq(MSR_IA32_TSX_CTRL, tsx); } static void tsx_enable(void) { u64 tsx; - rdmsrl(MSR_IA32_TSX_CTRL, tsx); + rdmsrq(MSR_IA32_TSX_CTRL, tsx); /* Enable the RTM feature in the cpu */ tsx &= ~TSX_CTRL_RTM_DISABLE; @@ -56,7 +57,7 @@ static void tsx_enable(void) */ tsx &= ~TSX_CTRL_CPUID_CLEAR; - wrmsrl(MSR_IA32_TSX_CTRL, tsx); + wrmsrq(MSR_IA32_TSX_CTRL, tsx); } static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) @@ -115,13 +116,13 @@ static void tsx_clear_cpuid(void) */ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) && boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { - rdmsrl(MSR_TSX_FORCE_ABORT, msr); + rdmsrq(MSR_TSX_FORCE_ABORT, msr); msr |= MSR_TFA_TSX_CPUID_CLEAR; - wrmsrl(MSR_TSX_FORCE_ABORT, msr); + wrmsrq(MSR_TSX_FORCE_ABORT, msr); } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) { - rdmsrl(MSR_IA32_TSX_CTRL, msr); + rdmsrq(MSR_IA32_TSX_CTRL, msr); msr |= TSX_CTRL_CPUID_CLEAR; - wrmsrl(MSR_IA32_TSX_CTRL, msr); + wrmsrq(MSR_IA32_TSX_CTRL, msr); } } @@ -146,11 +147,11 @@ static void tsx_dev_mode_disable(void) !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL)) return; - rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); + rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); if (mcu_opt_ctrl & RTM_ALLOW) { mcu_opt_ctrl &= ~RTM_ALLOW; - wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); + wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); 
setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT); } } diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c index 2293efd6ffa6..933fcd7ff250 100644 --- a/arch/x86/kernel/cpu/umwait.c +++ b/arch/x86/kernel/cpu/umwait.c @@ -33,7 +33,7 @@ static DEFINE_MUTEX(umwait_lock); static void umwait_update_control_msr(void * unused) { lockdep_assert_irqs_disabled(); - wrmsr(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached), 0); + wrmsrq(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached)); } /* @@ -71,7 +71,7 @@ static int umwait_cpu_offline(unsigned int cpu) * the original control MSR value in umwait_init(). So there * is no race condition here. */ - wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0); + wrmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); return 0; } @@ -214,7 +214,7 @@ static int __init umwait_init(void) * changed. This is the only place where orig_umwait_control_cached * is modified. */ - rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); + rdmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached); ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online", umwait_cpu_online, umwait_cpu_offline); diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 90eba7eb5335..89b1c8a70fe8 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -4,6 +4,7 @@ #include <asm/cpu.h> #include <asm/cpufeature.h> +#include <asm/msr.h> #include "cpu.h" diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index c6fefd4585f8..71ee20102a8a 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -23,8 +23,6 @@ #include <asm/stacktrace.h> #include <asm/unwind.h> -int panic_on_unrecovered_nmi; -int panic_on_io_nmi; static int die_counter; static struct pt_regs exec_summary_regs; diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 3aad78bfcb26..cba75306e5b6 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/console.h> #include <linux/kernel.h> +#include <linux/kexec.h> #include <linux/init.h> #include <linux/string.h> #include <linux/screen_info.h> @@ -144,6 +145,11 @@ static __init void early_serial_hw_init(unsigned divisor) static_call(serial_out)(early_serial_base, DLL, divisor & 0xff); static_call(serial_out)(early_serial_base, DLH, (divisor >> 8) & 0xff); static_call(serial_out)(early_serial_base, LCR, c & ~DLAB); + +#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_X86_64) + if (static_call_query(serial_in) == io_serial_in) + kexec_debug_8250_port = early_serial_base; +#endif } #define DEFAULT_BAUD 9600 @@ -327,6 +333,9 @@ static __init void early_pci_serial_init(char *s) /* WARNING! 
assuming the address is always in the first 4G */ early_serial_base = (unsigned long)early_ioremap(bar0 & PCI_BASE_ADDRESS_MEM_MASK, 0x10); +#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_X86_64) + kexec_debug_8250_mmio32 = bar0 & PCI_BASE_ADDRESS_MEM_MASK; +#endif write_pci_config(bus, slot, func, PCI_COMMAND, cmdreg|PCI_COMMAND_MEMORY); } diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h index f6d856bd50bc..10d0a720659c 100644 --- a/arch/x86/kernel/fpu/context.h +++ b/arch/x86/kernel/fpu/context.h @@ -53,7 +53,7 @@ static inline void fpregs_activate(struct fpu *fpu) /* Internal helper for switch_fpu_return() and signal frame setup */ static inline void fpregs_restore_userregs(void) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); int cpu = smp_processor_id(); if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER))) @@ -67,7 +67,7 @@ static inline void fpregs_restore_userregs(void) * If PKRU is enabled, then the PKRU value is already * correct because it was either set in switch_to() or in * flush_thread(). So it is excluded because it might be - * not up to date in current->thread.fpu.xsave state. + * not up to date in current->thread.fpu->xsave state. * * XFD state is handled in restore_fpregs_from_fpstate(). */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 91d6341f281f..ea138583dd92 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -11,6 +11,7 @@ #include <asm/fpu/sched.h> #include <asm/fpu/signal.h> #include <asm/fpu/types.h> +#include <asm/msr.h> #include <asm/traps.h> #include <asm/irq_regs.h> @@ -43,14 +44,27 @@ struct fpu_state_config fpu_user_cfg __ro_after_init; */ struct fpstate init_fpstate __ro_after_init; -/* Track in-kernel FPU usage */ -static DEFINE_PER_CPU(bool, in_kernel_fpu); +/* + * Track FPU initialization and kernel-mode usage. 'true' means the FPU is + * initialized and is not currently being used by the kernel: + */ +DEFINE_PER_CPU(bool, kernel_fpu_allowed); /* * Track which context is using the FPU on the CPU: */ DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); +#ifdef CONFIG_X86_DEBUG_FPU +struct fpu *x86_task_fpu(struct task_struct *task) +{ + if (WARN_ON_ONCE(task->flags & PF_KTHREAD)) + return NULL; + + return (void *)task + sizeof(*task); +} +#endif + /* * Can we use the FPU in kernel mode with the * whole "kernel_fpu_begin/end()" sequence? @@ -61,15 +75,18 @@ bool irq_fpu_usable(void) return false; /* - * In kernel FPU usage already active? This detects any explicitly - * nested usage in task or softirq context, which is unsupported. It - * also detects attempted usage in a hardirq that has interrupted a - * kernel-mode FPU section. + * Return false in the following cases: + * + * - FPU is not yet initialized. This can happen only when the call is + * coming from CPU onlining, for example for microcode checksumming. + * - The kernel is already using the FPU, either because of explicit + * nesting (which should never be done), or because of implicit + * nesting when a hardirq interrupted a kernel-mode FPU section. 
+ * + * The single boolean check below handles both cases: */ - if (this_cpu_read(in_kernel_fpu)) { - WARN_ON_FPU(!in_hardirq()); + if (!this_cpu_read(kernel_fpu_allowed)) return false; - } /* * When not in NMI or hard interrupt context, FPU can be used in: @@ -202,7 +219,7 @@ void fpu_reset_from_exception_fixup(void) #if IS_ENABLED(CONFIG_KVM) static void __fpstate_reset(struct fpstate *fpstate, u64 xfd); -static void fpu_init_guest_permissions(struct fpu_guest *gfpu) +static void fpu_lock_guest_permissions(void) { struct fpu_state_perm *fpuperm; u64 perm; @@ -211,15 +228,13 @@ static void fpu_init_guest_permissions(struct fpu_guest *gfpu) return; spin_lock_irq(¤t->sighand->siglock); - fpuperm = ¤t->group_leader->thread.fpu.guest_perm; + fpuperm = &x86_task_fpu(current->group_leader)->guest_perm; perm = fpuperm->__state_perm; /* First fpstate allocation locks down permissions. */ WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED); spin_unlock_irq(¤t->sighand->siglock); - - gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED; } bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) @@ -240,7 +255,6 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) gfpu->fpstate = fpstate; gfpu->xfeatures = fpu_kernel_cfg.default_features; - gfpu->perm = fpu_kernel_cfg.default_features; /* * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state @@ -255,7 +269,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size)) gfpu->uabi_size = fpu_user_cfg.default_size; - fpu_init_guest_permissions(gfpu); + fpu_lock_guest_permissions(); return true; } @@ -263,16 +277,16 @@ EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate); void fpu_free_guest_fpstate(struct fpu_guest *gfpu) { - struct fpstate *fps = gfpu->fpstate; + struct fpstate *fpstate = gfpu->fpstate; - if (!fps) + if (!fpstate) return; - if (WARN_ON_ONCE(!fps->is_valloc || !fps->is_guest || fps->in_use)) + if (WARN_ON_ONCE(!fpstate->is_valloc || !fpstate->is_guest || fpstate->in_use)) return; gfpu->fpstate = NULL; - vfree(fps); + vfree(fpstate); } EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate); @@ -323,12 +337,12 @@ EXPORT_SYMBOL_GPL(fpu_update_guest_xfd); */ void fpu_sync_guest_vmexit_xfd_state(void) { - struct fpstate *fps = current->thread.fpu.fpstate; + struct fpstate *fpstate = x86_task_fpu(current)->fpstate; lockdep_assert_irqs_disabled(); if (fpu_state_size_dynamic()) { - rdmsrl(MSR_IA32_XFD, fps->xfd); - __this_cpu_write(xfd_state, fps->xfd); + rdmsrq(MSR_IA32_XFD, fpstate->xfd); + __this_cpu_write(xfd_state, fpstate->xfd); } } EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state); @@ -337,7 +351,7 @@ EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state); int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) { struct fpstate *guest_fps = guest_fpu->fpstate; - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); struct fpstate *cur_fps = fpu->fpstate; fpregs_lock(); @@ -431,14 +445,15 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask) fpregs_lock(); WARN_ON_FPU(!irq_fpu_usable()); - WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); - this_cpu_write(in_kernel_fpu, true); + /* Toggle kernel_fpu_allowed to false: */ + WARN_ON_FPU(!this_cpu_read(kernel_fpu_allowed)); + this_cpu_write(kernel_fpu_allowed, false); if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && !test_thread_flag(TIF_NEED_FPU_LOAD)) { set_thread_flag(TIF_NEED_FPU_LOAD); - save_fpregs_to_fpstate(¤t->thread.fpu); + save_fpregs_to_fpstate(x86_task_fpu(current)); } 
__cpu_invalidate_fpregs_state(); @@ -453,9 +468,10 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); void kernel_fpu_end(void) { - WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); + /* Toggle kernel_fpu_allowed back to true: */ + WARN_ON_FPU(this_cpu_read(kernel_fpu_allowed)); + this_cpu_write(kernel_fpu_allowed, true); - this_cpu_write(in_kernel_fpu, false); if (!irqs_disabled()) fpregs_unlock(); } @@ -467,7 +483,7 @@ EXPORT_SYMBOL_GPL(kernel_fpu_end); */ void fpu_sync_fpstate(struct fpu *fpu) { - WARN_ON_FPU(fpu != ¤t->thread.fpu); + WARN_ON_FPU(fpu != x86_task_fpu(current)); fpregs_lock(); trace_x86_fpu_before_save(fpu); @@ -552,7 +568,7 @@ void fpstate_reset(struct fpu *fpu) static inline void fpu_inherit_perms(struct fpu *dst_fpu) { if (fpu_state_size_dynamic()) { - struct fpu *src_fpu = ¤t->group_leader->thread.fpu; + struct fpu *src_fpu = x86_task_fpu(current->group_leader); spin_lock_irq(¤t->sighand->siglock); /* Fork also inherits the permissions of the parent */ @@ -572,7 +588,7 @@ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp) if (!ssp) return 0; - xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave, + xstate = get_xsave_addr(&x86_task_fpu(dst)->fpstate->regs.xsave, XFEATURE_CET_USER); /* @@ -593,8 +609,16 @@ static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp) int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal, unsigned long ssp) { - struct fpu *src_fpu = ¤t->thread.fpu; - struct fpu *dst_fpu = &dst->thread.fpu; + /* + * We allocate the new FPU structure right after the end of the task struct. + * task allocation size already took this into account. + * + * This is safe because task_struct size is a multiple of cacheline size, + * thus x86_task_fpu() will always be cacheline aligned as well. + */ + struct fpu *dst_fpu = (void *)dst + sizeof(*dst); + + BUILD_BUG_ON(sizeof(*dst) % SMP_CACHE_BYTES != 0); /* The new task's FPU state cannot be valid in the hardware. */ dst_fpu->last_cpu = -1; @@ -657,19 +681,22 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal, if (update_fpu_shstk(dst, ssp)) return 1; - trace_x86_fpu_copy_src(src_fpu); trace_x86_fpu_copy_dst(dst_fpu); return 0; } /* - * Whitelist the FPU register state embedded into task_struct for hardened - * usercopy. + * While struct fpu is no longer part of struct thread_struct, it is still + * allocated after struct task_struct in the "task_struct" kmem cache. But + * since FPU is expected to be part of struct thread_struct, we have to + * adjust for it here. */ void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size) { - *offset = offsetof(struct thread_struct, fpu.__fpstate.regs); + /* The allocation follows struct task_struct. */ + *offset = sizeof(struct task_struct) - offsetof(struct task_struct, thread); + *offset += offsetof(struct fpu, __fpstate.regs); *size = fpu_kernel_cfg.default_size; } @@ -682,11 +709,18 @@ void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size) * a state-restore is coming: either an explicit one, * or a reschedule. 
*/ -void fpu__drop(struct fpu *fpu) +void fpu__drop(struct task_struct *tsk) { + struct fpu *fpu; + + if (test_tsk_thread_flag(tsk, TIF_NEED_FPU_LOAD)) + return; + + fpu = x86_task_fpu(tsk); + preempt_disable(); - if (fpu == ¤t->thread.fpu) { + if (fpu == x86_task_fpu(current)) { /* Ignore delayed exceptions from user space */ asm volatile("1: fwait\n" "2:\n" @@ -718,9 +752,9 @@ static inline void restore_fpregs_from_init_fpstate(u64 features_mask) /* * Reset current->fpu memory state to the init values. */ -static void fpu_reset_fpregs(void) +static void fpu_reset_fpstate_regs(void) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); fpregs_lock(); __fpu_invalidate_fpregs_state(fpu); @@ -749,11 +783,11 @@ static void fpu_reset_fpregs(void) */ void fpu__clear_user_states(struct fpu *fpu) { - WARN_ON_FPU(fpu != ¤t->thread.fpu); + WARN_ON_FPU(fpu != x86_task_fpu(current)); fpregs_lock(); if (!cpu_feature_enabled(X86_FEATURE_FPU)) { - fpu_reset_fpregs(); + fpu_reset_fpstate_regs(); fpregs_unlock(); return; } @@ -782,8 +816,8 @@ void fpu__clear_user_states(struct fpu *fpu) void fpu_flush_thread(void) { - fpstate_reset(¤t->thread.fpu); - fpu_reset_fpregs(); + fpstate_reset(x86_task_fpu(current)); + fpu_reset_fpstate_regs(); } /* * Load FPU context before returning to userspace. @@ -823,7 +857,7 @@ void fpregs_lock_and_load(void) */ void fpregs_assert_state_consistent(void) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); if (test_thread_flag(TIF_NEED_FPU_LOAD)) return; @@ -835,7 +869,7 @@ EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent); void fpregs_mark_activate(void) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); fpregs_activate(fpu); fpu->last_cpu = smp_processor_id(); diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 998a08f17e33..99db41bf9fa6 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -38,7 +38,7 @@ static void fpu__init_cpu_generic(void) /* Flush out any pending x87 state: */ #ifdef CONFIG_MATH_EMULATION if (!boot_cpu_has(X86_FEATURE_FPU)) - fpstate_init_soft(¤t->thread.fpu.fpstate->regs.soft); + ; else #endif asm volatile ("fninit"); @@ -51,6 +51,9 @@ void fpu__init_cpu(void) { fpu__init_cpu_generic(); fpu__init_cpu_xstate(); + + /* Start allowing kernel-mode FPU: */ + this_cpu_write(kernel_fpu_allowed, true); } static bool __init fpu__probe_without_cpuid(void) @@ -73,6 +76,8 @@ static bool __init fpu__probe_without_cpuid(void) static void __init fpu__init_system_early_generic(void) { + set_thread_flag(TIF_NEED_FPU_LOAD); + if (!boot_cpu_has(X86_FEATURE_CPUID) && !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { if (fpu__probe_without_cpuid()) @@ -94,7 +99,6 @@ static void __init fpu__init_system_early_generic(void) * Boot time FPU feature detection code: */ unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu; -EXPORT_SYMBOL_GPL(mxcsr_feature_mask); static void __init fpu__init_system_mxcsr(void) { @@ -150,11 +154,13 @@ static void __init fpu__init_task_struct_size(void) { int task_size = sizeof(struct task_struct); + task_size += sizeof(struct fpu); + /* * Subtract off the static size of the register state. * It potentially has a bunch of padding. 
*/ - task_size -= sizeof(current->thread.fpu.__fpstate.regs); + task_size -= sizeof(union fpregs_state); /* * Add back the dynamically-calculated register state @@ -164,14 +170,9 @@ static void __init fpu__init_task_struct_size(void) /* * We dynamically size 'struct fpu', so we require that - * it be at the end of 'thread_struct' and that - * 'thread_struct' be at the end of 'task_struct'. If - * you hit a compile error here, check the structure to - * see if something got added to the end. + * 'state' be at the end of 'it: */ CHECK_MEMBER_AT_END_OF(struct fpu, __fpstate); - CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu); - CHECK_MEMBER_AT_END_OF(struct task_struct, thread); arch_task_struct_size = task_size; } @@ -204,7 +205,6 @@ static void __init fpu__init_system_xstate_size_legacy(void) fpu_kernel_cfg.default_size = size; fpu_user_cfg.max_size = size; fpu_user_cfg.default_size = size; - fpstate_reset(¤t->thread.fpu); } /* @@ -213,7 +213,6 @@ static void __init fpu__init_system_xstate_size_legacy(void) */ void __init fpu__init_system(void) { - fpstate_reset(¤t->thread.fpu); fpu__init_system_early_generic(); /* diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index 887b0b8e21e3..0986c2200adc 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c @@ -45,7 +45,7 @@ int regset_xregset_fpregs_active(struct task_struct *target, const struct user_r */ static void sync_fpstate(struct fpu *fpu) { - if (fpu == ¤t->thread.fpu) + if (fpu == x86_task_fpu(current)) fpu_sync_fpstate(fpu); } @@ -63,7 +63,7 @@ static void fpu_force_restore(struct fpu *fpu) * Only stopped child tasks can be used to modify the FPU * state in the fpstate buffer: */ - WARN_ON_FPU(fpu == ¤t->thread.fpu); + WARN_ON_FPU(fpu == x86_task_fpu(current)); __fpu_invalidate_fpregs_state(fpu); } @@ -71,7 +71,7 @@ static void fpu_force_restore(struct fpu *fpu) int xfpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { - struct fpu *fpu = &target->thread.fpu; + struct fpu *fpu = x86_task_fpu(target); if (!cpu_feature_enabled(X86_FEATURE_FXSR)) return -ENODEV; @@ -91,7 +91,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct fpu *fpu = &target->thread.fpu; + struct fpu *fpu = x86_task_fpu(target); struct fxregs_state newstate; int ret; @@ -133,7 +133,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) return -ENODEV; - sync_fpstate(&target->thread.fpu); + sync_fpstate(x86_task_fpu(target)); copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE); return 0; @@ -143,7 +143,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct fpu *fpu = &target->thread.fpu; + struct fpu *fpu = x86_task_fpu(target); struct xregs_state *tmpbuf = NULL; int ret; @@ -187,7 +187,7 @@ int ssp_active(struct task_struct *target, const struct user_regset *regset) int ssp_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { - struct fpu *fpu = &target->thread.fpu; + struct fpu *fpu = x86_task_fpu(target); struct cet_user_state *cetregs; if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || @@ -214,7 +214,7 @@ int ssp_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void 
*kbuf, const void __user *ubuf) { - struct fpu *fpu = &target->thread.fpu; + struct fpu *fpu = x86_task_fpu(target); struct xregs_state *xsave = &fpu->fpstate->regs.xsave; struct cet_user_state *cetregs; unsigned long user_ssp; @@ -368,7 +368,7 @@ static void __convert_from_fxsr(struct user_i387_ia32_struct *env, void convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) { - __convert_from_fxsr(env, tsk, &tsk->thread.fpu.fpstate->regs.fxsave); + __convert_from_fxsr(env, tsk, &x86_task_fpu(tsk)->fpstate->regs.fxsave); } void convert_to_fxsr(struct fxregs_state *fxsave, @@ -401,7 +401,7 @@ void convert_to_fxsr(struct fxregs_state *fxsave, int fpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { - struct fpu *fpu = &target->thread.fpu; + struct fpu *fpu = x86_task_fpu(target); struct user_i387_ia32_struct env; struct fxregs_state fxsave, *fx; @@ -433,7 +433,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct fpu *fpu = &target->thread.fpu; + struct fpu *fpu = x86_task_fpu(target); struct user_i387_ia32_struct env; int ret; diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 6c69cb28b298..c3ec2512f2bb 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -43,13 +43,13 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf, * fpstate layout with out copying the extended state information * in the memory layout. */ - if (__get_user(magic2, (__u32 __user *)(fpstate + current->thread.fpu.fpstate->user_size))) + if (__get_user(magic2, (__u32 __user *)(fpstate + x86_task_fpu(current)->fpstate->user_size))) return false; if (likely(magic2 == FP_XSTATE_MAGIC2)) return true; setfx: - trace_x86_fpu_xstate_check_failed(¤t->thread.fpu); + trace_x86_fpu_xstate_check_failed(x86_task_fpu(current)); /* Set the parameters for fx only state */ fx_sw->magic1 = 0; @@ -64,13 +64,13 @@ setfx: static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf) { if (use_fxsr()) { - struct xregs_state *xsave = &tsk->thread.fpu.fpstate->regs.xsave; + struct xregs_state *xsave = &x86_task_fpu(tsk)->fpstate->regs.xsave; struct user_i387_ia32_struct env; struct _fpstate_32 __user *fp = buf; fpregs_lock(); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) - fxsave(&tsk->thread.fpu.fpstate->regs.fxsave); + fxsave(&x86_task_fpu(tsk)->fpstate->regs.fxsave); fpregs_unlock(); convert_from_fxsr(&env, tsk); @@ -114,7 +114,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, { struct xregs_state __user *x = buf; struct _fpx_sw_bytes sw_bytes = {}; - u32 xfeatures; int err; /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ @@ -128,12 +127,6 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, (__u32 __user *)(buf + fpstate->user_size)); /* - * Read the xfeatures which we copied (directly from the cpu or - * from the state in task struct) to the user buffers. - */ - err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures); - - /* * For legacy compatible, we always set FP/SSE bits in the bit * vector while saving the state to the user context. This will * enable us capturing any changes(during sigreturn) to @@ -144,9 +137,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, * header as well as change any contents in the memory layout. 
* xrestore as part of sigreturn will capture all the changes. */ - xfeatures |= XFEATURE_MASK_FPSSE; - - err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures); + err |= set_xfeature_in_sigframe(x, XFEATURE_MASK_FPSSE); return !err; } @@ -184,7 +175,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pk bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru) { struct task_struct *tsk = current; - struct fpstate *fpstate = tsk->thread.fpu.fpstate; + struct fpstate *fpstate = x86_task_fpu(tsk)->fpstate; bool ia32_fxstate = (buf != buf_fx); int ret; @@ -272,7 +263,7 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures, */ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); int ret; /* Restore enabled features only. */ @@ -332,7 +323,7 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, bool ia32_fxstate) { struct task_struct *tsk = current; - struct fpu *fpu = &tsk->thread.fpu; + struct fpu *fpu = x86_task_fpu(tsk); struct user_i387_ia32_struct env; bool success, fx_only = false; union fpregs_state *fpregs; @@ -452,7 +443,7 @@ static inline unsigned int xstate_sigframe_size(struct fpstate *fpstate) */ bool fpu__restore_sig(void __user *buf, int ia32_frame) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); void __user *buf_fx = buf; bool ia32_fxstate = false; bool success = false; @@ -499,7 +490,7 @@ unsigned long fpu__alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx, unsigned long *size) { - unsigned long frame_size = xstate_sigframe_size(current->thread.fpu.fpstate); + unsigned long frame_size = xstate_sigframe_size(x86_task_fpu(current)->fpstate); *buf_fx = sp = round_down(sp - frame_size, 64); if (ia32_frame && use_fxsr()) { diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 6a41d1610d8b..9aa9ac8399ae 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -14,13 +14,15 @@ #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/coredump.h> +#include <linux/sort.h> #include <asm/fpu/api.h> #include <asm/fpu/regset.h> #include <asm/fpu/signal.h> #include <asm/fpu/xcr.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> +#include <asm/msr.h> #include <asm/tlbflush.h> #include <asm/prctl.h> #include <asm/elf.h> @@ -62,6 +64,7 @@ static const char *xfeature_names[] = "unknown xstate feature", "AMX Tile config", "AMX Tile data", + "APX registers", "unknown xstate feature", }; @@ -80,6 +83,7 @@ static unsigned short xsave_cpuid_features[] __initdata = { [XFEATURE_CET_USER] = X86_FEATURE_SHSTK, [XFEATURE_XTILE_CFG] = X86_FEATURE_AMX_TILE, [XFEATURE_XTILE_DATA] = X86_FEATURE_AMX_TILE, + [XFEATURE_APX] = X86_FEATURE_APX, }; static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init = @@ -88,6 +92,31 @@ static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init = { [ 0 ... XFEATURE_MAX - 1] = -1}; static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init; +/* + * Ordering of xstate components in uncompacted format: The xfeature + * number does not necessarily indicate its position in the XSAVE buffer. + * This array defines the traversal order of xstate features. + */ +static unsigned int xfeature_uncompact_order[XFEATURE_MAX] __ro_after_init = + { [ 0 ... 
XFEATURE_MAX - 1] = -1}; + +static inline unsigned int next_xfeature_order(unsigned int i, u64 mask) +{ + for (; xfeature_uncompact_order[i] != -1; i++) { + if (mask & BIT_ULL(xfeature_uncompact_order[i])) + break; + } + + return i; +} + +/* Iterate xstate features in uncompacted order: */ +#define for_each_extended_xfeature_in_order(i, mask) \ + for (i = 0; \ + i = next_xfeature_order(i, mask), \ + xfeature_uncompact_order[i] != -1; \ + i++) + #define XSTATE_FLAG_SUPERVISOR BIT(0) #define XSTATE_FLAG_ALIGNED64 BIT(1) @@ -199,7 +228,7 @@ void fpu__init_cpu_xstate(void) * MSR_IA32_XSS sets supervisor states managed by XSAVES. */ if (boot_cpu_has(X86_FEATURE_XSAVES)) { - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | xfeatures_mask_independent()); } } @@ -209,16 +238,20 @@ static bool xfeature_enabled(enum xfeature xfeature) return fpu_kernel_cfg.max_features & BIT_ULL(xfeature); } +static int compare_xstate_offsets(const void *xfeature1, const void *xfeature2) +{ + return xstate_offsets[*(unsigned int *)xfeature1] - + xstate_offsets[*(unsigned int *)xfeature2]; +} + /* * Record the offsets and sizes of various xstates contained - * in the XSAVE state memory layout. + * in the XSAVE state memory layout. Also, create an ordered + * list of xfeatures for handling out-of-order offsets. */ static void __init setup_xstate_cache(void) { - u32 eax, ebx, ecx, edx, i; - /* start at the beginning of the "extended state" */ - unsigned int last_good_offset = offsetof(struct xregs_state, - extended_state_area); + u32 eax, ebx, ecx, edx, xfeature, i = 0; /* * The FP xstates and SSE xstates are legacy states. They are always * in the fixed offsets in the xsave area in either compacted form @@ -232,31 +265,30 @@ static void __init setup_xstate_cache(void) xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state, xmm_space); - for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) { - cpuid_count(CPUID_LEAF_XSTATE, i, &eax, &ebx, &ecx, &edx); + for_each_extended_xfeature(xfeature, fpu_kernel_cfg.max_features) { + cpuid_count(CPUID_LEAF_XSTATE, xfeature, &eax, &ebx, &ecx, &edx); - xstate_sizes[i] = eax; - xstate_flags[i] = ecx; + xstate_sizes[xfeature] = eax; + xstate_flags[xfeature] = ecx; /* * If an xfeature is supervisor state, the offset in EBX is * invalid, leave it to -1. */ - if (xfeature_is_supervisor(i)) + if (xfeature_is_supervisor(xfeature)) continue; - xstate_offsets[i] = ebx; + xstate_offsets[xfeature] = ebx; - /* - * In our xstate size checks, we assume that the highest-numbered - * xstate feature has the highest offset in the buffer. Ensure - * it does. - */ - WARN_ONCE(last_good_offset > xstate_offsets[i], - "x86/fpu: misordered xstate at %d\n", last_good_offset); - - last_good_offset = xstate_offsets[i]; + /* Populate the list of xfeatures before sorting */ + xfeature_uncompact_order[i++] = xfeature; } + + /* + * Sort xfeatures by their offsets to support out-of-order + * offsets in the uncompacted format. 
+ */ + sort(xfeature_uncompact_order, i, sizeof(unsigned int), compare_xstate_offsets, NULL); } /* @@ -340,7 +372,8 @@ static __init void os_xrstor_booting(struct xregs_state *xstate) XFEATURE_MASK_BNDCSR | \ XFEATURE_MASK_PASID | \ XFEATURE_MASK_CET_USER | \ - XFEATURE_MASK_XTILE) + XFEATURE_MASK_XTILE | \ + XFEATURE_MASK_APX) /* * setup the xstate image representing the init state @@ -540,6 +573,7 @@ static bool __init check_xstate_against_struct(int nr) case XFEATURE_PASID: return XCHECK_SZ(sz, nr, struct ia32_pasid_state); case XFEATURE_XTILE_CFG: return XCHECK_SZ(sz, nr, struct xtile_cfg); case XFEATURE_CET_USER: return XCHECK_SZ(sz, nr, struct cet_user_state); + case XFEATURE_APX: return XCHECK_SZ(sz, nr, struct apx_state); case XFEATURE_XTILE_DATA: check_xtile_data_against_struct(sz); return true; default: XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr); @@ -552,13 +586,20 @@ static bool __init check_xstate_against_struct(int nr) static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted) { unsigned int topmost = fls64(xfeatures) - 1; - unsigned int offset = xstate_offsets[topmost]; + unsigned int offset, i; if (topmost <= XFEATURE_SSE) return sizeof(struct xregs_state); - if (compacted) + if (compacted) { offset = xfeature_get_offset(xfeatures, topmost); + } else { + /* Walk through the xfeature order to pick the last */ + for_each_extended_xfeature_in_order(i, xfeatures) + topmost = xfeature_uncompact_order[i]; + offset = xstate_offsets[topmost]; + } + return offset + xstate_sizes[topmost]; } @@ -639,7 +680,7 @@ static unsigned int __init get_xsave_compacted_size(void) return get_compacted_size(); /* Disable independent features. */ - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor()); + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor()); /* * Ask the hardware what size is required of the buffer. @@ -648,7 +689,7 @@ static unsigned int __init get_xsave_compacted_size(void) size = get_compacted_size(); /* Re-enable independent features so XSAVES will work on them again. */ - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask); + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask); return size; } @@ -711,6 +752,8 @@ static int __init init_xstate_size(void) */ static void __init fpu__init_disable_system_xstate(unsigned int legacy_size) { + pr_info("x86/fpu: XSAVE disabled\n"); + fpu_kernel_cfg.max_features = 0; cr4_clear_bits(X86_CR4_OSXSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVE); @@ -727,7 +770,7 @@ static void __init fpu__init_disable_system_xstate(unsigned int legacy_size) */ init_fpstate.xfd = 0; - fpstate_reset(¤t->thread.fpu); + fpstate_reset(x86_task_fpu(current)); } /* @@ -775,6 +818,17 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) goto out_disable; } + if (fpu_kernel_cfg.max_features & XFEATURE_MASK_APX && + fpu_kernel_cfg.max_features & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) { + /* + * This is a problematic CPU configuration where two + * conflicting state components are both enumerated. 
+ */ + pr_err("x86/fpu: Both APX/MPX present in the CPU's xstate features: 0x%llx.\n", + fpu_kernel_cfg.max_features); + goto out_disable; + } + fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features & XFEATURE_MASK_INDEPENDENT; @@ -834,9 +888,6 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) if (err) goto out_disable; - /* Reset the state for the current task */ - fpstate_reset(¤t->thread.fpu); - /* * Update info used for ptrace frames; use standard-format size and no * supervisor xstates: @@ -852,7 +903,7 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) init_fpstate.xfeatures = fpu_kernel_cfg.default_features; if (init_fpstate.size > sizeof(init_fpstate.regs)) { - pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n", + pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d)\n", sizeof(init_fpstate.regs), init_fpstate.size); goto out_disable; } @@ -864,7 +915,7 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) * xfeatures mask. */ if (xfeatures != fpu_kernel_cfg.max_features) { - pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n", + pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init\n", xfeatures, fpu_kernel_cfg.max_features); goto out_disable; } @@ -904,12 +955,12 @@ void fpu__resume_cpu(void) * of XSAVES and MSR_IA32_XSS. */ if (cpu_feature_enabled(X86_FEATURE_XSAVES)) { - wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | + wrmsrq(MSR_IA32_XSS, xfeatures_mask_supervisor() | xfeatures_mask_independent()); } if (fpu_state_size_dynamic()) - wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd); + wrmsrq(MSR_IA32_XFD, x86_task_fpu(current)->fpstate->xfd); } /* @@ -1071,10 +1122,9 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr); struct xregs_state *xinit = &init_fpstate.regs.xsave; struct xregs_state *xsave = &fpstate->regs.xsave; + unsigned int zerofrom, i, xfeature; struct xstate_header header; - unsigned int zerofrom; u64 mask; - int i; memset(&header, 0, sizeof(header)); header.xfeatures = xsave->header.xfeatures; @@ -1143,15 +1193,16 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, */ mask = header.xfeatures; - for_each_extended_xfeature(i, mask) { + for_each_extended_xfeature_in_order(i, mask) { + xfeature = xfeature_uncompact_order[i]; /* * If there was a feature or alignment gap, zero the space * in the destination buffer. */ - if (zerofrom < xstate_offsets[i]) - membuf_zero(&to, xstate_offsets[i] - zerofrom); + if (zerofrom < xstate_offsets[xfeature]) + membuf_zero(&to, xstate_offsets[xfeature] - zerofrom); - if (i == XFEATURE_PKRU) { + if (xfeature == XFEATURE_PKRU) { struct pkru_state pkru = {0}; /* * PKRU is not necessarily up to date in the @@ -1161,14 +1212,14 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, membuf_write(&to, &pkru, sizeof(pkru)); } else { membuf_write(&to, - __raw_xsave_addr(xsave, i), - xstate_sizes[i]); + __raw_xsave_addr(xsave, xfeature), + xstate_sizes[xfeature]); } /* * Keep track of the last copied state in the non-compacted * target buffer for gap zeroing. 
*/ - zerofrom = xstate_offsets[i] + xstate_sizes[i]; + zerofrom = xstate_offsets[xfeature] + xstate_sizes[xfeature]; } out: @@ -1191,8 +1242,8 @@ out: void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk, enum xstate_copy_mode copy_mode) { - __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate, - tsk->thread.fpu.fpstate->user_xfeatures, + __copy_xstate_to_uabi_buf(to, x86_task_fpu(tsk)->fpstate, + x86_task_fpu(tsk)->fpstate->user_xfeatures, tsk->thread.pkru, copy_mode); } @@ -1332,7 +1383,7 @@ int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf) { - return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru); + return copy_uabi_to_xstate(x86_task_fpu(tsk)->fpstate, NULL, ubuf, &tsk->thread.pkru); } static bool validate_independent_components(u64 mask) @@ -1398,9 +1449,9 @@ void xrstors(struct xregs_state *xstate, u64 mask) } #if IS_ENABLED(CONFIG_KVM) -void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature) +void fpstate_clear_xstate_component(struct fpstate *fpstate, unsigned int xfeature) { - void *addr = get_xsave_addr(&fps->regs.xsave, xfeature); + void *addr = get_xsave_addr(&fpstate->regs.xsave, xfeature); if (addr) memset(addr, 0, xstate_sizes[xfeature]); @@ -1426,7 +1477,7 @@ static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor) * The XFD MSR does not match fpstate->xfd. That's invalid when * the passed in fpstate is current's fpstate. */ - if (fpstate->xfd == current->thread.fpu.fpstate->xfd) + if (fpstate->xfd == x86_task_fpu(current)->fpstate->xfd) return false; /* @@ -1503,7 +1554,7 @@ void fpstate_free(struct fpu *fpu) static int fpstate_realloc(u64 xfeatures, unsigned int ksize, unsigned int usize, struct fpu_guest *guest_fpu) { - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); struct fpstate *curfps, *newfps = NULL; unsigned int fpsize; bool in_use; @@ -1596,7 +1647,7 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest) * AVX512. */ bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); - struct fpu *fpu = ¤t->group_leader->thread.fpu; + struct fpu *fpu = x86_task_fpu(current->group_leader); struct fpu_state_perm *perm; unsigned int ksize, usize; u64 mask; @@ -1606,16 +1657,20 @@ static int __xstate_request_perm(u64 permitted, u64 requested, bool guest) if ((permitted & requested) == requested) return 0; - /* Calculate the resulting kernel state size */ + /* + * Calculate the resulting kernel state size. Note, @permitted also + * contains supervisor xfeatures even though supervisor are always + * permitted for kernel and guest FPUs, and never permitted for user + * FPUs. + */ mask = permitted | requested; - /* Take supervisor states into account on the host */ - if (!guest) - mask |= xfeatures_mask_supervisor(); ksize = xstate_calculate_size(mask, compacted); - /* Calculate the resulting user state size */ - mask &= XFEATURE_MASK_USER_SUPPORTED; - usize = xstate_calculate_size(mask, false); + /* + * Calculate the resulting user state size. Take care not to clobber + * the supervisor xfeatures in the new mask! 
+ */ + usize = xstate_calculate_size(mask & XFEATURE_MASK_USER_SUPPORTED, false); if (!guest) { ret = validate_sigaltstack(usize); @@ -1699,7 +1754,7 @@ int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu) return -EPERM; } - fpu = ¤t->group_leader->thread.fpu; + fpu = x86_task_fpu(current->group_leader); perm = guest_fpu ? &fpu->guest_perm : &fpu->perm; ksize = perm->__state_size; usize = perm->__user_state_size; @@ -1804,7 +1859,7 @@ long fpu_xstate_prctl(int option, unsigned long arg2) */ static void avx512_status(struct seq_file *m, struct task_struct *task) { - unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp); + unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); long delta; if (!timestamp) { diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index 0fd34f53f025..52ce19289989 100644 --- a/arch/x86/kernel/fpu/xstate.h +++ b/arch/x86/kernel/fpu/xstate.h @@ -5,6 +5,7 @@ #include <asm/cpufeature.h> #include <asm/fpu/xstate.h> #include <asm/fpu/xcr.h> +#include <asm/msr.h> #ifdef CONFIG_X86_64 DECLARE_PER_CPU(u64, xfd_state); @@ -22,7 +23,7 @@ static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask) static inline u64 xstate_get_group_perm(bool guest) { - struct fpu *fpu = ¤t->group_leader->thread.fpu; + struct fpu *fpu = x86_task_fpu(current->group_leader); struct fpu_state_perm *perm; /* Pairs with WRITE_ONCE() in xstate_request_perm() */ @@ -69,21 +70,31 @@ static inline u64 xfeatures_mask_independent(void) return fpu_kernel_cfg.independent_features; } +static inline int set_xfeature_in_sigframe(struct xregs_state __user *xbuf, u64 mask) +{ + u64 xfeatures; + int err; + + /* Read the xfeatures value already saved in the user buffer */ + err = __get_user(xfeatures, &xbuf->header.xfeatures); + xfeatures |= mask; + err |= __put_user(xfeatures, &xbuf->header.xfeatures); + + return err; +} + /* * Update the value of PKRU register that was already pushed onto the signal frame. */ -static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru) +static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru) { - u64 xstate_bv; int err; if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE))) return 0; /* Mark PKRU as in-use so that it is restored correctly. */ - xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU; - - err = __put_user(xstate_bv, &buf->header.xfeatures); + err = set_xfeature_in_sigframe(buf, XFEATURE_MASK_PKRU); if (err) return err; @@ -171,7 +182,7 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs #ifdef CONFIG_X86_64 static inline void xfd_set_state(u64 xfd) { - wrmsrl(MSR_IA32_XFD, xfd); + wrmsrq(MSR_IA32_XFD, xfd); __this_cpu_write(xfd_state, xfd); } @@ -288,7 +299,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr * internally, e.g. PKRU. That's user space ABI and also required * to allow the signal handler to modify PKRU. 
*/ - struct fpstate *fpstate = current->thread.fpu.fpstate; + struct fpstate *fpstate = x86_task_fpu(current)->fpstate; u64 mask = fpstate->user_xfeatures; u32 lmask; u32 hmask; @@ -307,7 +318,7 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkr clac(); if (!err) - err = update_pkru_in_sigframe(buf, mask, pkru); + err = update_pkru_in_sigframe(buf, pkru); return err; } @@ -322,7 +333,7 @@ static inline int xrstor_from_user_sigframe(struct xregs_state __user *buf, u64 u32 hmask = mask >> 32; int err; - xfd_validate_state(current->thread.fpu.fpstate, mask, true); + xfd_validate_state(x86_task_fpu(current)->fpstate, mask, true); stac(); XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c index 5e2cd1004980..816187da3a47 100644 --- a/arch/x86/kernel/fred.c +++ b/arch/x86/kernel/fred.c @@ -3,6 +3,7 @@ #include <asm/desc.h> #include <asm/fred.h> +#include <asm/msr.h> #include <asm/tlbflush.h> #include <asm/traps.h> @@ -43,23 +44,23 @@ void cpu_init_fred_exceptions(void) */ loadsegment(ss, __KERNEL_DS); - wrmsrl(MSR_IA32_FRED_CONFIG, + wrmsrq(MSR_IA32_FRED_CONFIG, /* Reserve for CALL emulation */ FRED_CONFIG_REDZONE | FRED_CONFIG_INT_STKLVL(0) | FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user)); - wrmsrl(MSR_IA32_FRED_STKLVLS, 0); + wrmsrq(MSR_IA32_FRED_STKLVLS, 0); /* * Ater a CPU offline/online cycle, the FRED RSP0 MSR should be * resynchronized with its per-CPU cache. */ - wrmsrl(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0)); + wrmsrq(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0)); - wrmsrl(MSR_IA32_FRED_RSP1, 0); - wrmsrl(MSR_IA32_FRED_RSP2, 0); - wrmsrl(MSR_IA32_FRED_RSP3, 0); + wrmsrq(MSR_IA32_FRED_RSP1, 0); + wrmsrq(MSR_IA32_FRED_RSP2, 0); + wrmsrq(MSR_IA32_FRED_RSP3, 0); /* Enable FRED */ cr4_set_bits(X86_CR4_FRED); @@ -79,14 +80,14 @@ void cpu_init_fred_rsps(void) * (remember that user space faults are always taken on stack level 0) * is to avoid overflowing the kernel stack. */ - wrmsrl(MSR_IA32_FRED_STKLVLS, + wrmsrq(MSR_IA32_FRED_STKLVLS, FRED_STKLVL(X86_TRAP_DB, FRED_DB_STACK_LEVEL) | FRED_STKLVL(X86_TRAP_NMI, FRED_NMI_STACK_LEVEL) | FRED_STKLVL(X86_TRAP_MC, FRED_MC_STACK_LEVEL) | FRED_STKLVL(X86_TRAP_DF, FRED_DF_STACK_LEVEL)); /* The FRED equivalents to IST stacks... */ - wrmsrl(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB)); - wrmsrl(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI)); - wrmsrl(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF)); + wrmsrq(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB)); + wrmsrq(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI)); + wrmsrq(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF)); } diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 5eb1514af559..252e82bcfd2f 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -55,10 +55,10 @@ void ftrace_arch_code_modify_post_process(void) { /* * ftrace_make_{call,nop}() may be called during - * module load, and we need to finish the text_poke_queue() + * module load, and we need to finish the smp_text_poke_batch_add() * that they do, here. 
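As a quick orientation for the renames in this hunk and in the jump_label/kprobes hunks further down: text_poke_queue()/text_poke_finish() become smp_text_poke_batch_add()/smp_text_poke_batch_finish(), and text_poke_bp() becomes smp_text_poke_single(). A minimal usage sketch, assuming the call pattern shown by these callers is representative (addr1/insn1 and friends are illustrative placeholders, not symbols from this patch):

	// Queue several live patches, then flush them in one go.
	// text_mutex must be held across the whole sequence.
	mutex_lock(&text_mutex);
	smp_text_poke_batch_add(addr1, insn1, insn1_len, NULL);
	smp_text_poke_batch_add(addr2, insn2, insn2_len, NULL);
	smp_text_poke_batch_finish();
	mutex_unlock(&text_mutex);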
*/ - text_poke_finish(); + smp_text_poke_batch_finish(); ftrace_poke_late = 0; mutex_unlock(&text_mutex); } @@ -119,7 +119,7 @@ ftrace_modify_code_direct(unsigned long ip, const char *old_code, /* replace the text with the new text */ if (ftrace_poke_late) - text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL); + smp_text_poke_batch_add((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL); else text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE); return 0; @@ -186,11 +186,11 @@ int ftrace_update_ftrace_func(ftrace_func_t func) ip = (unsigned long)(&ftrace_call); new = ftrace_call_replace(ip, (unsigned long)func); - text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); + smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL); ip = (unsigned long)(&ftrace_regs_call); new = ftrace_call_replace(ip, (unsigned long)func); - text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); + smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL); return 0; } @@ -247,10 +247,10 @@ void ftrace_replace_code(int enable) break; } - text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL); + smp_text_poke_batch_add((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL); ftrace_update_record(rec, enable); } - text_poke_finish(); + smp_text_poke_batch_finish(); } void arch_ftrace_update_code(int command) @@ -492,7 +492,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) mutex_lock(&text_mutex); /* Do a safe modify in case the trampoline is executing */ new = ftrace_call_replace(ip, (unsigned long)func); - text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); + smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL); mutex_unlock(&text_mutex); } @@ -586,7 +586,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func) const char *new; new = ftrace_jmp_replace(ip, (unsigned long)func); - text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL); + smp_text_poke_single((void *)ip, new, MCOUNT_INSN_SIZE, NULL); return 0; } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index fa9b6339975f..533fcf5636fc 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -47,234 +47,22 @@ * Manage page tables very early on. */ extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; -static unsigned int __initdata next_early_pgt; +unsigned int __initdata next_early_pgt; +SYM_PIC_ALIAS(next_early_pgt); pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); -#ifdef CONFIG_X86_5LEVEL unsigned int __pgtable_l5_enabled __ro_after_init; unsigned int pgdir_shift __ro_after_init = 39; EXPORT_SYMBOL(pgdir_shift); unsigned int ptrs_per_p4d __ro_after_init = 1; EXPORT_SYMBOL(ptrs_per_p4d); -#endif -#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4; EXPORT_SYMBOL(page_offset_base); unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4; EXPORT_SYMBOL(vmalloc_base); unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4; EXPORT_SYMBOL(vmemmap_base); -#endif - -static inline bool check_la57_support(void) -{ - if (!IS_ENABLED(CONFIG_X86_5LEVEL)) - return false; - - /* - * 5-level paging is detected and enabled at kernel decompression - * stage. Only check if it has been enabled there. 
- */ - if (!(native_read_cr4() & X86_CR4_LA57)) - return false; - - RIP_REL_REF(__pgtable_l5_enabled) = 1; - RIP_REL_REF(pgdir_shift) = 48; - RIP_REL_REF(ptrs_per_p4d) = 512; - RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5; - RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5; - RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5; - - return true; -} - -static unsigned long __head sme_postprocess_startup(struct boot_params *bp, - pmdval_t *pmd, - unsigned long p2v_offset) -{ - unsigned long paddr, paddr_end; - int i; - - /* Encrypt the kernel and related (if SME is active) */ - sme_encrypt_kernel(bp); - - /* - * Clear the memory encryption mask from the .bss..decrypted section. - * The bss section will be memset to zero later in the initialization so - * there is no need to zero it after changing the memory encryption - * attribute. - */ - if (sme_get_me_mask()) { - paddr = (unsigned long)&RIP_REL_REF(__start_bss_decrypted); - paddr_end = (unsigned long)&RIP_REL_REF(__end_bss_decrypted); - - for (; paddr < paddr_end; paddr += PMD_SIZE) { - /* - * On SNP, transition the page to shared in the RMP table so that - * it is consistent with the page table attribute change. - * - * __start_bss_decrypted has a virtual address in the high range - * mapping (kernel .text). PVALIDATE, by way of - * early_snp_set_memory_shared(), requires a valid virtual - * address but the kernel is currently running off of the identity - * mapping so use the PA to get a *currently* valid virtual address. - */ - early_snp_set_memory_shared(paddr, paddr, PTRS_PER_PMD); - - i = pmd_index(paddr - p2v_offset); - pmd[i] -= sme_get_me_mask(); - } - } - - /* - * Return the SME encryption mask (if SME is active) to be used as a - * modifier for the initial pgdir entry programmed into CR3. - */ - return sme_get_me_mask(); -} - -/* Code in __startup_64() can be relocated during execution, but the compiler - * doesn't have to generate PC-relative relocations when accessing globals from - * that function. Clang actually does not generate them, which leads to - * boot-time crashes. To work around this problem, every global pointer must - * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined - * by subtracting p2v_offset from the RIP-relative address. - */ -unsigned long __head __startup_64(unsigned long p2v_offset, - struct boot_params *bp) -{ - pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts); - unsigned long physaddr = (unsigned long)&RIP_REL_REF(_text); - unsigned long va_text, va_end; - unsigned long pgtable_flags; - unsigned long load_delta; - pgdval_t *pgd; - p4dval_t *p4d; - pudval_t *pud; - pmdval_t *pmd, pmd_entry; - bool la57; - int i; - - la57 = check_la57_support(); - - /* Is the address too large? */ - if (physaddr >> MAX_PHYSMEM_BITS) - for (;;); - - /* - * Compute the delta between the address I am compiled to run at - * and the address I am actually running at. - */ - load_delta = __START_KERNEL_map + p2v_offset; - RIP_REL_REF(phys_base) = load_delta; - - /* Is the address not 2M aligned? 
*/ - if (load_delta & ~PMD_MASK) - for (;;); - - va_text = physaddr - p2v_offset; - va_end = (unsigned long)&RIP_REL_REF(_end) - p2v_offset; - - /* Include the SME encryption mask in the fixup value */ - load_delta += sme_get_me_mask(); - - /* Fixup the physical addresses in the page table */ - - pgd = &RIP_REL_REF(early_top_pgt)->pgd; - pgd[pgd_index(__START_KERNEL_map)] += load_delta; - - if (IS_ENABLED(CONFIG_X86_5LEVEL) && la57) { - p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt); - p4d[MAX_PTRS_PER_P4D - 1] += load_delta; - - pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE; - } - - RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta; - RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta; - - for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) - RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta; - - /* - * Set up the identity mapping for the switchover. These - * entries should *NOT* have the global bit set! This also - * creates a bunch of nonsense entries but that is fine -- - * it avoids problems around wraparound. - */ - - pud = &early_pgts[0]->pmd; - pmd = &early_pgts[1]->pmd; - RIP_REL_REF(next_early_pgt) = 2; - - pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); - - if (la57) { - p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd; - - i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; - pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; - pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; - - i = physaddr >> P4D_SHIFT; - p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; - p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; - } else { - i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; - pgd[i + 0] = (pgdval_t)pud + pgtable_flags; - pgd[i + 1] = (pgdval_t)pud + pgtable_flags; - } - - i = physaddr >> PUD_SHIFT; - pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; - pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; - - pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; - /* Filter out unsupported __PAGE_KERNEL_* bits: */ - pmd_entry &= RIP_REL_REF(__supported_pte_mask); - pmd_entry += sme_get_me_mask(); - pmd_entry += physaddr; - - for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) { - int idx = i + (physaddr >> PMD_SHIFT); - - pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; - } - - /* - * Fixup the kernel text+data virtual addresses. Note that - * we might write invalid pmds, when the kernel is relocated - * cleanup_highmap() fixes this up along with the mappings - * beyond _end. - * - * Only the region occupied by the kernel image has so far - * been checked against the table of usable memory regions - * provided by the firmware, so invalidate pages outside that - * region. A page table entry that maps to a reserved area of - * memory would allow processor speculation into that area, - * and on some hardware (particularly the UV platform) even - * speculative access to some reserved areas is caught as an - * error, causing the BIOS to halt the system. 
- */ - - pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd; - - /* invalidate pages before the kernel image */ - for (i = 0; i < pmd_index(va_text); i++) - pmd[i] &= ~_PAGE_PRESENT; - - /* fixup pages that are part of the kernel image */ - for (; i <= pmd_index(va_end); i++) - if (pmd[i] & _PAGE_PRESENT) - pmd[i] += load_delta; - - /* invalidate pages after the kernel image */ - for (; i < PTRS_PER_PMD; i++) - pmd[i] &= ~_PAGE_PRESENT; - - return sme_postprocess_startup(bp, pmd, p2v_offset); -} /* Wipe all early page tables except for the kernel symbol map */ static void __init reset_early_page_tables(void) @@ -449,6 +237,12 @@ asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode /* Kill off the identity-map trampoline */ reset_early_page_tables(); + if (pgtable_l5_enabled()) { + page_offset_base = __PAGE_OFFSET_BASE_L5; + vmalloc_base = __VMALLOC_BASE_L5; + vmemmap_base = __VMEMMAP_BASE_L5; + } + clear_bss(); /* @@ -513,41 +307,6 @@ void __init __noreturn x86_64_start_reservations(char *real_mode_data) start_kernel(); } -/* - * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is - * used until the idt_table takes over. On the boot CPU this happens in - * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases - * this happens in the functions called from head_64.S. - * - * The idt_table can't be used that early because all the code modifying it is - * in idt.c and can be instrumented by tracing or KASAN, which both don't work - * during early CPU bringup. Also the idt_table has the runtime vectors - * configured which require certain CPU state to be setup already (like TSS), - * which also hasn't happened yet in early CPU bringup. - */ -static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data; - -/* This may run while still in the direct mapping */ -static void __head startup_64_load_idt(void *vc_handler) -{ - struct desc_ptr desc = { - .address = (unsigned long)&RIP_REL_REF(bringup_idt_table), - .size = sizeof(bringup_idt_table) - 1, - }; - struct idt_data data; - gate_desc idt_desc; - - /* @vc_handler is set only for a VMM Communication Exception */ - if (vc_handler) { - init_idt_data(&data, X86_TRAP_VC, vc_handler); - idt_init_desc(&idt_desc, &data); - native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc); - } - - native_load_idt(&desc); -} - -/* This is used when running on kernel addresses */ void early_setup_idt(void) { void *handler = NULL; @@ -559,30 +318,3 @@ void early_setup_idt(void) startup_64_load_idt(handler); } - -/* - * Setup boot CPU state needed before kernel switches to virtual addresses. 
- */ -void __head startup_64_setup_gdt_idt(void) -{ - struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt; - void *handler = NULL; - - struct desc_ptr startup_gdt_descr = { - .address = (unsigned long)&RIP_REL_REF(*gdt), - .size = GDT_SIZE - 1, - }; - - /* Load GDT */ - native_load_gdt(&startup_gdt_descr); - - /* New GDT is live - reload data segment registers */ - asm volatile("movl %%eax, %%ds\n" - "movl %%eax, %%ss\n" - "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory"); - - if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) - handler = &RIP_REL_REF(vc_no_ghcb); - - startup_64_load_idt(handler); -} diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 2e42056d2306..76743dfad6ab 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -86,7 +86,7 @@ SYM_CODE_START(startup_32) movl $pa(__bss_stop),%ecx subl %edi,%ecx shrl $2,%ecx - rep ; stosl + rep stosl /* * Copy bootup parameters out of the way. * Note: %esi still has the pointer to the real-mode data. @@ -98,15 +98,13 @@ SYM_CODE_START(startup_32) movl $pa(boot_params),%edi movl $(PARAM_SIZE/4),%ecx cld - rep - movsl + rep movsl movl pa(boot_params) + NEW_CL_POINTER,%esi andl %esi,%esi jz 1f # No command line movl $pa(boot_command_line),%edi movl $(COMMAND_LINE_SIZE/4),%ecx - rep - movsl + rep movsl 1: #ifdef CONFIG_OLPC diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index fefe2a25cf02..3e9b3a3bd039 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -573,6 +573,7 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb) /* Pure iret required here - don't use INTERRUPT_RETURN */ iretq SYM_CODE_END(vc_no_ghcb) +SYM_PIC_ALIAS(vc_no_ghcb); #endif #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION @@ -604,10 +605,12 @@ SYM_DATA_START_PTI_ALIGNED(early_top_pgt) .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC .fill PTI_USER_PGD_FILL,8,0 SYM_DATA_END(early_top_pgt) +SYM_PIC_ALIAS(early_top_pgt) SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 SYM_DATA_END(early_dynamic_pgts) +SYM_PIC_ALIAS(early_dynamic_pgts); SYM_DATA(early_recursion_flag, .long 0) @@ -646,12 +649,11 @@ SYM_DATA_START_PTI_ALIGNED(init_top_pgt) SYM_DATA_END(init_top_pgt) #endif -#ifdef CONFIG_X86_5LEVEL SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt) .fill 511,8,0 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC SYM_DATA_END(level4_kernel_pgt) -#endif +SYM_PIC_ALIAS(level4_kernel_pgt) SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 @@ -659,6 +661,7 @@ SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt) .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC SYM_DATA_END(level3_kernel_pgt) +SYM_PIC_ALIAS(level3_kernel_pgt) SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt) /* @@ -676,6 +679,7 @@ SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt) */ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) SYM_DATA_END(level2_kernel_pgt) +SYM_PIC_ALIAS(level2_kernel_pgt) SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt) .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 @@ -688,6 +692,7 @@ SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt) /* 6 MB reserved space + a 2MB hole */ .fill 4,8,0 SYM_DATA_END(level2_fixmap_pgt) +SYM_PIC_ALIAS(level2_fixmap_pgt) SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt) .rept (FIXMAP_PMD_NUM) @@ -703,6 +708,7 @@ SYM_DATA(smpboot_control, .long 0) .align 16 /* This must match the first entry in level2_kernel_pgt */ SYM_DATA(phys_base, .quad 0x0) 
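The SYM_PIC_ALIAS() annotations added to these page-table symbols and to phys_base appear to exist so that the startup code which absorbed the former head64.c routines, and which is built position-independent, can keep referencing them; the alias mechanism itself lives in the x86 linkage headers and is not shown in this diff. As a rough model of what phys_base is for (a simplified sketch consistent with the load_delta assignment removed above; kernel_image_va_to_pa() is an illustrative name, not a kernel helper):

/*
 * For addresses in the kernel image mapping, which starts at
 * __START_KERNEL_map, the physical address is obtained by dropping
 * the map base and adding the load offset recorded in phys_base.
 */
static inline unsigned long kernel_image_va_to_pa(unsigned long va)
{
	return va - __START_KERNEL_map + phys_base;
}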
+SYM_PIC_ALIAS(phys_base); EXPORT_SYMBOL(phys_base) #include "../xen/xen-head.S" diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7f4b2966e15c..d6387dde3ff9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -7,11 +7,12 @@ #include <linux/cpu.h> #include <linux/irq.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/irq_remapping.h> #include <asm/hpet.h> #include <asm/time.h> #include <asm/mwait.h> +#include <asm/msr.h> #undef pr_fmt #define pr_fmt(fmt) "hpet: " fmt @@ -970,7 +971,7 @@ static bool __init hpet_is_pc10_damaged(void) return false; /* Check whether PC10 is enabled in PKG C-state limit */ - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, pcfg); if ((pcfg & 0xF) < 8) return false; diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index cd8ed1edbf9e..9e9a591a5fec 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -49,7 +49,7 @@ static uint32_t jailhouse_cpuid_base(void) !boot_cpu_has(X86_FEATURE_HYPERVISOR)) return 0; - return hypervisor_cpuid_base("Jailhouse\0\0\0", 0); + return cpuid_base_hypervisor("Jailhouse\0\0\0", 0); } static uint32_t __init jailhouse_detect(void) diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index f5b8ef02d172..a7949a54a0ff 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -102,7 +102,7 @@ __jump_label_transform(struct jump_entry *entry, return; } - text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL); + smp_text_poke_single((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL); } static void __ref jump_label_transform(struct jump_entry *entry, @@ -135,7 +135,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry, mutex_lock(&text_mutex); jlp = __jump_label_patch(entry, type); - text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL); + smp_text_poke_batch_add((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL); mutex_unlock(&text_mutex); return true; } @@ -143,6 +143,6 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry, void arch_jump_label_transform_apply(void) { mutex_lock(&text_mutex); - text_poke_finish(); + smp_text_poke_batch_finish(); mutex_unlock(&text_mutex); } diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 09608fd93687..47cb8eb138ba 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -808,7 +808,7 @@ void arch_arm_kprobe(struct kprobe *p) u8 int3 = INT3_INSN_OPCODE; text_poke(p->addr, &int3, 1); - text_poke_sync(); + smp_text_poke_sync_each_cpu(); perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1); } @@ -818,7 +818,7 @@ void arch_disarm_kprobe(struct kprobe *p) perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1); text_poke(p->addr, &p->opcode, 1); - text_poke_sync(); + smp_text_poke_sync_each_cpu(); } void arch_remove_kprobe(struct kprobe *p) diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 36d6809c6c9e..0aabd4c4e2c4 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -488,7 +488,7 @@ void arch_optimize_kprobes(struct list_head *oplist) insn_buff[0] = JMP32_INSN_OPCODE; *(s32 *)(&insn_buff[1]) = rel; - text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL); + smp_text_poke_single(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL); list_del_init(&op->list); } @@ -513,11 +513,11 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op) JMP32_INSN_SIZE - 
INT3_INSN_SIZE); text_poke(addr, new, INT3_INSN_SIZE); - text_poke_sync(); + smp_text_poke_sync_each_cpu(); text_poke(addr + INT3_INSN_SIZE, new + INT3_INSN_SIZE, JMP32_INSN_SIZE - INT3_INSN_SIZE); - text_poke_sync(); + smp_text_poke_sync_each_cpu(); perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE); } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 3be9b3342c67..921c1c783bc1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -40,6 +40,7 @@ #include <asm/mtrr.h> #include <asm/tlb.h> #include <asm/cpuidle_haltpoll.h> +#include <asm/msr.h> #include <asm/ptrace.h> #include <asm/reboot.h> #include <asm/svm.h> @@ -301,7 +302,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt) token = __this_cpu_read(apf_reason.token); kvm_async_pf_task_wake(token); __this_cpu_write(apf_reason.token, 0); - wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1); + wrmsrq(MSR_KVM_ASYNC_PF_ACK, 1); } set_irq_regs(old_regs); @@ -327,7 +328,7 @@ static void kvm_register_steal_time(void) if (!has_steal_clock) return; - wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); + wrmsrq(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); pr_debug("stealtime: cpu %d, msr %llx\n", cpu, (unsigned long long) slow_virt_to_phys(st)); } @@ -361,9 +362,9 @@ static void kvm_guest_cpu_init(void) if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; - wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR); + wrmsrq(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR); - wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); + wrmsrq(MSR_KVM_ASYNC_PF_EN, pa); __this_cpu_write(async_pf_enabled, true); pr_debug("setup async PF for cpu %d\n", smp_processor_id()); } @@ -376,7 +377,7 @@ static void kvm_guest_cpu_init(void) __this_cpu_write(kvm_apic_eoi, 0); pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) | KVM_MSR_ENABLED; - wrmsrl(MSR_KVM_PV_EOI_EN, pa); + wrmsrq(MSR_KVM_PV_EOI_EN, pa); } if (has_steal_clock) @@ -388,7 +389,7 @@ static void kvm_pv_disable_apf(void) if (!__this_cpu_read(async_pf_enabled)) return; - wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); + wrmsrq(MSR_KVM_ASYNC_PF_EN, 0); __this_cpu_write(async_pf_enabled, false); pr_debug("disable async PF for cpu %d\n", smp_processor_id()); @@ -399,7 +400,7 @@ static void kvm_disable_steal_time(void) if (!has_steal_clock) return; - wrmsr(MSR_KVM_STEAL_TIME, 0, 0); + wrmsrq(MSR_KVM_STEAL_TIME, 0); } static u64 kvm_steal_clock(int cpu) @@ -451,9 +452,9 @@ static void kvm_guest_cpu_offline(bool shutdown) { kvm_disable_steal_time(); if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) - wrmsrl(MSR_KVM_PV_EOI_EN, 0); + wrmsrq(MSR_KVM_PV_EOI_EN, 0); if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) - wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0); + wrmsrq(MSR_KVM_MIGRATION_CONTROL, 0); kvm_pv_disable_apf(); if (!shutdown) apf_task_wake_all(); @@ -615,7 +616,7 @@ static int __init setup_efi_kvm_sev_migration(void) } pr_info("%s : live migration enabled in EFI\n", __func__); - wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); + wrmsrq(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); return 1; } @@ -728,7 +729,7 @@ static int kvm_suspend(void) #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) - rdmsrl(MSR_KVM_POLL_CONTROL, val); + rdmsrq(MSR_KVM_POLL_CONTROL, val); has_guest_poll = !(val & 1); #endif return 0; @@ -740,7 +741,7 @@ static void kvm_resume(void) #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll) - 
wrmsrl(MSR_KVM_POLL_CONTROL, 0); + wrmsrq(MSR_KVM_POLL_CONTROL, 0); #endif } @@ -874,7 +875,7 @@ static noinline uint32_t __kvm_cpuid_base(void) return 0; /* So we don't blow up on old processors */ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) - return hypervisor_cpuid_base(KVM_SIGNATURE, 0); + return cpuid_base_hypervisor(KVM_SIGNATURE, 0); return 0; } @@ -975,7 +976,7 @@ static void __init kvm_init_platform(void) * If not booted using EFI, enable Live migration support. */ if (!efi_enabled(EFI_BOOT)) - wrmsrl(MSR_KVM_MIGRATION_CONTROL, + wrmsrq(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); } kvmclock_init(); @@ -1124,12 +1125,12 @@ out: static void kvm_disable_host_haltpoll(void *i) { - wrmsrl(MSR_KVM_POLL_CONTROL, 0); + wrmsrq(MSR_KVM_POLL_CONTROL, 0); } static void kvm_enable_host_haltpoll(void *i) { - wrmsrl(MSR_KVM_POLL_CONTROL, 1); + wrmsrq(MSR_KVM_POLL_CONTROL, 1); } void arch_haltpoll_enable(unsigned int cpu) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 5b2c15214a6b..ca0a49eeac4a 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -60,7 +60,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu); */ static void kvm_get_wallclock(struct timespec64 *now) { - wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock)); + wrmsrq(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock)); preempt_disable(); pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now); preempt_enable(); @@ -173,7 +173,7 @@ static void kvm_register_clock(char *txt) return; pa = slow_virt_to_phys(&src->pvti) | 0x01ULL; - wrmsrl(msr_kvm_system_time, pa); + wrmsrq(msr_kvm_system_time, pa); pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt); } @@ -196,7 +196,7 @@ static void kvm_setup_secondary_clock(void) void kvmclock_disable(void) { if (msr_kvm_system_time) - native_write_msr(msr_kvm_system_time, 0, 0); + native_write_msr(msr_kvm_system_time, 0); } static void __init kvmclock_init_mem(void) diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index a68f5a0a9f37..949c9e4bfad2 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -76,6 +76,19 @@ map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) static int map_acpi_tables(struct x86_mapping_info *info, pgd_t *level4p) { return 0; } #endif +static int map_mmio_serial(struct x86_mapping_info *info, pgd_t *level4p) +{ + unsigned long mstart, mend; + + if (!kexec_debug_8250_mmio32) + return 0; + + mstart = kexec_debug_8250_mmio32 & PAGE_MASK; + mend = (kexec_debug_8250_mmio32 + PAGE_SIZE + 23) & PAGE_MASK; + pr_info("Map PCI serial at %lx - %lx\n", mstart, mend); + return kernel_ident_mapping_init(info, level4p, mstart, mend); +} + #ifdef CONFIG_KEXEC_FILE const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_bzImage64_ops, @@ -285,6 +298,10 @@ static int init_pgtable(struct kimage *image, unsigned long control_page) if (result) return result; + result = map_mmio_serial(&info, image->arch.pgd); + if (result) + return result; + /* * This must be last because the intermediate page table pages it * allocates will not be control pages and may overlap the image. 
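The map_mmio_serial() helper added above must identity-map every page touched by the debug UART's register window, including the case where that window straddles a page boundary. A generic sketch of the rounding it performs (mmio_page_range() is an illustrative helper, not part of this patch); with len = 24, enough to cover an 8250 with 32-bit register stride up to its line-status register, the end expression reduces to the (base + PAGE_SIZE + 23) & PAGE_MASK form used in the hunk:

// Compute the page-aligned [start, end) range covering an MMIO window
// of 'len' bytes starting at 'base'.
static void mmio_page_range(unsigned long base, unsigned long len,
			    unsigned long *start, unsigned long *end)
{
	*start = base & PAGE_MASK;				// round the base down
	*end   = (base + len + PAGE_SIZE - 1) & PAGE_MASK;	// round the end up
}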
@@ -304,6 +321,24 @@ static void load_segments(void) ); } +static void prepare_debug_idt(unsigned long control_page, unsigned long vec_ofs) +{ + gate_desc idtentry = { 0 }; + int i; + + idtentry.bits.p = 1; + idtentry.bits.type = GATE_TRAP; + idtentry.segment = __KERNEL_CS; + idtentry.offset_low = (control_page & 0xFFFF) + vec_ofs; + idtentry.offset_middle = (control_page >> 16) & 0xFFFF; + idtentry.offset_high = control_page >> 32; + + for (i = 0; i < 16; i++) { + kexec_debug_idt[i] = idtentry; + idtentry.offset_low += KEXEC_DEBUG_EXC_HANDLER_SIZE; + } +} + int machine_kexec_prepare(struct kimage *image) { void *control_page = page_address(image->control_code_page); @@ -321,6 +356,9 @@ int machine_kexec_prepare(struct kimage *image) if (image->type == KEXEC_TYPE_DEFAULT) kexec_pa_swap_page = page_to_pfn(image->swap_page) << PAGE_SHIFT; + prepare_debug_idt((unsigned long)__pa(control_page), + (unsigned long)kexec_debug_exc_vectors - reloc_start); + __memcpy(control_page, __relocate_kernel_start, reloc_end - reloc_start); set_memory_rox((unsigned long)control_page, 1); @@ -396,16 +434,10 @@ void __nocfi machine_kexec(struct kimage *image) * with from a table in memory. At no other time is the * descriptor table in memory accessed. * - * I take advantage of this here by force loading the - * segments, before I zap the gdt with an invalid value. + * Take advantage of this here by force loading the segments, + * before the GDT is zapped with an invalid value. */ load_segments(); - /* - * The gdt & idt are now invalid. - * If you want to load them you must set up your own idt & gdt. - */ - native_idt_invalidate(); - native_gdt_invalidate(); /* now call it */ image->start = relocate_kernel_ptr((unsigned long)image->head, diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index 1f54eedc3015..ef6104e7cc72 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c @@ -97,7 +97,7 @@ static void get_fam10h_pci_mmconf_base(void) /* SYS_CFG */ address = MSR_AMD64_SYSCFG; - rdmsrl(address, val); + rdmsrq(address, val); /* TOP_MEM2 is not enabled? 
*/ if (!(val & (1<<21))) { @@ -105,7 +105,7 @@ static void get_fam10h_pci_mmconf_base(void) } else { /* TOP_MEM2 */ address = MSR_K8_TOP_MEM2; - rdmsrl(address, val); + rdmsrq(address, val); tom2 = max(val & 0xffffff800000ULL, 1ULL << 32); } @@ -177,7 +177,7 @@ void fam10h_check_enable_mmcfg(void) return; address = MSR_FAM10H_MMIO_CONF_BASE; - rdmsrl(address, val); + rdmsrq(address, val); /* try to make sure that AP's setting is identical to BSP setting */ if (val & FAM10H_MMIO_CONF_ENABLE) { @@ -212,7 +212,7 @@ void fam10h_check_enable_mmcfg(void) (FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT)); val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) | FAM10H_MMIO_CONF_ENABLE; - wrmsrl(address, val); + wrmsrq(address, val); } static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d) diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index ff07558b7ebc..0ffbae902e2f 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -206,7 +206,7 @@ static int write_relocate_add(Elf64_Shdr *sechdrs, write, apply); if (!early) { - text_poke_sync(); + smp_text_poke_sync_each_cpu(); mutex_unlock(&text_mutex); } diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 9a95d00f1423..be93ec7255bf 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -49,27 +49,20 @@ struct nmi_desc { struct list_head head; }; -static struct nmi_desc nmi_desc[NMI_MAX] = -{ - { - .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock), - .head = LIST_HEAD_INIT(nmi_desc[0].head), - }, - { - .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock), - .head = LIST_HEAD_INIT(nmi_desc[1].head), - }, - { - .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock), - .head = LIST_HEAD_INIT(nmi_desc[2].head), - }, - { - .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock), - .head = LIST_HEAD_INIT(nmi_desc[3].head), - }, +#define NMI_DESC_INIT(type) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[type].lock), \ + .head = LIST_HEAD_INIT(nmi_desc[type].head), \ +} +static struct nmi_desc nmi_desc[NMI_MAX] = { + NMI_DESC_INIT(NMI_LOCAL), + NMI_DESC_INIT(NMI_UNKNOWN), + NMI_DESC_INIT(NMI_SERR), + NMI_DESC_INIT(NMI_IO_CHECK), }; +#define nmi_to_desc(type) (&nmi_desc[type]) + struct nmi_stats { unsigned int normal; unsigned int unknown; @@ -91,6 +84,9 @@ static DEFINE_PER_CPU(struct nmi_stats, nmi_stats); static int ignore_nmis __read_mostly; int unknown_nmi_panic; +int panic_on_unrecovered_nmi; +int panic_on_io_nmi; + /* * Prevent NMI reason port (0x61) being accessed simultaneously, can * only be used in NMI handler. 
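NMI_DESC_INIT() above merely folds the four open-coded nmi_desc[] initializers into one designated-initializer macro; the per-type handler lists it builds are what the usual registration API feeds. A small sketch of that consumer side (my_nmi_handler, my_device_raised_nmi() and the "my_nmi" name are illustrative, not from this patch):

// A handler on the NMI_LOCAL list either claims the NMI or passes.
static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!my_device_raised_nmi())		// illustrative device check
		return NMI_DONE;		// not ours, keep walking the list
	return NMI_HANDLED;			// claimed, counted in nmi_stats.normal
}

	// registration/removal, e.g. from a driver's init/exit paths:
	register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_nmi");
	unregister_nmi_handler(NMI_LOCAL, "my_nmi");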
@@ -104,8 +100,6 @@ static int __init setup_unknown_nmi_panic(char *str) } __setup("unknown_nmi_panic", setup_unknown_nmi_panic); -#define nmi_to_desc(type) (&nmi_desc[type]) - static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC; static int __init nmi_warning_debugfs(void) @@ -125,12 +119,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) action->max_duration = duration; - remainder_ns = do_div(duration, (1000 * 1000)); - decimal_msecs = remainder_ns / 1000; + /* Convert duration from nsec to msec */ + remainder_ns = do_div(duration, NSEC_PER_MSEC); + decimal_msecs = remainder_ns / NSEC_PER_USEC; - printk_ratelimited(KERN_INFO - "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", - action->handler, duration, decimal_msecs); + pr_info_ratelimited("INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", + action->handler, duration, decimal_msecs); } static int nmi_handle(unsigned int type, struct pt_regs *regs) @@ -333,10 +327,9 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs) int handled; /* - * Use 'false' as back-to-back NMIs are dealt with one level up. - * Of course this makes having multiple 'unknown' handlers useless - * as only the first one is ever run (unless it can actually determine - * if it caused the NMI) + * As a last resort, let the "unknown" handlers make a + * best-effort attempt to figure out if they can claim + * responsibility for this Unknown NMI. */ handled = nmi_handle(NMI_UNKNOWN, regs); if (handled) { @@ -366,17 +359,18 @@ static noinstr void default_do_nmi(struct pt_regs *regs) bool b2b = false; /* - * CPU-specific NMI must be processed before non-CPU-specific - * NMI, otherwise we may lose it, because the CPU-specific - * NMI can not be detected/processed on other CPUs. - */ - - /* - * Back-to-back NMIs are interesting because they can either - * be two NMI or more than two NMIs (any thing over two is dropped - * due to NMI being edge-triggered). If this is the second half - * of the back-to-back NMI, assume we dropped things and process - * more handlers. Otherwise reset the 'swallow' NMI behaviour + * Back-to-back NMIs are detected by comparing the RIP of the + * current NMI with that of the previous NMI. If it is the same, + * it is assumed that the CPU did not have a chance to jump back + * into a non-NMI context and execute code in between the two + * NMIs. + * + * They are interesting because even if there are more than two, + * only a maximum of two can be detected (anything over two is + * dropped due to NMI being edge-triggered). If this is the + * second half of the back-to-back NMI, assume we dropped things + * and process more handlers. Otherwise, reset the 'swallow' NMI + * behavior. */ if (regs->ip == __this_cpu_read(last_nmi_rip)) b2b = true; @@ -390,6 +384,11 @@ static noinstr void default_do_nmi(struct pt_regs *regs) if (microcode_nmi_handler_enabled() && microcode_nmi_handler()) goto out; + /* + * CPU-specific NMI must be processed before non-CPU-specific + * NMI, otherwise we may lose it, because the CPU-specific + * NMI can not be detected/processed on other CPUs. 
+ */ handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { @@ -426,13 +425,14 @@ static noinstr void default_do_nmi(struct pt_regs *regs) pci_serr_error(reason, regs); else if (reason & NMI_REASON_IOCHK) io_check_error(reason, regs); -#ifdef CONFIG_X86_32 + /* * Reassert NMI in case it became active * meanwhile as it's edge-triggered: */ - reassert_nmi(); -#endif + if (IS_ENABLED(CONFIG_X86_32)) + reassert_nmi(); + __this_cpu_add(nmi_stats.external, 1); raw_spin_unlock(&nmi_reason_lock); goto out; @@ -751,4 +751,3 @@ void local_touch_nmi(void) { __this_cpu_write(last_nmi_rip, 0); } -EXPORT_SYMBOL_GPL(local_touch_nmi); diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c index e93a8545c74d..a010e9d062bf 100644 --- a/arch/x86/kernel/nmi_selftest.c +++ b/arch/x86/kernel/nmi_selftest.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 /* - * arch/x86/kernel/nmi-selftest.c - * * Testsuite for NMI: IPIs * * Started by Don Zickus: @@ -30,7 +28,6 @@ static DECLARE_BITMAP(nmi_ipi_mask, NR_CPUS) __initdata; static int __initdata testcase_total; static int __initdata testcase_successes; -static int __initdata expected_testcase_failures; static int __initdata unexpected_testcase_failures; static int __initdata unexpected_testcase_unknowns; @@ -120,26 +117,22 @@ static void __init dotest(void (*testcase_fn)(void), int expected) unexpected_testcase_failures++; if (nmi_fail == FAILURE) - printk(KERN_CONT "FAILED |"); + pr_cont("FAILED |"); else if (nmi_fail == TIMEOUT) - printk(KERN_CONT "TIMEOUT|"); + pr_cont("TIMEOUT|"); else - printk(KERN_CONT "ERROR |"); + pr_cont("ERROR |"); dump_stack(); } else { testcase_successes++; - printk(KERN_CONT " ok |"); + pr_cont(" ok |"); } - testcase_total++; + pr_cont("\n"); + testcase_total++; reset_nmi(); } -static inline void __init print_testname(const char *testname) -{ - printk("%12s:", testname); -} - void __init nmi_selftest(void) { init_nmi_testsuite(); @@ -147,38 +140,25 @@ void __init nmi_selftest(void) /* * Run the testsuite: */ - printk("----------------\n"); - printk("| NMI testsuite:\n"); - printk("--------------------\n"); + pr_info("----------------\n"); + pr_info("| NMI testsuite:\n"); + pr_info("--------------------\n"); - print_testname("remote IPI"); + pr_info("%12s:", "remote IPI"); dotest(remote_ipi, SUCCESS); - printk(KERN_CONT "\n"); - print_testname("local IPI"); + + pr_info("%12s:", "local IPI"); dotest(local_ipi, SUCCESS); - printk(KERN_CONT "\n"); cleanup_nmi_testsuite(); + pr_info("--------------------\n"); if (unexpected_testcase_failures) { - printk("--------------------\n"); - printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n", + pr_info("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n", unexpected_testcase_failures, testcase_total); - printk("-----------------------------------------------------------------\n"); - } else if (expected_testcase_failures && testcase_successes) { - printk("--------------------\n"); - printk("%3d out of %3d testcases failed, as expected. |\n", - expected_testcase_failures, testcase_total); - printk("----------------------------------------------------\n"); - } else if (expected_testcase_failures && !testcase_successes) { - printk("--------------------\n"); - printk("All %3d testcases failed, as expected. |\n", - expected_testcase_failures); - printk("----------------------------------------\n"); } else { - printk("--------------------\n"); - printk("Good, all %3d testcases passed! 
|\n", + pr_info("Good, all %3d testcases passed! |\n", testcase_successes); - printk("---------------------------------\n"); } + pr_info("-----------------------------------------------------------------\n"); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1ccd05d8999f..ab3e172dcc69 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -33,6 +33,7 @@ #include <asm/tlb.h> #include <asm/io_bitmap.h> #include <asm/gsseg.h> +#include <asm/msr.h> /* stub always returning 0. */ DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text); @@ -210,12 +211,10 @@ struct paravirt_patch_template pv_ops = { .mmu.set_p4d = native_set_p4d, -#if CONFIG_PGTABLE_LEVELS >= 5 .mmu.p4d_val = PTE_IDENT, .mmu.make_p4d = PTE_IDENT, .mmu.set_pgd = native_set_pgd, -#endif /* CONFIG_PGTABLE_LEVELS >= 5 */ .mmu.pte_val = PTE_IDENT, .mmu.pgd_val = PTE_IDENT, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 962c3ce39323..c1d2dac72b9c 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -30,7 +30,7 @@ #include <linux/hw_breakpoint.h> #include <linux/entry-common.h> #include <asm/cpu.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/apic.h> #include <linux/uaccess.h> #include <asm/mwait.h> @@ -52,6 +52,7 @@ #include <asm/unwind.h> #include <asm/tdx.h> #include <asm/mmu_context.h> +#include <asm/msr.h> #include <asm/shstk.h> #include "process.h" @@ -93,17 +94,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - /* init_task is not dynamically sized (incomplete FPU state) */ - if (unlikely(src == &init_task)) - memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0); - else - memcpy(dst, src, arch_task_struct_size); + /* fpu_clone() will initialize the "dst_fpu" memory */ + memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(*dst), 0); #ifdef CONFIG_VM86 dst->thread.vm86 = NULL; #endif - /* Drop the copied pointer to current's fpstate */ - dst->thread.fpu.fpstate = NULL; return 0; } @@ -111,8 +107,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) #ifdef CONFIG_X86_64 void arch_release_task_struct(struct task_struct *tsk) { - if (fpu_state_size_dynamic()) - fpstate_free(&tsk->thread.fpu); + if (fpu_state_size_dynamic() && !(tsk->flags & (PF_KTHREAD | PF_USER_WORKER))) + fpstate_free(x86_task_fpu(tsk)); } #endif @@ -122,7 +118,6 @@ void arch_release_task_struct(struct task_struct *tsk) void exit_thread(struct task_struct *tsk) { struct thread_struct *t = &tsk->thread; - struct fpu *fpu = &t->fpu; if (test_thread_flag(TIF_IO_BITMAP)) io_bitmap_exit(tsk); @@ -130,7 +125,7 @@ void exit_thread(struct task_struct *tsk) free_vm86(t); shstk_free(tsk); - fpu__drop(fpu); + fpu__drop(tsk); } static int set_new_tls(struct task_struct *p, unsigned long tls) @@ -344,7 +339,7 @@ static void set_cpuid_faulting(bool on) msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT; msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT); this_cpu_write(msr_misc_features_shadow, msrval); - wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval); + wrmsrq(MSR_MISC_FEATURES_ENABLES, msrval); } static void disable_cpuid(void) @@ -561,7 +556,7 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn) if (!static_cpu_has(X86_FEATURE_ZEN)) { msr |= ssbd_tif_to_amd_ls_cfg(tifn); - wrmsrl(MSR_AMD64_LS_CFG, msr); + wrmsrq(MSR_AMD64_LS_CFG, msr); return; } @@ -578,7 +573,7 @@ static __always_inline void 
amd_set_core_ssb_state(unsigned long tifn) raw_spin_lock(&st->shared_state->lock); /* First sibling enables SSBD: */ if (!st->shared_state->disable_state) - wrmsrl(MSR_AMD64_LS_CFG, msr); + wrmsrq(MSR_AMD64_LS_CFG, msr); st->shared_state->disable_state++; raw_spin_unlock(&st->shared_state->lock); } else { @@ -588,7 +583,7 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn) raw_spin_lock(&st->shared_state->lock); st->shared_state->disable_state--; if (!st->shared_state->disable_state) - wrmsrl(MSR_AMD64_LS_CFG, msr); + wrmsrq(MSR_AMD64_LS_CFG, msr); raw_spin_unlock(&st->shared_state->lock); } } @@ -597,7 +592,7 @@ static __always_inline void amd_set_core_ssb_state(unsigned long tifn) { u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); - wrmsrl(MSR_AMD64_LS_CFG, msr); + wrmsrq(MSR_AMD64_LS_CFG, msr); } #endif @@ -607,7 +602,7 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, * so ssbd_tif_to_spec_ctrl() just works. */ - wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); + wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); } /* @@ -710,11 +705,11 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) arch_has_block_step()) { unsigned long debugctl, msk; - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); debugctl &= ~DEBUGCTLMSR_BTF; msk = tifn & _TIF_BLOCKSTEP; debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT; - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); } if ((tifp ^ tifn) & _TIF_NOTSC) @@ -907,13 +902,10 @@ static __init bool prefer_mwait_c1_over_halt(void) static __cpuidle void mwait_idle(void) { if (!current_set_polling_and_test()) { - if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { - mb(); /* quirk */ - clflush((void *)¤t_thread_info()->flags); - mb(); /* quirk */ - } + const void *addr = ¤t_thread_info()->flags; - __monitor((void *)¤t_thread_info()->flags, 0, 0); + alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); + __monitor(addr, 0, 0); if (!need_resched()) { __sti_mwait(0, 0); raw_local_irq_disable(); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 4636ef359973..9bd4fa694da5 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -160,8 +160,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ - if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_p, cpu); + switch_fpu(prev_p, cpu); /* * Save away %gs. No need to save %fs, as it was saved on the @@ -208,8 +207,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) raw_cpu_write(current_task, next_p); - switch_fpu_finish(next_p); - /* Load the Intel cache allocation PQR MSR. 
*/ resctrl_sched_in(next_p); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 7196ca7048be..f39ff02e498d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -57,6 +57,7 @@ #include <asm/unistd.h> #include <asm/fsgsbase.h> #include <asm/fred.h> +#include <asm/msr.h> #ifdef CONFIG_IA32_EMULATION /* Not included via unistd.h */ #include <asm/unistd_32_ia32.h> @@ -95,8 +96,8 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, return; if (mode == SHOW_REGS_USER) { - rdmsrl(MSR_FS_BASE, fs); - rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); + rdmsrq(MSR_FS_BASE, fs); + rdmsrq(MSR_KERNEL_GS_BASE, shadowgs); printk("%sFS: %016lx GS: %016lx\n", log_lvl, fs, shadowgs); return; @@ -107,9 +108,9 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, asm("movl %%fs,%0" : "=r" (fsindex)); asm("movl %%gs,%0" : "=r" (gsindex)); - rdmsrl(MSR_FS_BASE, fs); - rdmsrl(MSR_GS_BASE, gs); - rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); + rdmsrq(MSR_FS_BASE, fs); + rdmsrq(MSR_GS_BASE, gs); + rdmsrq(MSR_KERNEL_GS_BASE, shadowgs); cr0 = read_cr0(); cr2 = read_cr2(); @@ -195,7 +196,7 @@ static noinstr unsigned long __rdgsbase_inactive(void) native_swapgs(); } else { instrumentation_begin(); - rdmsrl(MSR_KERNEL_GS_BASE, gsbase); + rdmsrq(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } @@ -221,7 +222,7 @@ static noinstr void __wrgsbase_inactive(unsigned long gsbase) native_swapgs(); } else { instrumentation_begin(); - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); + wrmsrq(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } } @@ -353,7 +354,7 @@ static __always_inline void load_seg_legacy(unsigned short prev_index, } else { if (prev_index != next_index) loadseg(which, next_index); - wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, + wrmsrq(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, next_base); } } else { @@ -463,7 +464,7 @@ unsigned long x86_gsbase_read_cpu_inactive(void) gsbase = __rdgsbase_inactive(); local_irq_restore(flags); } else { - rdmsrl(MSR_KERNEL_GS_BASE, gsbase); + rdmsrq(MSR_KERNEL_GS_BASE, gsbase); } return gsbase; @@ -478,7 +479,7 @@ void x86_gsbase_write_cpu_inactive(unsigned long gsbase) __wrgsbase_inactive(gsbase); local_irq_restore(flags); } else { - wrmsrl(MSR_KERNEL_GS_BASE, gsbase); + wrmsrq(MSR_KERNEL_GS_BASE, gsbase); } } @@ -616,8 +617,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(hardirq_stack_inuse)); - if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_p, cpu); + switch_fpu(prev_p, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). @@ -671,8 +671,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) raw_cpu_write(current_task, next_p); raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); - switch_fpu_finish(next_p); - /* Reload sp0. 
*/ update_task_stack(next_p); diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c index b7c0f142d026..4679ac0a03eb 100644 --- a/arch/x86/kernel/reboot_fixups_32.c +++ b/arch/x86/kernel/reboot_fixups_32.c @@ -27,7 +27,7 @@ static void cs5530a_warm_reset(struct pci_dev *dev) static void cs5536_warm_reset(struct pci_dev *dev) { /* writing 1 to the LSB of this MSR causes a hard reset */ - wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL); + wrmsrq(MSR_DIVIL_SOFT_RESET, 1ULL); udelay(50); /* shouldn't get here but be safe and spin a while */ } diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index c7c4b1917336..57276f134d12 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -263,17 +263,17 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) movl %edx, %edi movl $1024, %ecx - rep ; movsl + rep movsl movl %ebp, %edi movl %eax, %esi movl $1024, %ecx - rep ; movsl + rep movsl movl %eax, %edi movl %edx, %esi movl $1024, %ecx - rep ; movsl + rep movsl lea PAGE_SIZE(%ebp), %esi jmp 0b diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index ac058971a382..ea604f4d0b52 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -39,6 +39,8 @@ SYM_DATA(kexec_va_control_page, .quad 0) SYM_DATA(kexec_pa_table_page, .quad 0) SYM_DATA(kexec_pa_swap_page, .quad 0) SYM_DATA_LOCAL(pa_backup_pages_map, .quad 0) +SYM_DATA(kexec_debug_8250_mmio32, .quad 0) +SYM_DATA(kexec_debug_8250_port, .word 0) .balign 16 SYM_DATA_START_LOCAL(kexec_debug_gdt) @@ -50,6 +52,11 @@ SYM_DATA_START_LOCAL(kexec_debug_gdt) .quad 0x00cf92000000ffff /* __KERNEL_DS */ SYM_DATA_END_LABEL(kexec_debug_gdt, SYM_L_LOCAL, kexec_debug_gdt_end) + .balign 8 +SYM_DATA_START(kexec_debug_idt) + .skip 0x100, 0x00 +SYM_DATA_END(kexec_debug_idt) + .section .text..relocate_kernel,"ax"; .code64 SYM_CODE_START_NOALIGN(relocate_kernel) @@ -72,8 +79,13 @@ SYM_CODE_START_NOALIGN(relocate_kernel) pushq %r15 pushf - /* zero out flags, and disable interrupts */ - pushq $0 + /* Invalidate GDT/IDT, zero out flags */ + pushq $0 + pushq $0 + + lidt (%rsp) + lgdt (%rsp) + addq $8, %rsp popfq /* Switch to the identity mapped page tables */ @@ -139,6 +151,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) movq %ds, %rax movq %rax, %ds + /* Now an IDTR on the stack to load the IDT the kernel created */ + leaq kexec_debug_idt(%rip), %rsi + pushq %rsi + pushw $0xff + lidt (%rsp) + addq $10, %rsp + + //int3 + /* * Clear X86_CR4_CET (if it was set) such that we can clear CR0_WP * below. @@ -342,20 +363,20 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) /* copy source page to swap page */ movq kexec_pa_swap_page(%rip), %rdi movl $512, %ecx - rep ; movsq + rep movsq /* copy destination page to source page */ movq %rax, %rdi movq %rdx, %rsi movl $512, %ecx - rep ; movsq + rep movsq /* copy swap page to destination page */ movq %rdx, %rdi movq kexec_pa_swap_page(%rip), %rsi .Lnoswap: movl $512, %ecx - rep ; movsq + rep movsq lea PAGE_SIZE(%rax), %rsi jmp .Lloop @@ -364,3 +385,222 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) ret int3 SYM_CODE_END(swap_pages) + +/* + * Generic 'print character' routine + * - %al: Character to be printed (may clobber %rax) + * - %rdx: MMIO address or port. 
+ */ +#define XMTRDY 0x20 + +#define TXR 0 /* Transmit register (WRITE) */ +#define LSR 5 /* Line Status */ + +SYM_CODE_START_LOCAL_NOALIGN(pr_char_8250) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + addw $LSR, %dx + xchg %al, %ah +.Lxmtrdy_loop: + inb %dx, %al + testb $XMTRDY, %al + jnz .Lready + pause + jmp .Lxmtrdy_loop + +.Lready: + subw $LSR, %dx + xchg %al, %ah + outb %al, %dx +pr_char_null: + ANNOTATE_NOENDBR + + ANNOTATE_UNRET_SAFE + ret +SYM_CODE_END(pr_char_8250) + +SYM_CODE_START_LOCAL_NOALIGN(pr_char_8250_mmio32) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR +.Lxmtrdy_loop_mmio: + movb (LSR*4)(%rdx), %ah + testb $XMTRDY, %ah + jnz .Lready_mmio + pause + jmp .Lxmtrdy_loop_mmio + +.Lready_mmio: + movb %al, (%rdx) + ANNOTATE_UNRET_SAFE + ret +SYM_CODE_END(pr_char_8250_mmio32) + +/* + * Load pr_char function pointer into %rsi and load %rdx with whatever + * that function wants to see there (typically port/MMIO address). + */ +.macro pr_setup + leaq pr_char_8250(%rip), %rsi + movw kexec_debug_8250_port(%rip), %dx + testw %dx, %dx + jnz 1f + + leaq pr_char_8250_mmio32(%rip), %rsi + movq kexec_debug_8250_mmio32(%rip), %rdx + testq %rdx, %rdx + jnz 1f + + leaq pr_char_null(%rip), %rsi +1: +.endm + +/* Print the nybble in %bl, clobber %rax */ +SYM_CODE_START_LOCAL_NOALIGN(pr_nybble) + UNWIND_HINT_FUNC + movb %bl, %al + nop + andb $0x0f, %al + addb $0x30, %al + cmpb $0x3a, %al + jb 1f + addb $('a' - '0' - 10), %al + ANNOTATE_RETPOLINE_SAFE +1: jmp *%rsi +SYM_CODE_END(pr_nybble) + +SYM_CODE_START_LOCAL_NOALIGN(pr_qword) + UNWIND_HINT_FUNC + movq $16, %rcx +1: rolq $4, %rbx + call pr_nybble + loop 1b + movb $'\n', %al + ANNOTATE_RETPOLINE_SAFE + jmp *%rsi +SYM_CODE_END(pr_qword) + +.macro print_reg a, b, c, d, r + movb $\a, %al + ANNOTATE_RETPOLINE_SAFE + call *%rsi + movb $\b, %al + ANNOTATE_RETPOLINE_SAFE + call *%rsi + movb $\c, %al + ANNOTATE_RETPOLINE_SAFE + call *%rsi + movb $\d, %al + ANNOTATE_RETPOLINE_SAFE + call *%rsi + movq \r, %rbx + call pr_qword +.endm + +SYM_CODE_START_NOALIGN(kexec_debug_exc_vectors) + /* Each of these is 6 bytes. */ +.macro vec_err exc + UNWIND_HINT_ENTRY + . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE) + nop + nop + pushq $\exc + jmp exc_handler +.endm + +.macro vec_noerr exc + UNWIND_HINT_ENTRY + . = kexec_debug_exc_vectors + (\exc * KEXEC_DEBUG_EXC_HANDLER_SIZE) + pushq $0 + pushq $\exc + jmp exc_handler +.endm + + ANNOTATE_NOENDBR + vec_noerr 0 // #DE + vec_noerr 1 // #DB + vec_noerr 2 // #NMI + vec_noerr 3 // #BP + vec_noerr 4 // #OF + vec_noerr 5 // #BR + vec_noerr 6 // #UD + vec_noerr 7 // #NM + vec_err 8 // #DF + vec_noerr 9 + vec_err 10 // #TS + vec_err 11 // #NP + vec_err 12 // #SS + vec_err 13 // #GP + vec_err 14 // #PF + vec_noerr 15 +SYM_CODE_END(kexec_debug_exc_vectors) + +SYM_CODE_START_LOCAL_NOALIGN(exc_handler) + /* No need for RET mitigations during kexec */ + VALIDATE_UNRET_END + + pushq %rax + pushq %rbx + pushq %rcx + pushq %rdx + pushq %rsi + + /* Stack frame */ +#define EXC_SS 0x58 /* Architectural... 
*/ +#define EXC_RSP 0x50 +#define EXC_EFLAGS 0x48 +#define EXC_CS 0x40 +#define EXC_RIP 0x38 +#define EXC_ERRORCODE 0x30 /* Either architectural or zero pushed by handler */ +#define EXC_EXCEPTION 0x28 /* Pushed by handler entry point */ +#define EXC_RAX 0x20 /* Pushed just above in exc_handler */ +#define EXC_RBX 0x18 +#define EXC_RCX 0x10 +#define EXC_RDX 0x08 +#define EXC_RSI 0x00 + + /* Set up %rdx/%rsi for debug output */ + pr_setup + + /* rip and exception info */ + print_reg 'E', 'x', 'c', ':', EXC_EXCEPTION(%rsp) + print_reg 'E', 'r', 'r', ':', EXC_ERRORCODE(%rsp) + print_reg 'r', 'i', 'p', ':', EXC_RIP(%rsp) + print_reg 'r', 's', 'p', ':', EXC_RSP(%rsp) + + /* We spilled these to the stack */ + print_reg 'r', 'a', 'x', ':', EXC_RAX(%rsp) + print_reg 'r', 'b', 'x', ':', EXC_RBX(%rsp) + print_reg 'r', 'c', 'x', ':', EXC_RCX(%rsp) + print_reg 'r', 'd', 'x', ':', EXC_RDX(%rsp) + print_reg 'r', 's', 'i', ':', EXC_RSI(%rsp) + + /* Other registers untouched */ + print_reg 'r', 'd', 'i', ':', %rdi + print_reg 'r', '8', ' ', ':', %r8 + print_reg 'r', '9', ' ', ':', %r9 + print_reg 'r', '1', '0', ':', %r10 + print_reg 'r', '1', '1', ':', %r11 + print_reg 'r', '1', '2', ':', %r12 + print_reg 'r', '1', '3', ':', %r13 + print_reg 'r', '1', '4', ':', %r14 + print_reg 'r', '1', '5', ':', %r15 + print_reg 'c', 'r', '2', ':', %cr2 + + /* Only return from INT3 */ + cmpq $3, EXC_EXCEPTION(%rsp) + jne .Ldie + + popq %rsi + popq %rdx + popq %rcx + popq %rbx + popq %rax + + addq $16, %rsp + iretq + +.Ldie: + hlt + jmp .Ldie + +SYM_CODE_END(exc_handler) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 9d2a13b37833..7d9ed79a93c0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -11,6 +11,7 @@ #include <linux/crash_dump.h> #include <linux/dma-map-ops.h> #include <linux/efi.h> +#include <linux/hugetlb.h> #include <linux/ima.h> #include <linux/init_ohci1394_dma.h> #include <linux/initrd.h> @@ -18,21 +19,19 @@ #include <linux/memblock.h> #include <linux/panic_notifier.h> #include <linux/pci.h> +#include <linux/random.h> #include <linux/root_dev.h> -#include <linux/hugetlb.h> -#include <linux/tboot.h> -#include <linux/usb/xhci-dbgp.h> #include <linux/static_call.h> #include <linux/swiotlb.h> -#include <linux/random.h> +#include <linux/tboot.h> +#include <linux/usb/xhci-dbgp.h> +#include <linux/vmalloc.h> #include <uapi/linux/mount.h> #include <xen/xen.h> #include <asm/apic.h> -#include <asm/efi.h> -#include <asm/numa.h> #include <asm/bios_ebda.h> #include <asm/bugs.h> #include <asm/cacheinfo.h> @@ -47,18 +46,16 @@ #include <asm/mce.h> #include <asm/memtype.h> #include <asm/mtrr.h> -#include <asm/realmode.h> +#include <asm/nmi.h> +#include <asm/numa.h> #include <asm/olpc_ofw.h> #include <asm/pci-direct.h> #include <asm/prom.h> #include <asm/proto.h> +#include <asm/realmode.h> #include <asm/thermal.h> #include <asm/unwind.h> #include <asm/vsyscall.h> -#include <linux/vmalloc.h> -#if defined(CONFIG_X86_LOCAL_APIC) -#include <asm/nmi.h> -#endif /* * max_low_pfn_mapped: highest directly mapped pfn < 4 GB @@ -134,6 +131,7 @@ struct ist_info ist_info; struct cpuinfo_x86 boot_cpu_data __read_mostly; EXPORT_SYMBOL(boot_cpu_data); +SYM_PIC_ALIAS(boot_cpu_data); #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) __visible unsigned long mmu_cr4_features __ro_after_init; @@ -151,6 +149,13 @@ int bootloader_type, bootloader_version; static const struct ctl_table x86_sysctl_table[] = { { + .procname = "unknown_nmi_panic", + .data = &unknown_nmi_panic, + .maxlen = sizeof(int), + 
.mode = 0644, + .proc_handler = proc_dointvec, + }, + { .procname = "panic_on_unrecovered_nmi", .data = &panic_on_unrecovered_nmi, .maxlen = sizeof(int), @@ -185,15 +190,6 @@ static const struct ctl_table x86_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, -#if defined(CONFIG_X86_LOCAL_APIC) - { - .procname = "unknown_nmi_panic", - .data = &unknown_nmi_panic, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, -#endif #if defined(CONFIG_ACPI_SLEEP) { .procname = "acpi_video_flags", diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c index 059685612362..2ddf23387c7e 100644 --- a/arch/x86/kernel/shstk.c +++ b/arch/x86/kernel/shstk.c @@ -173,8 +173,8 @@ static int shstk_setup(void) return PTR_ERR((void *)addr); fpregs_lock_and_load(); - wrmsrl(MSR_IA32_PL3_SSP, addr + size); - wrmsrl(MSR_IA32_U_CET, CET_SHSTK_EN); + wrmsrq(MSR_IA32_PL3_SSP, addr + size); + wrmsrq(MSR_IA32_U_CET, CET_SHSTK_EN); fpregs_unlock(); shstk->base = addr; @@ -239,7 +239,7 @@ static unsigned long get_user_shstk_addr(void) fpregs_lock_and_load(); - rdmsrl(MSR_IA32_PL3_SSP, ssp); + rdmsrq(MSR_IA32_PL3_SSP, ssp); fpregs_unlock(); @@ -372,7 +372,7 @@ int setup_signal_shadow_stack(struct ksignal *ksig) return -EFAULT; fpregs_lock_and_load(); - wrmsrl(MSR_IA32_PL3_SSP, ssp); + wrmsrq(MSR_IA32_PL3_SSP, ssp); fpregs_unlock(); return 0; @@ -396,7 +396,7 @@ int restore_signal_shadow_stack(void) return err; fpregs_lock_and_load(); - wrmsrl(MSR_IA32_PL3_SSP, ssp); + wrmsrq(MSR_IA32_PL3_SSP, ssp); fpregs_unlock(); return 0; @@ -460,7 +460,7 @@ static int wrss_control(bool enable) return 0; fpregs_lock_and_load(); - rdmsrl(MSR_IA32_U_CET, msrval); + rdmsrq(MSR_IA32_U_CET, msrval); if (enable) { features_set(ARCH_SHSTK_WRSS); @@ -473,7 +473,7 @@ static int wrss_control(bool enable) msrval &= ~CET_WRSS_EN; } - wrmsrl(MSR_IA32_U_CET, msrval); + wrmsrq(MSR_IA32_U_CET, msrval); unlock: fpregs_unlock(); @@ -492,8 +492,8 @@ static int shstk_disable(void) fpregs_lock_and_load(); /* Disable WRSS too when disabling shadow stack */ - wrmsrl(MSR_IA32_U_CET, 0); - wrmsrl(MSR_IA32_PL3_SSP, 0); + wrmsrq(MSR_IA32_U_CET, 0); + wrmsrq(MSR_IA32_PL3_SSP, 0); fpregs_unlock(); shstk_free(current); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 5f441039b572..2404233336ab 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -255,7 +255,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { bool stepping, failed; - struct fpu *fpu = ¤t->thread.fpu; + struct fpu *fpu = x86_task_fpu(current); if (v8086_mode(regs)) save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL); @@ -423,14 +423,14 @@ bool sigaltstack_size_valid(size_t ss_size) if (!fpu_state_size_dynamic() && !strict_sigaltstack_size) return true; - fsize += current->group_leader->thread.fpu.perm.__user_state_size; + fsize += x86_task_fpu(current->group_leader)->perm.__user_state_size; if (likely(ss_size > fsize)) return true; if (strict_sigaltstack_size) return ss_size > fsize; - mask = current->group_leader->thread.fpu.perm.__state_perm; + mask = x86_task_fpu(current->group_leader)->perm.__state_perm; if (mask & XFEATURE_MASK_USER_DYNAMIC) return ss_size > fsize; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index d6cf1e23c2a3..b90d872aa0c8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -64,7 +64,7 @@ #include <asm/acpi.h> #include <asm/cacheinfo.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/desc.h> 
#include <asm/nmi.h> #include <asm/irq.h> @@ -1188,6 +1188,12 @@ void cpu_disable_common(void) remove_siblinginfo(cpu); + /* + * Stop allowing kernel-mode FPU. This is needed so that if the CPU is + * brought online again, the initial state is not allowed: + */ + this_cpu_write(kernel_fpu_allowed, false); + /* It's now safe to remove this processor from the online map */ lock_vector_lock(); remove_cpu_from_maps(cpu); diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index c3d7ff44b29a..378c388d1b31 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -108,7 +108,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, if (system_state == SYSTEM_BOOTING || modinit) return text_poke_early(insn, code, size); - text_poke_bp(insn, code, size, emulate); + smp_text_poke_single(insn, code, size, emulate); } static void __static_call_validate(u8 *insn, bool tail, bool tramp) diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c index b8e7abe00b06..708d61743d15 100644 --- a/arch/x86/kernel/trace_clock.c +++ b/arch/x86/kernel/trace_clock.c @@ -4,7 +4,7 @@ */ #include <asm/trace_clock.h> #include <asm/barrier.h> -#include <asm/msr.h> +#include <asm/tsc.h> /* * trace_clock_x86_tsc(): A clock that is just the cycle counter. diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c deleted file mode 100644 index 03ae1caaa878..000000000000 --- a/arch/x86/kernel/tracepoint.c +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2013 Seiji Aguchi <seiji.aguchi@hds.com> - */ -#include <linux/jump_label.h> -#include <linux/atomic.h> - -#include <asm/trace/exceptions.h> - -DEFINE_STATIC_KEY_FALSE(trace_pagefault_key); - -int trace_pagefault_reg(void) -{ - static_branch_inc(&trace_pagefault_key); - return 0; -} - -void trace_pagefault_unreg(void) -{ - static_branch_dec(&trace_pagefault_key); -} diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9f88b8a78e50..94c0236963c6 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -68,6 +68,7 @@ #include <asm/vdso.h> #include <asm/tdx.h> #include <asm/cfi.h> +#include <asm/msr.h> #ifdef CONFIG_X86_64 #include <asm/x86_init.h> @@ -749,7 +750,7 @@ static bool try_fixup_enqcmd_gp(void) if (current->pasid_activated) return false; - wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID); + wrmsrq(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID); current->pasid_activated = 1; return true; @@ -882,16 +883,16 @@ static void do_int3_user(struct pt_regs *regs) DEFINE_IDTENTRY_RAW(exc_int3) { /* - * poke_int3_handler() is completely self contained code; it does (and + * smp_text_poke_int3_handler() is completely self contained code; it does (and * must) *NOT* call out to anything, lest it hits upon yet another * INT3. */ - if (poke_int3_handler(regs)) + if (smp_text_poke_int3_handler(regs)) return; /* * irqentry_enter_from_user_mode() uses static_branch_{,un}likely() - * and therefore can trigger INT3, hence poke_int3_handler() must + * and therefore can trigger INT3, hence smp_text_poke_int3_handler() must * be done before. If the entry came from kernel mode, then use * nmi_enter() because the INT3 could have been hit in any context * including NMI. 
@@ -1120,9 +1121,9 @@ static noinstr void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6) */ unsigned long debugctl; - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); debugctl |= DEBUGCTLMSR_BTF; - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); + wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl); } /* @@ -1295,7 +1296,7 @@ DEFINE_IDTENTRY_RAW(exc_debug) static void math_error(struct pt_regs *regs, int trapnr) { struct task_struct *task = current; - struct fpu *fpu = &task->thread.fpu; + struct fpu *fpu = x86_task_fpu(task); int si_code; char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : "simd exception"; @@ -1386,11 +1387,11 @@ static bool handle_xfd_event(struct pt_regs *regs) if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD)) return false; - rdmsrl(MSR_IA32_XFD_ERR, xfd_err); + rdmsrq(MSR_IA32_XFD_ERR, xfd_err); if (!xfd_err) return false; - wrmsrl(MSR_IA32_XFD_ERR, 0); + wrmsrq(MSR_IA32_XFD_ERR, 0); /* Die if that happens in kernel space */ if (WARN_ON(!user_mode(regs))) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 88e5a4ed9db3..87e749106dda 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -16,7 +16,7 @@ #include <linux/static_key.h> #include <linux/static_call.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/hpet.h> #include <asm/timer.h> #include <asm/vgtod.h> @@ -29,6 +29,7 @@ #include <asm/apic.h> #include <asm/cpu_device_id.h> #include <asm/i8259.h> +#include <asm/msr.h> #include <asm/topology.h> #include <asm/uv/uv.h> #include <asm/sev.h> @@ -1098,7 +1099,7 @@ static void __init detect_art(void) if (art_base_clk.denominator < ART_MIN_DENOMINATOR) return; - rdmsrl(MSR_IA32_TSC_ADJUST, art_base_clk.offset); + rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset); /* Make this sticky over multiple CPU init calls */ setup_force_cpu_cap(X86_FEATURE_ART); diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 4334033658ed..ec3aa340d351 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/smp.h> #include <linux/nmi.h> +#include <asm/msr.h> #include <asm/tsc.h> struct tsc_adjust { @@ -65,12 +66,12 @@ void tsc_verify_tsc_adjust(bool resume) adj->nextcheck = jiffies + HZ; - rdmsrl(MSR_IA32_TSC_ADJUST, curval); + rdmsrq(MSR_IA32_TSC_ADJUST, curval); if (adj->adjusted == curval) return; /* Restore the original value */ - wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted); + wrmsrq(MSR_IA32_TSC_ADJUST, adj->adjusted); if (!adj->warned || resume) { pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. 
Restoring\n", @@ -142,7 +143,7 @@ static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval, if (likely(!tsc_async_resets)) { pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu, bootval); - wrmsrl(MSR_IA32_TSC_ADJUST, 0); + wrmsrq(MSR_IA32_TSC_ADJUST, 0); bootval = 0; } else { pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n", @@ -165,7 +166,7 @@ bool __init tsc_store_and_check_tsc_adjust(bool bootcpu) if (check_tsc_unstable()) return false; - rdmsrl(MSR_IA32_TSC_ADJUST, bootval); + rdmsrq(MSR_IA32_TSC_ADJUST, bootval); cur->bootval = bootval; cur->nextcheck = jiffies + HZ; tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu); @@ -187,7 +188,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST)) return false; - rdmsrl(MSR_IA32_TSC_ADJUST, bootval); + rdmsrq(MSR_IA32_TSC_ADJUST, bootval); cur->bootval = bootval; cur->nextcheck = jiffies + HZ; cur->warned = false; @@ -229,7 +230,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) */ if (bootval != ref->adjusted) { cur->adjusted = ref->adjusted; - wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted); + wrmsrq(MSR_IA32_TSC_ADJUST, ref->adjusted); } /* * We have the TSCs forced to be in sync on this package. Skip sync @@ -518,7 +519,7 @@ retry: pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n", cpu, cur_max_warp, cur->adjusted); - wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted); + wrmsrq(MSR_IA32_TSC_ADJUST, cur->adjusted); goto retry; } diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 9194695662b2..6d383839e839 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -840,6 +840,11 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) insn_byte_t p; int i; + /* x86_nops[insn->length]; same as jmp with .offs = 0 */ + if (insn->length <= ASM_NOP_MAX && + !memcmp(insn->kaddr, x86_nops[insn->length], insn->length)) + goto setup; + switch (opc1) { case 0xeb: /* jmp 8 */ case 0xe9: /* jmp 32 */ diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index cda5f8362e9d..4fa0be732af1 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -79,11 +79,13 @@ const_cpu_current_top_of_stack = cpu_current_top_of_stack; #define BSS_DECRYPTED \ . = ALIGN(PMD_SIZE); \ __start_bss_decrypted = .; \ + __pi___start_bss_decrypted = .; \ *(.bss..decrypted); \ . = ALIGN(PAGE_SIZE); \ __start_bss_decrypted_unused = .; \ . = ALIGN(PMD_SIZE); \ __end_bss_decrypted = .; \ + __pi___end_bss_decrypted = .; \ #else @@ -128,6 +130,7 @@ SECTIONS /* Text and read-only data */ .text : AT(ADDR(.text) - LOAD_OFFSET) { _text = .; + __pi__text = .; _stext = .; ALIGN_ENTRY_TEXT_BEGIN *(.text..__x86.rethunk_untrain) @@ -391,6 +394,7 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */ _end = .; + __pi__end = .; #ifdef CONFIG_AMD_MEM_ENCRYPT /* diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 571c906ffcbf..ecd85f4801cc 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -21,7 +21,7 @@ #include <asm/user.h> #include <asm/fpu/xstate.h> #include <asm/sgx.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include "cpuid.h" #include "lapic.h" #include "mmu.h" @@ -236,7 +236,7 @@ static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcp struct kvm_cpuid_entry2 *entry; u32 base; - for_each_possible_hypervisor_cpuid_base(base) { + for_each_possible_cpuid_base_hypervisor(base) { entry = kvm_find_cpuid_entry(vcpu, base); if (entry) { diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 7338879d1c0c..067f8e3f5a0d 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -20,6 +20,7 @@ #include <linux/kvm_host.h> #include <asm/irq_remapping.h> +#include <asm/msr.h> #include "trace.h" #include "lapic.h" @@ -330,7 +331,7 @@ void avic_ring_doorbell(struct kvm_vcpu *vcpu) int cpu = READ_ONCE(vcpu->cpu); if (cpu != get_cpu()) { - wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu)); + wrmsrq(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu)); trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu)); } put_cpu(); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index a7a7dc507336..1aa0f07d3a63 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -26,6 +26,7 @@ #include <asm/fpu/xcr.h> #include <asm/fpu/xstate.h> #include <asm/debugreg.h> +#include <asm/msr.h> #include <asm/sev.h> #include "mmu.h" @@ -2933,6 +2934,7 @@ void __init sev_set_cpu_caps(void) void __init sev_hardware_setup(void) { unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count; + struct sev_platform_init_args init_args = {0}; bool sev_snp_supported = false; bool sev_es_supported = false; bool sev_supported = false; @@ -3059,6 +3061,15 @@ out: sev_supported_vmsa_features = 0; if (sev_es_debug_swap_enabled) sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP; + + if (!sev_enabled) + return; + + /* + * Do both SNP and SEV initialization at KVM module load. + */ + init_args.probe = true; + sev_platform_init(&init_args); } void sev_hardware_unsetup(void) @@ -3074,6 +3085,8 @@ void sev_hardware_unsetup(void) misc_cg_set_capacity(MISC_CG_RES_SEV, 0); misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0); + + sev_platform_shutdown(); } int sev_cpu_init(struct svm_cpu_data *sd) @@ -3119,7 +3132,7 @@ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va) * back to WBINVD if this faults so as not to make any problems worse * by leaving stale encrypted data in the cache. 
*/ - if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid))) + if (WARN_ON_ONCE(wrmsrq_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid))) goto do_wbinvd; return; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index a89c271a1951..67fee545d42a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -31,6 +31,7 @@ #include <linux/string_choices.h> #include <asm/apic.h> +#include <asm/msr.h> #include <asm/perf_event.h> #include <asm/tlbflush.h> #include <asm/desc.h> @@ -475,24 +476,18 @@ static void svm_inject_exception(struct kvm_vcpu *vcpu) static void svm_init_erratum_383(void) { - u32 low, high; - int err; u64 val; if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH)) return; /* Use _safe variants to not break nested virtualization */ - val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err); - if (err) + if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val)) return; val |= (1ULL << 47); - low = lower_32_bits(val); - high = upper_32_bits(val); - - native_write_msr_safe(MSR_AMD64_DC_CFG, low, high); + native_write_msr_safe(MSR_AMD64_DC_CFG, val); erratum_383_found = true; } @@ -566,7 +561,7 @@ static void __svm_write_tsc_multiplier(u64 multiplier) if (multiplier == __this_cpu_read(current_tsc_ratio)) return; - wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); + wrmsrq(MSR_AMD64_TSC_RATIO, multiplier); __this_cpu_write(current_tsc_ratio, multiplier); } @@ -579,15 +574,15 @@ static inline void kvm_cpu_svm_disable(void) { uint64_t efer; - wrmsrl(MSR_VM_HSAVE_PA, 0); - rdmsrl(MSR_EFER, efer); + wrmsrq(MSR_VM_HSAVE_PA, 0); + rdmsrq(MSR_EFER, efer); if (efer & EFER_SVME) { /* * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and * NMI aren't blocked. */ stgi(); - wrmsrl(MSR_EFER, efer & ~EFER_SVME); + wrmsrq(MSR_EFER, efer & ~EFER_SVME); } } @@ -616,7 +611,7 @@ static int svm_enable_virtualization_cpu(void) uint64_t efer; int me = raw_smp_processor_id(); - rdmsrl(MSR_EFER, efer); + rdmsrq(MSR_EFER, efer); if (efer & EFER_SVME) return -EBUSY; @@ -626,9 +621,9 @@ static int svm_enable_virtualization_cpu(void) sd->next_asid = sd->max_asid + 1; sd->min_asid = max_sev_asid + 1; - wrmsrl(MSR_EFER, efer | EFER_SVME); + wrmsrq(MSR_EFER, efer | EFER_SVME); - wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa); + wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa); if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { /* @@ -649,13 +644,12 @@ static int svm_enable_virtualization_cpu(void) * erratum is present everywhere). 
*/ if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { - uint64_t len, status = 0; + u64 len, status = 0; int err; - len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); + err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len); if (!err) - status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, - &err); + err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status); if (err) osvw_status = osvw_len = 0; @@ -2205,14 +2199,13 @@ static int ac_interception(struct kvm_vcpu *vcpu) static bool is_erratum_383(void) { - int err, i; + int i; u64 value; if (!erratum_383_found) return false; - value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err); - if (err) + if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value)) return false; /* Bit 62 may or may not be set for this mce */ @@ -2223,17 +2216,11 @@ static bool is_erratum_383(void) /* Clear MCi_STATUS registers */ for (i = 0; i < 6; ++i) - native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); - - value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); - if (!err) { - u32 low, high; + native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0); + if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) { value &= ~(1ULL << 2); - low = lower_32_bits(value); - high = upper_32_bits(value); - - native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); + native_write_msr_safe(MSR_IA32_MCG_STATUS, value); } /* Flush tlb to evict multi-match entries */ @@ -5295,7 +5282,7 @@ static __init void svm_adjust_mmio_mask(void) return; /* If memory encryption is not enabled, use existing mask */ - rdmsrl(MSR_AMD64_SYSCFG, msr); + rdmsrq(MSR_AMD64_SYSCFG, msr); if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) return; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 5504d9e9fd32..d268224227f0 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6,6 +6,7 @@ #include <asm/debugreg.h> #include <asm/mmu_context.h> +#include <asm/msr.h> #include "x86.h" #include "cpuid.h" @@ -7202,8 +7203,8 @@ static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs) msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; /* These MSRs specify bits which the guest must keep fixed off. */ - rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); - rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); + rdmsrq(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); + rdmsrq(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); if (vmx_umip_emulated()) msrs->cr4_fixed1 |= X86_CR4_UMIP; diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 77012b2eca0e..231a9633359c 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/kvm_host.h> #include <linux/perf_event.h> +#include <asm/msr.h> #include <asm/perf_event.h> #include "x86.h" #include "cpuid.h" @@ -279,9 +280,9 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu, local_irq_disable(); if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) { if (read) - rdmsrl(index, msr_info->data); + rdmsrq(index, msr_info->data); else - wrmsrl(index, msr_info->data); + wrmsrq(index, msr_info->data); __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); local_irq_enable(); return true; diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 9961e07cf071..df1d0cf76947 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2021 Intel Corporation. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/msr.h> #include <asm/sgx.h> #include "x86.h" @@ -411,16 +412,16 @@ void setup_default_sgx_lepubkeyhash(void) * MSRs exist but are read-only (locked and not writable). */ if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) || - rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) { + rdmsrq_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) { sgx_pubkey_hash[0] = 0xa6053e051270b7acULL; sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL; sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL; sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL; } else { /* MSR_IA32_SGXLEPUBKEYHASH0 is read above */ - rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]); - rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]); - rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]); + rdmsrq(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]); + rdmsrq(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]); + rdmsrq(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]); } } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 5c5766467a61..157c23db22be 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -46,6 +46,7 @@ #include <asm/perf_event.h> #include <asm/mmu_context.h> #include <asm/mshyperv.h> +#include <asm/msr.h> #include <asm/mwait.h> #include <asm/spec-ctrl.h> #include <asm/vmx.h> @@ -273,6 +274,7 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) case L1TF_MITIGATION_OFF: l1tf = VMENTER_L1D_FLUSH_NEVER; break; + case L1TF_MITIGATION_AUTO: case L1TF_MITIGATION_FLUSH_NOWARN: case L1TF_MITIGATION_FLUSH: case L1TF_MITIGATION_FLUSH_NOSMT: @@ -380,9 +382,9 @@ static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) if (!vmx->disable_fb_clear) return; - msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL); + msr = native_rdmsrq(MSR_IA32_MCU_OPT_CTRL); msr |= FB_CLEAR_DIS; - native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, msr); /* Cache the MSR value to avoid reading it later */ vmx->msr_ia32_mcu_opt_ctrl = msr; } @@ -393,7 +395,7 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) return; vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; - native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); + native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); } static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) @@ -1063,7 +1065,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, * provide that period, so a CPU could write host's record into * guest's memory. 
*/ - wrmsrl(MSR_IA32_PEBS_ENABLE, 0); + wrmsrq(MSR_IA32_PEBS_ENABLE, 0); } i = vmx_find_loadstore_msr_slot(&m->guest, msr); @@ -1192,13 +1194,13 @@ static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range) { u32 i; - wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); - wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); - wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); - wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); + wrmsrq(MSR_IA32_RTIT_STATUS, ctx->status); + wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); + wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); + wrmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); for (i = 0; i < addr_range; i++) { - wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); - wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); + wrmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); + wrmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); } } @@ -1206,13 +1208,13 @@ static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range) { u32 i; - rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); - rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); - rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); - rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); + rdmsrq(MSR_IA32_RTIT_STATUS, ctx->status); + rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); + rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); + rdmsrq(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); for (i = 0; i < addr_range; i++) { - rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); - rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); + rdmsrq(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); + rdmsrq(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); } } @@ -1225,9 +1227,9 @@ static void pt_guest_enter(struct vcpu_vmx *vmx) * GUEST_IA32_RTIT_CTL is already set in the VMCS. * Save host state before VM entry. */ - rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); + rdmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { - wrmsrl(MSR_IA32_RTIT_CTL, 0); + wrmsrq(MSR_IA32_RTIT_CTL, 0); pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges); pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges); } @@ -1248,7 +1250,7 @@ static void pt_guest_exit(struct vcpu_vmx *vmx) * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary. 
*/ if (vmx->pt_desc.host.ctl) - wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); + wrmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); } void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, @@ -1338,7 +1340,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); } - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); + wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #else savesegment(fs, fs_sel); savesegment(gs, gs_sel); @@ -1362,7 +1364,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) ++vmx->vcpu.stat.host_state_reload; #ifdef CONFIG_X86_64 - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); + rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (host_state->ldt_sel || (host_state->gs_sel & 7)) { kvm_load_ldt(host_state->ldt_sel); @@ -1382,7 +1384,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) #endif invalidate_tss_limit(); #ifdef CONFIG_X86_64 - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); + wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif load_fixmap_gdt(raw_smp_processor_id()); vmx->guest_state_loaded = false; @@ -1394,7 +1396,7 @@ static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) { preempt_disable(); if (vmx->guest_state_loaded) - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); + rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); preempt_enable(); return vmx->msr_guest_kernel_gs_base; } @@ -1403,7 +1405,7 @@ static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) { preempt_disable(); if (vmx->guest_state_loaded) - wrmsrl(MSR_KERNEL_GS_BASE, data); + wrmsrq(MSR_KERNEL_GS_BASE, data); preempt_enable(); vmx->msr_guest_kernel_gs_base = data; } @@ -2574,7 +2576,7 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr) { u64 allowed; - rdmsrl(msr, allowed); + rdmsrq(msr, allowed); return ctl_opt & allowed; } @@ -2746,7 +2748,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, break; } - rdmsrl(MSR_IA32_VMX_BASIC, basic_msr); + rdmsrq(MSR_IA32_VMX_BASIC, basic_msr); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ if (vmx_basic_vmcs_size(basic_msr) > PAGE_SIZE) @@ -2766,7 +2768,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, if (vmx_basic_vmcs_mem_type(basic_msr) != X86_MEMTYPE_WB) return -EIO; - rdmsrl(MSR_IA32_VMX_MISC, misc_msr); + rdmsrq(MSR_IA32_VMX_MISC, misc_msr); vmcs_conf->basic = basic_msr; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; @@ -2850,7 +2852,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer) fault: WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n", - rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr); + rdmsrq_safe(MSR_IA32_FEAT_CTL, &msr) ? 
0xdeadbeef : msr); cr4_clear_bits(X86_CR4_VMXE); return -EFAULT; @@ -4391,7 +4393,7 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx) if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32)) vmcs_writel(HOST_IA32_SYSENTER_ESP, 0); - rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); + rdmsrq(MSR_IA32_SYSENTER_EIP, tmpl); vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { @@ -6745,7 +6747,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) vcpu->stat.l1d_flush++; if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { - native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); + native_wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH); return; } @@ -7052,7 +7054,7 @@ static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu) * the #NM exception. */ if (is_xfd_nm_fault(vcpu)) - rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); + rdmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); } static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info) @@ -7307,7 +7309,7 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, return; if (flags & VMX_RUN_SAVE_SPEC_CTRL) - vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL); + vmx->spec_ctrl = native_rdmsrq(MSR_IA32_SPEC_CTRL); /* * If the guest/host SPEC_CTRL values differ, restore the host value. @@ -7318,7 +7320,7 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, */ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) || vmx->spec_ctrl != hostval) - native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval); + native_wrmsrq(MSR_IA32_SPEC_CTRL, hostval); barrier_nospec(); } @@ -7358,10 +7360,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, * mitigation for MDS is done late in VMentry and is still * executed in spite of L1D Flush. This is because an extra VERW * should not matter much after the big hammer L1D Flush. + * + * cpu_buf_vm_clear is used when system is not vulnerable to MDS/TAA, + * and is affected by MMIO Stale Data. In such cases mitigation in only + * needed against an MMIO capable guest. 
*/ if (static_branch_unlikely(&vmx_l1d_should_flush)) vmx_l1d_flush(vcpu); - else if (static_branch_unlikely(&mmio_stale_data_clear) && + else if (static_branch_unlikely(&cpu_buf_vm_clear) && kvm_arch_has_assigned_device(vcpu->kvm)) mds_clear_cpu_buffers(); @@ -7700,6 +7706,7 @@ int vmx_vm_init(struct kvm *kvm) case L1TF_MITIGATION_FLUSH_NOWARN: /* 'I explicitly don't care' is set */ break; + case L1TF_MITIGATION_AUTO: case L1TF_MITIGATION_FLUSH: case L1TF_MITIGATION_FLUSH_NOSMT: case L1TF_MITIGATION_FULL: @@ -7959,7 +7966,7 @@ static __init u64 vmx_get_perf_capabilities(void) return 0; if (boot_cpu_has(X86_FEATURE_PDCM)) - rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); + rdmsrq(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) { x86_perf_get_lbr(&vmx_lbr_caps); @@ -8508,7 +8515,7 @@ __init int vmx_hardware_setup(void) kvm_enable_efer_bits(EFER_NX); if (boot_cpu_has(X86_FEATURE_MPX)) { - rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); + rdmsrq(MSR_IA32_BNDCFGS, host_bndcfgs); WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost"); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index be7bb6d20129..5bdb5b854924 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -578,7 +578,7 @@ static void kvm_on_user_return(struct user_return_notifier *urn) for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) { values = &msrs->values[slot]; if (values->host != values->curr) { - wrmsrl(kvm_uret_msrs_list[slot], values->host); + wrmsrq(kvm_uret_msrs_list[slot], values->host); values->curr = values->host; } } @@ -590,10 +590,10 @@ static int kvm_probe_user_return_msr(u32 msr) int ret; preempt_disable(); - ret = rdmsrl_safe(msr, &val); + ret = rdmsrq_safe(msr, &val); if (ret) goto out; - ret = wrmsrl_safe(msr, val); + ret = wrmsrq_safe(msr, val); out: preempt_enable(); return ret; @@ -630,7 +630,7 @@ static void kvm_user_return_msr_cpu_online(void) int i; for (i = 0; i < kvm_nr_uret_msrs; ++i) { - rdmsrl_safe(kvm_uret_msrs_list[i], &value); + rdmsrq_safe(kvm_uret_msrs_list[i], &value); msrs->values[i].host = value; msrs->values[i].curr = value; } @@ -644,7 +644,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) value = (value & mask) | (msrs->values[slot].host & ~mask); if (value == msrs->values[slot].curr) return 0; - err = wrmsrl_safe(kvm_uret_msrs_list[slot], value); + err = wrmsrq_safe(kvm_uret_msrs_list[slot], value); if (err) return 1; @@ -1174,7 +1174,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) && vcpu->arch.ia32_xss != kvm_host.xss) - wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); + wrmsrq(MSR_IA32_XSS, vcpu->arch.ia32_xss); } if (cpu_feature_enabled(X86_FEATURE_PKU) && @@ -1205,7 +1205,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) && vcpu->arch.ia32_xss != kvm_host.xss) - wrmsrl(MSR_IA32_XSS, kvm_host.xss); + wrmsrq(MSR_IA32_XSS, kvm_host.xss); } } @@ -1662,7 +1662,7 @@ static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, *data = MSR_PLATFORM_INFO_CPUID_FAULT; break; case MSR_IA32_UCODE_REV: - rdmsrl_safe(index, data); + rdmsrq_safe(index, data); break; default: return kvm_x86_call(get_feature_msr)(index, data); @@ -3829,7 +3829,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!data) break; - wrmsrl(MSR_IA32_PRED_CMD, data); + wrmsrq(MSR_IA32_PRED_CMD, data); break; } case MSR_IA32_FLUSH_CMD: @@ -3842,7 +3842,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, 
struct msr_data *msr_info) if (!data) break; - wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); + wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH); break; case MSR_EFER: return set_efer(vcpu, msr_info); @@ -9738,7 +9738,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) * with an exception. PAT[0] is set to WB on RESET and also by the * kernel, i.e. failure indicates a kernel bug or broken firmware. */ - if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) || + if (rdmsrq_safe(MSR_IA32_CR_PAT, &host_pat) || (host_pat & GENMASK(2, 0)) != 6) { pr_err("host PAT[0] is not WB\n"); return -EIO; @@ -9772,15 +9772,15 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0; } - rdmsrl_safe(MSR_EFER, &kvm_host.efer); + rdmsrq_safe(MSR_EFER, &kvm_host.efer); if (boot_cpu_has(X86_FEATURE_XSAVES)) - rdmsrl(MSR_IA32_XSS, kvm_host.xss); + rdmsrq(MSR_IA32_XSS, kvm_host.xss); kvm_init_pmu_capability(ops->pmu_ops); if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities); + rdmsrq(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities); r = ops->hardware_setup(); if (r != 0) @@ -10976,7 +10976,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) switch_fpu_return(); if (vcpu->arch.guest_fpu.xfd_err) - wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); + wrmsrq(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); @@ -11062,7 +11062,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_x86_call(handle_exit_irqoff)(vcpu); if (vcpu->arch.guest_fpu.xfd_err) - wrmsrl(MSR_IA32_XFD_ERR, 0); + wrmsrq(MSR_IA32_XFD_ERR, 0); /* * Consume any pending interrupts, including the possible source of @@ -13668,12 +13668,12 @@ int kvm_spec_ctrl_test_value(u64 value) local_irq_save(flags); - if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value)) + if (rdmsrq_safe(MSR_IA32_SPEC_CTRL, &saved_value)) ret = 1; - else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value)) + else if (wrmsrq_safe(MSR_IA32_SPEC_CTRL, value)) ret = 1; else - wrmsrl(MSR_IA32_SPEC_CTRL, saved_value); + wrmsrq(MSR_IA32_SPEC_CTRL, saved_value); local_irq_restore(flags); diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 1c50352eb49f..4fa5c4e1ba8a 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -3,6 +3,8 @@ # Makefile for x86 specific library files. # +obj-y += crypto/ + # Produces uninteresting flaky coverage. 
KCOV_INSTRUMENT_delay.o := n @@ -39,14 +41,14 @@ lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o obj-$(CONFIG_CRC32_ARCH) += crc32-x86.o -crc32-x86-y := crc32-glue.o crc32-pclmul.o +crc32-x86-y := crc32.o crc32-pclmul.o crc32-x86-$(CONFIG_64BIT) += crc32c-3way.o obj-$(CONFIG_CRC64_ARCH) += crc64-x86.o -crc64-x86-y := crc64-glue.o crc64-pclmul.o +crc64-x86-y := crc64.o crc64-pclmul.o obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-x86.o -crc-t10dif-x86-y := crc-t10dif-glue.o crc16-msb-pclmul.o +crc-t10dif-x86-y := crc-t10dif.o crc16-msb-pclmul.o obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o obj-y += iomem.o diff --git a/arch/x86/lib/crc-t10dif-glue.c b/arch/x86/lib/crc-t10dif.c index f89c335cde3c..db7ce59c31ac 100644 --- a/arch/x86/lib/crc-t10dif-glue.c +++ b/arch/x86/lib/crc-t10dif.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include "crc-pclmul-template.h" -static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq); DECLARE_CRC_PCLMUL_FUNCS(crc16_msb, u16); @@ -29,7 +29,7 @@ static int __init crc_t10dif_x86_init(void) } return 0; } -arch_initcall(crc_t10dif_x86_init); +subsys_initcall(crc_t10dif_x86_init); static void __exit crc_t10dif_x86_exit(void) { diff --git a/arch/x86/lib/crc32-glue.c b/arch/x86/lib/crc32.c index e3f93b17ac3f..d09343e2cea9 100644 --- a/arch/x86/lib/crc32-glue.c +++ b/arch/x86/lib/crc32.c @@ -11,8 +11,8 @@ #include <linux/module.h> #include "crc-pclmul-template.h" -static DEFINE_STATIC_KEY_FALSE(have_crc32); -static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq); DECLARE_CRC_PCLMUL_FUNCS(crc32_lsb, u32); @@ -88,7 +88,7 @@ static int __init crc32_x86_init(void) } return 0; } -arch_initcall(crc32_x86_init); +subsys_initcall(crc32_x86_init); static void __exit crc32_x86_exit(void) { diff --git a/arch/x86/lib/crc64-glue.c b/arch/x86/lib/crc64.c index b0e1b719ecbf..351a09f5813e 100644 --- a/arch/x86/lib/crc64-glue.c +++ b/arch/x86/lib/crc64.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include "crc-pclmul-template.h" -static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq); DECLARE_CRC_PCLMUL_FUNCS(crc64_msb, u64); DECLARE_CRC_PCLMUL_FUNCS(crc64_lsb, u64); @@ -39,7 +39,7 @@ static int __init crc64_x86_init(void) } return 0; } -arch_initcall(crc64_x86_init); +subsys_initcall(crc64_x86_init); static void __exit crc64_x86_exit(void) { diff --git a/arch/x86/lib/crypto/.gitignore b/arch/x86/lib/crypto/.gitignore new file mode 100644 index 000000000000..580c839bb177 --- /dev/null +++ b/arch/x86/lib/crypto/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +poly1305-x86_64-cryptogams.S diff --git a/arch/x86/lib/crypto/Kconfig b/arch/x86/lib/crypto/Kconfig new file mode 100644 index 000000000000..5e94cdee492c --- /dev/null +++ b/arch/x86/lib/crypto/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_BLAKE2S_X86 + bool "Hash functions: BLAKE2s (SSSE3/AVX-512)" + depends on 64BIT + select CRYPTO_LIB_BLAKE2S_GENERIC + select CRYPTO_ARCH_HAVE_LIB_BLAKE2S + help + BLAKE2s cryptographic hash function (RFC 7693) + + Architecture: x86_64 using: + - SSSE3 (Supplemental SSE3) + - AVX-512 (Advanced Vector Extensions-512) + +config CRYPTO_CHACHA20_X86_64 + tristate + depends on 64BIT + default CRYPTO_LIB_CHACHA + select CRYPTO_LIB_CHACHA_GENERIC + select 
CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_POLY1305_X86_64 + tristate + depends on 64BIT + default CRYPTO_LIB_POLY1305 + select CRYPTO_ARCH_HAVE_LIB_POLY1305 + +config CRYPTO_SHA256_X86_64 + tristate + depends on 64BIT + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD + select CRYPTO_LIB_SHA256_GENERIC diff --git a/arch/x86/lib/crypto/Makefile b/arch/x86/lib/crypto/Makefile new file mode 100644 index 000000000000..abceca3d31c0 --- /dev/null +++ b/arch/x86/lib/crypto/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += libblake2s-x86_64.o +libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o + +obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o +chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha-avx512vl-x86_64.o chacha_glue.o + +obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o +poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o +targets += poly1305-x86_64-cryptogams.S + +obj-$(CONFIG_CRYPTO_SHA256_X86_64) += sha256-x86_64.o +sha256-x86_64-y := sha256.o sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256-ni-asm.o + +quiet_cmd_perlasm = PERLASM $@ + cmd_perlasm = $(PERL) $< > $@ + +$(obj)/%.S: $(src)/%.pl FORCE + $(call if_changed,perlasm) diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/lib/crypto/blake2s-core.S index b50b35ff1fdb..ac1c845445a4 100644 --- a/arch/x86/crypto/blake2s-core.S +++ b/arch/x86/lib/crypto/blake2s-core.S @@ -29,7 +29,6 @@ SIGMA: .byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6 .byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4 .byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12 -#ifdef CONFIG_AS_AVX512 .section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640 .align 64 SIGMA2: @@ -43,7 +42,6 @@ SIGMA2: .long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10 .long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14 .long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9 -#endif /* CONFIG_AS_AVX512 */ .text SYM_FUNC_START(blake2s_compress_ssse3) @@ -174,7 +172,6 @@ SYM_FUNC_START(blake2s_compress_ssse3) RET SYM_FUNC_END(blake2s_compress_ssse3) -#ifdef CONFIG_AS_AVX512 SYM_FUNC_START(blake2s_compress_avx512) vmovdqu (%rdi),%xmm0 vmovdqu 0x10(%rdi),%xmm1 @@ -253,4 +250,3 @@ SYM_FUNC_START(blake2s_compress_avx512) vzeroupper RET SYM_FUNC_END(blake2s_compress_avx512) -#endif /* CONFIG_AS_AVX512 */ diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/lib/crypto/blake2s-glue.c index 0313f9673f56..adc296cd17c9 100644 --- a/arch/x86/crypto/blake2s-glue.c +++ b/arch/x86/lib/crypto/blake2s-glue.c @@ -3,17 +3,15 @@ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
*/ -#include <crypto/internal/blake2s.h> - -#include <linux/types.h> -#include <linux/jump_label.h> -#include <linux/kernel.h> -#include <linux/sizes.h> - #include <asm/cpufeature.h> #include <asm/fpu/api.h> #include <asm/processor.h> #include <asm/simd.h> +#include <crypto/internal/blake2s.h> +#include <linux/init.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/sizes.h> asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state, const u8 *block, const size_t nblocks, @@ -41,8 +39,7 @@ void blake2s_compress(struct blake2s_state *state, const u8 *block, SZ_4K / BLAKE2S_BLOCK_SIZE); kernel_fpu_begin(); - if (IS_ENABLED(CONFIG_AS_AVX512) && - static_branch_likely(&blake2s_use_avx512)) + if (static_branch_likely(&blake2s_use_avx512)) blake2s_compress_avx512(state, block, blocks, inc); else blake2s_compress_ssse3(state, block, blocks, inc); @@ -59,8 +56,7 @@ static int __init blake2s_mod_init(void) if (boot_cpu_has(X86_FEATURE_SSSE3)) static_branch_enable(&blake2s_use_ssse3); - if (IS_ENABLED(CONFIG_AS_AVX512) && - boot_cpu_has(X86_FEATURE_AVX) && + if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) && boot_cpu_has(X86_FEATURE_AVX512VL) && diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/lib/crypto/chacha-avx2-x86_64.S index f3d8fc018249..f3d8fc018249 100644 --- a/arch/x86/crypto/chacha-avx2-x86_64.S +++ b/arch/x86/lib/crypto/chacha-avx2-x86_64.S diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/lib/crypto/chacha-avx512vl-x86_64.S index 259383e1ad44..259383e1ad44 100644 --- a/arch/x86/crypto/chacha-avx512vl-x86_64.S +++ b/arch/x86/lib/crypto/chacha-avx512vl-x86_64.S diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/lib/crypto/chacha-ssse3-x86_64.S index 7111949cd5b9..7111949cd5b9 100644 --- a/arch/x86/crypto/chacha-ssse3-x86_64.S +++ b/arch/x86/lib/crypto/chacha-ssse3-x86_64.S diff --git a/arch/x86/lib/crypto/chacha_glue.c b/arch/x86/lib/crypto/chacha_glue.c new file mode 100644 index 000000000000..10b2c945f541 --- /dev/null +++ b/arch/x86/lib/crypto/chacha_glue.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ChaCha and HChaCha functions (x86_64 optimized) + * + * Copyright (C) 2015 Martin Willi + */ + +#include <asm/simd.h> +#include <crypto/chacha.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sizes.h> + +asmlinkage void chacha_block_xor_ssse3(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_4block_xor_ssse3(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void hchacha_block_ssse3(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); + +asmlinkage void chacha_2block_xor_avx2(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_4block_xor_avx2(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_8block_xor_avx2(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); + +asmlinkage void chacha_2block_xor_avx512vl(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void chacha_4block_xor_avx512vl(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); +asmlinkage void 
chacha_8block_xor_avx512vl(const struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int len, int nrounds); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl); + +static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks) +{ + len = min(len, maxblocks * CHACHA_BLOCK_SIZE); + return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE; +} + +static void chacha_dosimd(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (static_branch_likely(&chacha_use_avx512vl)) { + while (bytes >= CHACHA_BLOCK_SIZE * 8) { + chacha_8block_xor_avx512vl(state, dst, src, bytes, + nrounds); + bytes -= CHACHA_BLOCK_SIZE * 8; + src += CHACHA_BLOCK_SIZE * 8; + dst += CHACHA_BLOCK_SIZE * 8; + state->x[12] += 8; + } + if (bytes > CHACHA_BLOCK_SIZE * 4) { + chacha_8block_xor_avx512vl(state, dst, src, bytes, + nrounds); + state->x[12] += chacha_advance(bytes, 8); + return; + } + if (bytes > CHACHA_BLOCK_SIZE * 2) { + chacha_4block_xor_avx512vl(state, dst, src, bytes, + nrounds); + state->x[12] += chacha_advance(bytes, 4); + return; + } + if (bytes) { + chacha_2block_xor_avx512vl(state, dst, src, bytes, + nrounds); + state->x[12] += chacha_advance(bytes, 2); + return; + } + } + + if (static_branch_likely(&chacha_use_avx2)) { + while (bytes >= CHACHA_BLOCK_SIZE * 8) { + chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); + bytes -= CHACHA_BLOCK_SIZE * 8; + src += CHACHA_BLOCK_SIZE * 8; + dst += CHACHA_BLOCK_SIZE * 8; + state->x[12] += 8; + } + if (bytes > CHACHA_BLOCK_SIZE * 4) { + chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); + state->x[12] += chacha_advance(bytes, 8); + return; + } + if (bytes > CHACHA_BLOCK_SIZE * 2) { + chacha_4block_xor_avx2(state, dst, src, bytes, nrounds); + state->x[12] += chacha_advance(bytes, 4); + return; + } + if (bytes > CHACHA_BLOCK_SIZE) { + chacha_2block_xor_avx2(state, dst, src, bytes, nrounds); + state->x[12] += chacha_advance(bytes, 2); + return; + } + } + + while (bytes >= CHACHA_BLOCK_SIZE * 4) { + chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); + bytes -= CHACHA_BLOCK_SIZE * 4; + src += CHACHA_BLOCK_SIZE * 4; + dst += CHACHA_BLOCK_SIZE * 4; + state->x[12] += 4; + } + if (bytes > CHACHA_BLOCK_SIZE) { + chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); + state->x[12] += chacha_advance(bytes, 4); + return; + } + if (bytes) { + chacha_block_xor_ssse3(state, dst, src, bytes, nrounds); + state->x[12]++; + } +} + +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) +{ + if (!static_branch_likely(&chacha_use_simd)) { + hchacha_block_generic(state, out, nrounds); + } else { + kernel_fpu_begin(); + hchacha_block_ssse3(state, out, nrounds); + kernel_fpu_end(); + } +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + if (!static_branch_likely(&chacha_use_simd) || + bytes <= CHACHA_BLOCK_SIZE) + return chacha_crypt_generic(state, dst, src, bytes, nrounds); + + do { + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); + + kernel_fpu_begin(); + chacha_dosimd(state, dst, src, todo, nrounds); + kernel_fpu_end(); + + bytes -= todo; + src += todo; + dst += todo; + } while (bytes); +} +EXPORT_SYMBOL(chacha_crypt_arch); + +bool chacha_is_arch_optimized(void) +{ + return 
static_key_enabled(&chacha_use_simd); +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +static int __init chacha_simd_mod_init(void) +{ + if (!boot_cpu_has(X86_FEATURE_SSSE3)) + return 0; + + static_branch_enable(&chacha_use_simd); + + if (boot_cpu_has(X86_FEATURE_AVX) && + boot_cpu_has(X86_FEATURE_AVX2) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { + static_branch_enable(&chacha_use_avx2); + + if (boot_cpu_has(X86_FEATURE_AVX512VL) && + boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */ + static_branch_enable(&chacha_use_avx512vl); + } + return 0; +} +subsys_initcall(chacha_simd_mod_init); + +static void __exit chacha_simd_mod_exit(void) +{ +} +module_exit(chacha_simd_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("ChaCha and HChaCha functions (x86_64 optimized)"); diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/lib/crypto/poly1305-x86_64-cryptogams.pl index b9abcd79c1f4..501827254fed 100644 --- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl +++ b/arch/x86/lib/crypto/poly1305-x86_64-cryptogams.pl @@ -118,6 +118,19 @@ sub declare_function() { } } +sub declare_typed_function() { + my ($name, $align, $nargs) = @_; + if($kernel) { + $code .= "SYM_TYPED_FUNC_START($name)\n"; + $code .= ".L$name:\n"; + } else { + $code .= ".globl $name\n"; + $code .= ".type $name,\@function,$nargs\n"; + $code .= ".align $align\n"; + $code .= "$name:\n"; + } +} + sub end_function() { my ($name) = @_; if($kernel) { @@ -128,7 +141,7 @@ sub end_function() { } $code.=<<___ if $kernel; -#include <linux/linkage.h> +#include <linux/cfi_types.h> ___ if ($avx) { @@ -236,14 +249,14 @@ ___ $code.=<<___ if (!$kernel); .extern OPENSSL_ia32cap_P -.globl poly1305_init_x86_64 -.hidden poly1305_init_x86_64 +.globl poly1305_block_init_arch +.hidden poly1305_block_init_arch .globl poly1305_blocks_x86_64 .hidden poly1305_blocks_x86_64 .globl poly1305_emit_x86_64 .hidden poly1305_emit_x86_64 ___ -&declare_function("poly1305_init_x86_64", 32, 3); +&declare_typed_function("poly1305_block_init_arch", 32, 3); $code.=<<___; xor %eax,%eax mov %rax,0($ctx) # initialize hash value @@ -298,7 +311,7 @@ $code.=<<___; .Lno_key: RET ___ -&end_function("poly1305_init_x86_64"); +&end_function("poly1305_block_init_arch"); &declare_function("poly1305_blocks_x86_64", 32, 4); $code.=<<___; @@ -2811,18 +2824,10 @@ if ($avx>2) { # reason stack layout is kept identical to poly1305_blocks_avx2. If not # for this tail, we wouldn't have to even allocate stack frame... -if($kernel) { - $code .= "#ifdef CONFIG_AS_AVX512\n"; -} - &declare_function("poly1305_blocks_avx512", 32, 4); poly1305_blocks_avxN(1); &end_function("poly1305_blocks_avx512"); -if ($kernel) { - $code .= "#endif\n"; -} - if (!$kernel && $avx>3) { ######################################################################## # VPMADD52 version using 2^44 radix. 
@@ -4113,9 +4118,9 @@ avx_handler: .section .pdata .align 4 - .rva .LSEH_begin_poly1305_init_x86_64 - .rva .LSEH_end_poly1305_init_x86_64 - .rva .LSEH_info_poly1305_init_x86_64 + .rva .LSEH_begin_poly1305_block_init_arch + .rva .LSEH_end_poly1305_block_init_arch + .rva .LSEH_info_poly1305_block_init_arch .rva .LSEH_begin_poly1305_blocks_x86_64 .rva .LSEH_end_poly1305_blocks_x86_64 @@ -4163,10 +4168,10 @@ ___ $code.=<<___; .section .xdata .align 8 -.LSEH_info_poly1305_init_x86_64: +.LSEH_info_poly1305_block_init_arch: .byte 9,0,0,0 .rva se_handler - .rva .LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64 + .rva .LSEH_begin_poly1305_block_init_arch,.LSEH_begin_poly1305_block_init_arch .LSEH_info_poly1305_blocks_x86_64: .byte 9,0,0,0 diff --git a/arch/x86/lib/crypto/poly1305_glue.c b/arch/x86/lib/crypto/poly1305_glue.c new file mode 100644 index 000000000000..b7e78a583e07 --- /dev/null +++ b/arch/x86/lib/crypto/poly1305_glue.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. + */ + +#include <asm/cpu_device_id.h> +#include <asm/fpu/api.h> +#include <crypto/internal/poly1305.h> +#include <linux/jump_label.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sizes.h> +#include <linux/unaligned.h> + +struct poly1305_arch_internal { + union { + struct { + u32 h[5]; + u32 is_base2_26; + }; + u64 hs[3]; + }; + u64 r[2]; + u64 pad; + struct { u32 r2, r1, r4, r3; } rn[9]; +}; + +asmlinkage void poly1305_block_init_arch( + struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +EXPORT_SYMBOL_GPL(poly1305_block_init_arch); +asmlinkage void poly1305_blocks_x86_64(struct poly1305_arch_internal *ctx, + const u8 *inp, + const size_t len, const u32 padbit); +asmlinkage void poly1305_emit_x86_64(const struct poly1305_state *ctx, + u8 mac[POLY1305_DIGEST_SIZE], + const u32 nonce[4]); +asmlinkage void poly1305_emit_avx(const struct poly1305_state *ctx, + u8 mac[POLY1305_DIGEST_SIZE], + const u32 nonce[4]); +asmlinkage void poly1305_blocks_avx(struct poly1305_arch_internal *ctx, + const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_blocks_avx2(struct poly1305_arch_internal *ctx, + const u8 *inp, const size_t len, + const u32 padbit); +asmlinkage void poly1305_blocks_avx512(struct poly1305_arch_internal *ctx, + const u8 *inp, + const size_t len, const u32 padbit); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512); + +void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *inp, + unsigned int len, u32 padbit) +{ + struct poly1305_arch_internal *ctx = + container_of(&state->h.h, struct poly1305_arch_internal, h); + + /* SIMD disables preemption, so relax after processing each page. 
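That last remark ("SIMD disables preemption, so relax after processing each page") is why both the ChaCha and the Poly1305 glue walk their input in steps of at most 4 KiB between kernel_fpu_begin() and kernel_fpu_end(). A minimal userspace sketch of the same chunking pattern; begin_fpu_section()/end_fpu_section() are placeholder names, and the static assertion mirrors the BUILD_BUG_ON() that a chunk is a whole number of blocks:

/* Sketch of processing data in bounded chunks so that each "no-preemption"
 * (kernel_fpu_begin/end-style) section covers at most one 4 KiB page.
 * The begin/end helpers are placeholders for illustration only. */
#include <stddef.h>
#include <stdio.h>

#define BLOCK_SIZE 16u
#define CHUNK_MAX  4096u

/* Chunks must hold whole blocks, like the BUILD_BUG_ON() above. */
_Static_assert(CHUNK_MAX % BLOCK_SIZE == 0, "chunk must hold whole blocks");

static void begin_fpu_section(void) { /* kernel_fpu_begin() stand-in */ }
static void end_fpu_section(void)   { /* kernel_fpu_end() stand-in */ }

static void process_blocks(const unsigned char *p, size_t len)
{
	printf("processing %zu bytes in one FPU section\n", len);
	(void)p;
}

static void process_all(const unsigned char *p, size_t len)
{
	while (len) {
		size_t todo = len < CHUNK_MAX ? len : CHUNK_MAX;

		begin_fpu_section();
		process_blocks(p, todo);
		end_fpu_section();

		p += todo;
		len -= todo;
	}
}

int main(void)
{
	static unsigned char buf[3 * 4096 + 256];

	process_all(buf, sizeof(buf));
	return 0;
}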
*/ + BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE || + SZ_4K % POLY1305_BLOCK_SIZE); + + if (!static_branch_likely(&poly1305_use_avx)) { + poly1305_blocks_x86_64(ctx, inp, len, padbit); + return; + } + + do { + const unsigned int bytes = min(len, SZ_4K); + + kernel_fpu_begin(); + if (static_branch_likely(&poly1305_use_avx512)) + poly1305_blocks_avx512(ctx, inp, bytes, padbit); + else if (static_branch_likely(&poly1305_use_avx2)) + poly1305_blocks_avx2(ctx, inp, bytes, padbit); + else + poly1305_blocks_avx(ctx, inp, bytes, padbit); + kernel_fpu_end(); + + len -= bytes; + inp += bytes; + } while (len); +} +EXPORT_SYMBOL_GPL(poly1305_blocks_arch); + +void poly1305_emit_arch(const struct poly1305_state *ctx, + u8 mac[POLY1305_DIGEST_SIZE], const u32 nonce[4]) +{ + if (!static_branch_likely(&poly1305_use_avx)) + poly1305_emit_x86_64(ctx, mac, nonce); + else + poly1305_emit_avx(ctx, mac, nonce); +} +EXPORT_SYMBOL_GPL(poly1305_emit_arch); + +bool poly1305_is_arch_optimized(void) +{ + return static_key_enabled(&poly1305_use_avx); +} +EXPORT_SYMBOL(poly1305_is_arch_optimized); + +static int __init poly1305_simd_mod_init(void) +{ + if (boot_cpu_has(X86_FEATURE_AVX) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) + static_branch_enable(&poly1305_use_avx); + if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) + static_branch_enable(&poly1305_use_avx2); + if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX512F) && + cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) && + /* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */ + boot_cpu_data.x86_vfm != INTEL_SKYLAKE_X) + static_branch_enable(&poly1305_use_avx512); + return 0; +} +subsys_initcall(poly1305_simd_mod_init); + +static void __exit poly1305_simd_mod_exit(void) +{ +} +module_exit(poly1305_simd_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jason A. 
Donenfeld <Jason@zx2c4.com>"); +MODULE_DESCRIPTION("Poly1305 authenticator"); diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/lib/crypto/sha256-avx-asm.S index 53de72bdd851..0d7b2c3e45d9 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/lib/crypto/sha256-avx-asm.S @@ -48,7 +48,7 @@ ######################################################################## #include <linux/linkage.h> -#include <linux/cfi_types.h> +#include <linux/objtool.h> ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -341,13 +341,13 @@ a = TMP_ .endm ######################################################################## -## void sha256_transform_avx(state sha256_state *state, const u8 *data, int blocks) -## arg 1 : pointer to state -## arg 2 : pointer to input data -## arg 3 : Num blocks +## void sha256_transform_avx(u32 state[SHA256_STATE_WORDS], +## const u8 *data, size_t nblocks); ######################################################################## .text -SYM_TYPED_FUNC_START(sha256_transform_avx) +SYM_FUNC_START(sha256_transform_avx) + ANNOTATE_NOENDBR # since this is called only via static_call + pushq %rbx pushq %r12 pushq %r13 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/lib/crypto/sha256-avx2-asm.S index 0bbec1c75cd0..25d3380321ec 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/lib/crypto/sha256-avx2-asm.S @@ -49,7 +49,7 @@ ######################################################################## #include <linux/linkage.h> -#include <linux/cfi_types.h> +#include <linux/objtool.h> ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -518,13 +518,13 @@ STACK_SIZE = _CTX + _CTX_SIZE .endm ######################################################################## -## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks) -## arg 1 : pointer to state -## arg 2 : pointer to input data -## arg 3 : Num blocks +## void sha256_transform_rorx(u32 state[SHA256_STATE_WORDS], +## const u8 *data, size_t nblocks); ######################################################################## .text -SYM_TYPED_FUNC_START(sha256_transform_rorx) +SYM_FUNC_START(sha256_transform_rorx) + ANNOTATE_NOENDBR # since this is called only via static_call + pushq %rbx pushq %r12 pushq %r13 diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/lib/crypto/sha256-ni-asm.S index d515a55a3bc1..d3548206cf3d 100644 --- a/arch/x86/crypto/sha256_ni_asm.S +++ b/arch/x86/lib/crypto/sha256-ni-asm.S @@ -54,9 +54,9 @@ */ #include <linux/linkage.h> -#include <linux/cfi_types.h> +#include <linux/objtool.h> -#define DIGEST_PTR %rdi /* 1st arg */ +#define STATE_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ #define NUM_BLKS %rdx /* 3rd arg */ @@ -98,24 +98,20 @@ .endm /* - * Intel SHA Extensions optimized implementation of a SHA-256 update function + * Intel SHA Extensions optimized implementation of a SHA-256 block function * - * The function takes a pointer to the current hash values, a pointer to the - * input data, and a number of 64 byte blocks to process. Once all blocks have - * been processed, the digest pointer is updated with the resulting hash value. - * The function only processes complete blocks, there is no functionality to - * store partial blocks. All message padding and hash value initialization must - * be done outside the update function. + * This function takes a pointer to the current SHA-256 state, a pointer to the + * input data, and the number of 64-byte blocks to process. 
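The rewritten comment above spells out the contract all of these transform routines now share: eight 32-bit state words, a data pointer, and a count of complete 64-byte blocks, with padding and finalization left to the caller. A toy block function with the same shape, purely to illustrate the calling convention (the mixing is a placeholder, not SHA-256):

/* Toy block function with the sha256_transform_*() calling convention:
 * (u32 state[8], const u8 *data, size_t nblocks).  Only the interface is
 * the point; the arithmetic below is a placeholder, not SHA-256. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STATE_WORDS 8
#define BLOCK_BYTES 64

static void toy_transform(uint32_t state[STATE_WORDS],
			  const uint8_t *data, size_t nblocks)
{
	while (nblocks--) {
		for (int i = 0; i < BLOCK_BYTES; i++)
			state[i % STATE_WORDS] =
				(state[i % STATE_WORDS] * 31u) ^ data[i];
		data += BLOCK_BYTES;   /* whole blocks only, no padding here */
	}
}

int main(void)
{
	uint32_t state[STATE_WORDS] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t msg[2 * BLOCK_BYTES];

	memset(msg, 0xab, sizeof(msg));
	toy_transform(state, msg, sizeof(msg) / BLOCK_BYTES);
	printf("state[0] = 0x%08x\n", state[0]);
	return 0;
}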
Once all blocks + * have been processed, the state is updated with the new state. This function + * only processes complete blocks. State initialization, buffering of partial + * blocks, and digest finalization is expected to be handled elsewhere. * - * void sha256_ni_transform(uint32_t *digest, const void *data, - uint32_t numBlocks); - * digest : pointer to digest - * data: pointer to input data - * numBlocks: Number of blocks to process + * void sha256_ni_transform(u32 state[SHA256_STATE_WORDS], + * const u8 *data, size_t nblocks); */ - .text -SYM_TYPED_FUNC_START(sha256_ni_transform) +SYM_FUNC_START(sha256_ni_transform) + ANNOTATE_NOENDBR # since this is called only via static_call shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash @@ -126,8 +122,8 @@ SYM_TYPED_FUNC_START(sha256_ni_transform) * Need to reorder these appropriately * DCBA, HGFE -> ABEF, CDGH */ - movdqu 0*16(DIGEST_PTR), STATE0 /* DCBA */ - movdqu 1*16(DIGEST_PTR), STATE1 /* HGFE */ + movdqu 0*16(STATE_PTR), STATE0 /* DCBA */ + movdqu 1*16(STATE_PTR), STATE1 /* HGFE */ movdqa STATE0, TMP punpcklqdq STATE1, STATE0 /* FEBA */ @@ -166,8 +162,8 @@ SYM_TYPED_FUNC_START(sha256_ni_transform) pshufd $0xB1, STATE0, STATE0 /* HGFE */ pshufd $0x1B, STATE1, STATE1 /* DCBA */ - movdqu STATE1, 0*16(DIGEST_PTR) - movdqu STATE0, 1*16(DIGEST_PTR) + movdqu STATE1, 0*16(STATE_PTR) + movdqu STATE0, 1*16(STATE_PTR) .Ldone_hash: diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/lib/crypto/sha256-ssse3-asm.S index 93264ee44543..7f24a4cdcb25 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/lib/crypto/sha256-ssse3-asm.S @@ -47,7 +47,7 @@ ######################################################################## #include <linux/linkage.h> -#include <linux/cfi_types.h> +#include <linux/objtool.h> ## assume buffers not aligned #define MOVDQ movdqu @@ -348,15 +348,13 @@ a = TMP_ .endm ######################################################################## -## void sha256_transform_ssse3(struct sha256_state *state, const u8 *data, -## int blocks); -## arg 1 : pointer to state -## (struct sha256_state is assumed to begin with u32 state[8]) -## arg 2 : pointer to input data -## arg 3 : Num blocks +## void sha256_transform_ssse3(u32 state[SHA256_STATE_WORDS], +## const u8 *data, size_t nblocks); ######################################################################## .text -SYM_TYPED_FUNC_START(sha256_transform_ssse3) +SYM_FUNC_START(sha256_transform_ssse3) + ANNOTATE_NOENDBR # since this is called only via static_call + pushq %rbx pushq %r12 pushq %r13 diff --git a/arch/x86/lib/crypto/sha256.c b/arch/x86/lib/crypto/sha256.c new file mode 100644 index 000000000000..80380f8fdcee --- /dev/null +++ b/arch/x86/lib/crypto/sha256.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256 optimized for x86_64 + * + * Copyright 2025 Google LLC + */ +#include <asm/fpu/api.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/static_call.h> + +asmlinkage void sha256_transform_ssse3(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +asmlinkage void sha256_transform_avx(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +asmlinkage void sha256_transform_rorx(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +asmlinkage void sha256_ni_transform(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_x86); + +DEFINE_STATIC_CALL(sha256_blocks_x86, 
sha256_transform_ssse3); + +void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + if (static_branch_likely(&have_sha256_x86)) { + kernel_fpu_begin(); + static_call(sha256_blocks_x86)(state, data, nblocks); + kernel_fpu_end(); + } else { + sha256_blocks_generic(state, data, nblocks); + } +} +EXPORT_SYMBOL_GPL(sha256_blocks_simd); + +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + sha256_blocks_generic(state, data, nblocks); +} +EXPORT_SYMBOL_GPL(sha256_blocks_arch); + +bool sha256_is_arch_optimized(void) +{ + return static_key_enabled(&have_sha256_x86); +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +static int __init sha256_x86_mod_init(void) +{ + if (boot_cpu_has(X86_FEATURE_SHA_NI)) { + static_call_update(sha256_blocks_x86, sha256_ni_transform); + } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE | + XFEATURE_MASK_YMM, NULL) && + boot_cpu_has(X86_FEATURE_AVX)) { + if (boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_BMI2)) + static_call_update(sha256_blocks_x86, + sha256_transform_rorx); + else + static_call_update(sha256_blocks_x86, + sha256_transform_avx); + } else if (!boot_cpu_has(X86_FEATURE_SSSE3)) { + return 0; + } + static_branch_enable(&have_sha256_x86); + return 0; +} +subsys_initcall(sha256_x86_mod_init); + +static void __exit sha256_x86_mod_exit(void) +{ +} +module_exit(sha256_x86_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 optimized for x86_64"); diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index e86eda2c0b04..eb2d2e1cbddd 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -75,7 +75,7 @@ static void delay_tsc(u64 cycles) /* Allow RT tasks to run */ preempt_enable(); - rep_nop(); + native_pause(); preempt_disable(); /* diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 98631c0e7a11..4e385cbfd444 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -13,6 +13,7 @@ #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/ldt.h> +#include <asm/msr.h> #include <asm/vm86.h> #undef pr_fmt @@ -631,14 +632,21 @@ static bool get_desc(struct desc_struct *out, unsigned short sel) /* Bits [15:3] contain the index of the desired entry. */ sel >>= 3; - mutex_lock(¤t->active_mm->context.lock); - ldt = current->active_mm->context.ldt; + /* + * If we're not in a valid context with a real (not just lazy) + * user mm, then don't even try. + */ + if (!nmi_uaccess_okay()) + return false; + + mutex_lock(¤t->mm->context.lock); + ldt = current->mm->context.ldt; if (ldt && sel < ldt->nr_entries) { *out = ldt->entries[sel]; success = true; } - mutex_unlock(¤t->active_mm->context.lock); + mutex_unlock(¤t->mm->context.lock); return success; } @@ -702,16 +710,16 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx) unsigned long base; if (seg_reg_idx == INAT_SEG_REG_FS) { - rdmsrl(MSR_FS_BASE, base); + rdmsrq(MSR_FS_BASE, base); } else if (seg_reg_idx == INAT_SEG_REG_GS) { /* * swapgs was called at the kernel entry point. Thus, * MSR_KERNEL_GS_BASE will have the user-space GS base. 
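The rdmsrl() to rdmsrq() renames in this series only touch the in-kernel helpers, but the model behind them is easy to poke at from userspace: with the msr driver loaded (CONFIG_X86_MSR, usually "modprobe msr") and root privileges, each MSR is readable as an 8-byte value at offset <msr number> in /dev/cpu/N/msr. A hedged sketch of such a read; MSR_FS_BASE (0xc0000100) is used only as an example register:

/* Userspace sketch of an MSR read through the msr character device.
 * The pread offset is the MSR number; eight bytes come back.  Needs the
 * msr driver and root, and may be blocked by kernel lockdown. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int rdmsr_on_cpu0(uint32_t msr, uint64_t *val)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, val, sizeof(*val), msr) != sizeof(*val)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	uint64_t fs_base;

	if (rdmsr_on_cpu0(0xc0000100, &fs_base))   /* MSR_FS_BASE */
		perror("rdmsr");
	else
		printf("FS base on CPU0: 0x%" PRIx64 "\n", fs_base);
	return 0;
}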
*/ if (user_mode(regs)) - rdmsrl(MSR_KERNEL_GS_BASE, base); + rdmsrq(MSR_KERNEL_GS_BASE, base); else - rdmsrl(MSR_GS_BASE, base); + rdmsrq(MSR_GS_BASE, base); } else { base = 0; } diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 6ffb931b9fb1..149a57e334ab 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -324,6 +324,11 @@ int insn_get_opcode(struct insn *insn) } insn->attr = inat_get_opcode_attribute(op); + if (insn->x86_64 && inat_is_invalid64(insn->attr)) { + /* This instruction is invalid, like UD2. Stop decoding. */ + insn->attr &= INAT_INV64; + } + while (inat_is_escape(insn->attr)) { /* Get escaped opcode */ op = get_next(insn_byte_t, insn); @@ -337,6 +342,7 @@ int insn_get_opcode(struct insn *insn) insn->attr = 0; return -EINVAL; } + end: opcode->got = 1; return 0; @@ -658,7 +664,6 @@ int insn_get_immediate(struct insn *insn) } if (!inat_has_immediate(insn->attr)) - /* no immediates */ goto done; switch (inat_immediate_size(insn->attr)) { diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c index 5eecb45d05d5..c20e04764edc 100644 --- a/arch/x86/lib/iomem.c +++ b/arch/x86/lib/iomem.c @@ -10,7 +10,7 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n) { unsigned long d0, d1, d2; - asm volatile("rep ; movsl\n\t" + asm volatile("rep movsl\n\t" "testb $2,%b4\n\t" "je 1f\n\t" "movsw\n" diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c index a58f451a7dd3..b5893928d55c 100644 --- a/arch/x86/lib/kaslr.c +++ b/arch/x86/lib/kaslr.c @@ -8,7 +8,7 @@ */ #include <asm/asm.h> #include <asm/kaslr.h> -#include <asm/msr.h> +#include <asm/tsc.h> #include <asm/archrandom.h> #include <asm/e820/api.h> #include <asm/shared/io.h> diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 0ae2e1712e2e..12a23fa7c44c 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -41,6 +41,7 @@ SYM_FUNC_END(__memcpy) EXPORT_SYMBOL(__memcpy) SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy) +SYM_PIC_ALIAS(memcpy) EXPORT_SYMBOL(memcpy) SYM_FUNC_START_LOCAL(memcpy_orig) diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index d66b710d628f..fb5a03cf5ab7 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -42,6 +42,7 @@ SYM_FUNC_END(__memset) EXPORT_SYMBOL(__memset) SYM_FUNC_ALIAS_MEMFUNC(memset, __memset) +SYM_PIC_ALIAS(memset) EXPORT_SYMBOL(memset) SYM_FUNC_START_LOCAL(memset_orig) diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c index acd463d887e1..b8f63419e6ae 100644 --- a/arch/x86/lib/msr-smp.c +++ b/arch/x86/lib/msr-smp.c @@ -47,7 +47,7 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) } EXPORT_SYMBOL(rdmsr_on_cpu); -int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) { int err; struct msr_info rv; @@ -60,7 +60,7 @@ int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) return err; } -EXPORT_SYMBOL(rdmsrl_on_cpu); +EXPORT_SYMBOL(rdmsrq_on_cpu); int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { @@ -78,7 +78,7 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) } EXPORT_SYMBOL(wrmsr_on_cpu); -int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q) { int err; struct msr_info rv; @@ -92,7 +92,7 @@ int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) return err; } -EXPORT_SYMBOL(wrmsrl_on_cpu); +EXPORT_SYMBOL(wrmsrq_on_cpu); static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs, @@ -204,7 +204,7 @@ int 
wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) } EXPORT_SYMBOL(wrmsr_safe_on_cpu); -int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) { int err; struct msr_info rv; @@ -218,9 +218,9 @@ int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) return err ? err : rv.err; } -EXPORT_SYMBOL(wrmsrl_safe_on_cpu); +EXPORT_SYMBOL(wrmsrq_safe_on_cpu); -int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) { u32 low, high; int err; @@ -230,7 +230,7 @@ int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) return err; } -EXPORT_SYMBOL(rdmsrl_safe_on_cpu); +EXPORT_SYMBOL(rdmsrq_safe_on_cpu); /* * These variants are significantly slower, but allows control over diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index 5a18ecc04a6c..4ef7c6dcbea6 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c @@ -41,7 +41,7 @@ static int msr_read(u32 msr, struct msr *m) int err; u64 val; - err = rdmsrl_safe(msr, &val); + err = rdmsrq_safe(msr, &val); if (!err) m->q = val; @@ -58,7 +58,7 @@ static int msr_read(u32 msr, struct msr *m) */ static int msr_write(u32 msr, struct msr *m) { - return wrmsrl_safe(msr, m->q); + return wrmsrq_safe(msr, m->q); } static inline int __flip_bit(u32 msr, u8 bit, bool set) @@ -122,23 +122,23 @@ int msr_clear_bit(u32 msr, u8 bit) EXPORT_SYMBOL_GPL(msr_clear_bit); #ifdef CONFIG_TRACEPOINTS -void do_trace_write_msr(unsigned int msr, u64 val, int failed) +void do_trace_write_msr(u32 msr, u64 val, int failed) { trace_write_msr(msr, val, failed); } EXPORT_SYMBOL(do_trace_write_msr); EXPORT_TRACEPOINT_SYMBOL(write_msr); -void do_trace_read_msr(unsigned int msr, u64 val, int failed) +void do_trace_read_msr(u32 msr, u64 val, int failed) { trace_read_msr(msr, val, failed); } EXPORT_SYMBOL(do_trace_read_msr); EXPORT_TRACEPOINT_SYMBOL(read_msr); -void do_trace_rdpmc(unsigned counter, u64 val, int failed) +void do_trace_rdpmc(u32 msr, u64 val, int failed) { - trace_rdpmc(counter, val, failed); + trace_rdpmc(msr, val, failed); } EXPORT_SYMBOL(do_trace_rdpmc); EXPORT_TRACEPOINT_SYMBOL(rdpmc); diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 39374949daa2..d78d769a02bd 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -40,6 +40,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) ALTERNATIVE_2 __stringify(RETPOLINE \reg), \ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \ __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE) +SYM_PIC_ALIAS(__x86_indirect_thunk_\reg) .endm @@ -442,6 +443,7 @@ SYM_CODE_START(__x86_return_thunk) #endif int3 SYM_CODE_END(__x86_return_thunk) +SYM_PIC_ALIAS(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) #endif /* CONFIG_MITIGATION_RETHUNK */ diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index 53b3f202267c..f87ec24fa579 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c @@ -40,8 +40,7 @@ char *strncpy(char *dest, const char *src, size_t count) "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" - "rep\n\t" - "stosb\n" + "rep stosb\n" "2:" : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) : "0" (src), "1" (dest), "2" (count) : "memory"); @@ -54,8 +53,7 @@ EXPORT_SYMBOL(strncpy); char *strcat(char *dest, const char *src) { int d0, d1, d2, d3; - asm volatile("repne\n\t" - "scasb\n\t" + asm volatile("repne scasb\n\t" "decl %1\n" "1:\tlodsb\n\t" 
"stosb\n\t" @@ -72,8 +70,7 @@ EXPORT_SYMBOL(strcat); char *strncat(char *dest, const char *src, size_t count) { int d0, d1, d2, d3; - asm volatile("repne\n\t" - "scasb\n\t" + asm volatile("repne scasb\n\t" "decl %1\n\t" "movl %8,%3\n" "1:\tdecl %3\n\t" @@ -167,8 +164,7 @@ size_t strlen(const char *s) { int d0; size_t res; - asm volatile("repne\n\t" - "scasb" + asm volatile("repne scasb" : "=c" (res), "=&D" (d0) : "1" (s), "a" (0), "0" (0xffffffffu) : "memory"); @@ -184,8 +180,7 @@ void *memchr(const void *cs, int c, size_t count) void *res; if (!count) return NULL; - asm volatile("repne\n\t" - "scasb\n\t" + asm volatile("repne scasb\n\t" "je 1f\n\t" "movl $1,%0\n" "1:\tdecl %0" @@ -202,7 +197,7 @@ void *memscan(void *addr, int c, size_t size) { if (!size) return addr; - asm volatile("repnz; scasb\n\t" + asm volatile("repnz scasb\n\t" "jnz 1f\n\t" "dec %%edi\n" "1:" diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index 38f37df056f7..28267985e85f 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c @@ -8,16 +8,14 @@ int d0, d1; register char *__res; __asm__ __volatile__( "movl %6,%%edi\n\t" - "repne\n\t" - "scasb\n\t" + "repne scasb\n\t" "notl %%ecx\n\t" "decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */ "movl %%ecx,%%edx\n" "1:\tmovl %6,%%edi\n\t" "movl %%esi,%%eax\n\t" "movl %%edx,%%ecx\n\t" - "repe\n\t" - "cmpsb\n\t" + "repe cmpsb\n\t" "je 2f\n\t" /* also works for empty string, see above */ "xchgl %%eax,%%esi\n\t" "incl %%esi\n\t" diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 422257c350c6..f6f436f1d573 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -38,9 +38,9 @@ do { \ might_fault(); \ __asm__ __volatile__( \ ASM_STAC "\n" \ - "0: rep; stosl\n" \ + "0: rep stosl\n" \ " movl %2,%0\n" \ - "1: rep; stosb\n" \ + "1: rep stosb\n" \ "2: " ASM_CLAC "\n" \ _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %2) \ _ASM_EXTABLE_UA(1b, 2b) \ @@ -140,9 +140,9 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "99: rep; movsl\n" + "99: rep movsl\n" "36: movl %%eax, %0\n" - "37: rep; movsb\n" + "37: rep movsb\n" "100:\n" _ASM_EXTABLE_UA(1b, 100b) _ASM_EXTABLE_UA(2b, 100b) @@ -242,9 +242,9 @@ static unsigned long __copy_user_intel_nocache(void *to, " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "6: rep; movsl\n" + "6: rep movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" + "7: rep movsb\n" "8:\n" _ASM_EXTABLE_UA(0b, 8b) _ASM_EXTABLE_UA(1b, 8b) @@ -293,14 +293,14 @@ do { \ " negl %0\n" \ " andl $7,%0\n" \ " subl %0,%3\n" \ - "4: rep; movsb\n" \ + "4: rep movsb\n" \ " movl %3,%0\n" \ " shrl $2,%0\n" \ " andl $3,%3\n" \ " .align 2,0x90\n" \ - "0: rep; movsl\n" \ + "0: rep movsl\n" \ " movl %3,%0\n" \ - "1: rep; movsb\n" \ + "1: rep movsb\n" \ "2:\n" \ _ASM_EXTABLE_TYPE_REG(4b, 2b, EX_TYPE_UCOPY_LEN1, %3) \ _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %3) \ diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index f5dd84eb55dc..262f7ca1fb95 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -35,7 +35,7 @@ # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) # - (66&F2): Both 0x66 and 0xF2 prefixes are specified. # -# REX2 Prefix +# REX2 Prefix Superscripts # - (!REX2): REX2 is not allowed # - (REX2): REX2 variant e.g. 
JMPABS @@ -147,7 +147,7 @@ AVXcode: # 0x60 - 0x6f 60: PUSHA/PUSHAD (i64) 61: POPA/POPAD (i64) -62: BOUND Gv,Ma (i64) | EVEX (Prefix) +62: BOUND Gv,Ma (i64) | EVEX (Prefix),(o64) 63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64) 64: SEG=FS (Prefix) 65: SEG=GS (Prefix) @@ -253,8 +253,8 @@ c0: Grp2 Eb,Ib (1A) c1: Grp2 Ev,Ib (1A) c2: RETN Iw (f64) c3: RETN -c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) -c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) +c4: LES Gz,Mp (i64) | VEX+2byte (Prefix),(o64) +c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix),(o64) c6: Grp11A Eb,Ib (1A) c7: Grp11B Ev,Iz (1A) c8: ENTER Iw,Ib @@ -286,10 +286,10 @@ df: ESC # Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix # in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation # to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD. -e0: LOOPNE/LOOPNZ Jb (f64) (!REX2) -e1: LOOPE/LOOPZ Jb (f64) (!REX2) -e2: LOOP Jb (f64) (!REX2) -e3: JrCXZ Jb (f64) (!REX2) +e0: LOOPNE/LOOPNZ Jb (f64),(!REX2) +e1: LOOPE/LOOPZ Jb (f64),(!REX2) +e2: LOOP Jb (f64),(!REX2) +e3: JrCXZ Jb (f64),(!REX2) e4: IN AL,Ib (!REX2) e5: IN eAX,Ib (!REX2) e6: OUT Ib,AL (!REX2) @@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2) # in "near" jumps and calls is 16-bit. For CALL, # push of return address is 16-bit wide, RSP is decremented by 2 # but is not truncated to 16 bits, unlike RIP. -e8: CALL Jz (f64) (!REX2) -e9: JMP-near Jz (f64) (!REX2) -ea: JMP-far Ap (i64) (!REX2) -eb: JMP-short Jb (f64) (!REX2) +e8: CALL Jz (f64),(!REX2) +e9: JMP-near Jz (f64),(!REX2) +ea: JMP-far Ap (i64),(!REX2) +eb: JMP-short Jb (f64),(!REX2) ec: IN AL,DX (!REX2) ed: IN eAX,DX (!REX2) ee: OUT DX,AL (!REX2) @@ -478,22 +478,22 @@ AVXcode: 1 7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev) # 0x0f 0x80-0x8f # Note: "forced64" is Intel CPU behavior (see comment about CALL insn). 
-80: JO Jz (f64) (!REX2) -81: JNO Jz (f64) (!REX2) -82: JB/JC/JNAE Jz (f64) (!REX2) -83: JAE/JNB/JNC Jz (f64) (!REX2) -84: JE/JZ Jz (f64) (!REX2) -85: JNE/JNZ Jz (f64) (!REX2) -86: JBE/JNA Jz (f64) (!REX2) -87: JA/JNBE Jz (f64) (!REX2) -88: JS Jz (f64) (!REX2) -89: JNS Jz (f64) (!REX2) -8a: JP/JPE Jz (f64) (!REX2) -8b: JNP/JPO Jz (f64) (!REX2) -8c: JL/JNGE Jz (f64) (!REX2) -8d: JNL/JGE Jz (f64) (!REX2) -8e: JLE/JNG Jz (f64) (!REX2) -8f: JNLE/JG Jz (f64) (!REX2) +80: JO Jz (f64),(!REX2) +81: JNO Jz (f64),(!REX2) +82: JB/JC/JNAE Jz (f64),(!REX2) +83: JAE/JNB/JNC Jz (f64),(!REX2) +84: JE/JZ Jz (f64),(!REX2) +85: JNE/JNZ Jz (f64),(!REX2) +86: JBE/JNA Jz (f64),(!REX2) +87: JA/JNBE Jz (f64),(!REX2) +88: JS Jz (f64),(!REX2) +89: JNS Jz (f64),(!REX2) +8a: JP/JPE Jz (f64),(!REX2) +8b: JNP/JPO Jz (f64),(!REX2) +8c: JL/JNGE Jz (f64),(!REX2) +8d: JNL/JGE Jz (f64),(!REX2) +8e: JLE/JNG Jz (f64),(!REX2) +8f: JNLE/JG Jz (f64),(!REX2) # 0x0f 0x90-0x9f 90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66) 91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66) diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c index d62662bdd460..5f253ae406b6 100644 --- a/arch/x86/math-emu/fpu_aux.c +++ b/arch/x86/math-emu/fpu_aux.c @@ -53,7 +53,7 @@ void fpstate_init_soft(struct swregs_state *soft) void finit(void) { - fpstate_init_soft(¤t->thread.fpu.fpstate->regs.soft); + fpstate_init_soft(&x86_task_fpu(current)->fpstate->regs.soft); } /* diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index 91c52ead1226..5034df617740 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c @@ -641,7 +641,7 @@ int fpregs_soft_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft; + struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft; void *space = s387->st_space; int ret; int offset, other, i, tags, regnr, tag, newtop; @@ -692,7 +692,7 @@ int fpregs_soft_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { - struct swregs_state *s387 = &target->thread.fpu.fpstate->regs.soft; + struct swregs_state *s387 = &x86_task_fpu(target)->fpstate->regs.soft; const void *space = s387->st_space; int offset = (S387->ftop & 7) * 10, other = 80 - offset; diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h index eec3e4805c75..5e238e930fe3 100644 --- a/arch/x86/math-emu/fpu_system.h +++ b/arch/x86/math-emu/fpu_system.h @@ -73,7 +73,7 @@ static inline bool seg_writable(struct desc_struct *d) return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_WRITABLE; } -#define I387 (¤t->thread.fpu.fpstate->regs) +#define I387 (&x86_task_fpu(current)->fpstate->regs) #define FPU_info (I387->soft.info) #define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs)) diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 32035d5be5a0..5b9908f13dcf 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -3,12 +3,10 @@ KCOV_INSTRUMENT_tlb.o := n KCOV_INSTRUMENT_mem_encrypt.o := n KCOV_INSTRUMENT_mem_encrypt_amd.o := n -KCOV_INSTRUMENT_mem_encrypt_identity.o := n KCOV_INSTRUMENT_pgprot.o := n KASAN_SANITIZE_mem_encrypt.o := n KASAN_SANITIZE_mem_encrypt_amd.o := n -KASAN_SANITIZE_mem_encrypt_identity.o := n KASAN_SANITIZE_pgprot.o := n # Disable KCSAN entirely, because otherwise we get warnings that some functions @@ -16,12 +14,10 @@ KASAN_SANITIZE_pgprot.o := n KCSAN_SANITIZE := n # Avoid recursion by not 
calling KMSAN hooks for CEA code. KMSAN_SANITIZE_cpu_entry_area.o := n -KMSAN_SANITIZE_mem_encrypt_identity.o := n ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_mem_encrypt.o = -pg CFLAGS_REMOVE_mem_encrypt_amd.o = -pg -CFLAGS_REMOVE_mem_encrypt_identity.o = -pg CFLAGS_REMOVE_pgprot.o = -pg endif @@ -32,9 +28,6 @@ obj-y += pat/ # Make sure __phys_addr has no stackprotector CFLAGS_physaddr.o := -fno-stack-protector -CFLAGS_mem_encrypt_identity.o := -fno-stack-protector - -CFLAGS_fault.o := -I $(src)/../include/asm/trace obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o @@ -52,7 +45,7 @@ obj-$(CONFIG_MMIOTRACE) += mmiotrace.o mmiotrace-y := kmmio.o pf_in.o mmio-mod.o obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o -obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o +obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_AMD_NUMA) += amdtopology.o obj-$(CONFIG_ACPI_NUMA) += srat.o @@ -63,5 +56,4 @@ obj-$(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION) += pti.o obj-$(CONFIG_X86_MEM_ENCRYPT) += mem_encrypt.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o -obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c index 628833afee37..f980b0eb0105 100644 --- a/arch/x86/mm/amdtopology.c +++ b/arch/x86/mm/amdtopology.c @@ -25,7 +25,7 @@ #include <asm/numa.h> #include <asm/mpspec.h> #include <asm/apic.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> static unsigned char __initdata nodeids[8]; diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 51986e8a9d35..bf8dab18be97 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -111,7 +111,7 @@ static bool ex_handler_sgx(const struct exception_table_entry *fixup, /* * Handler for when we fail to restore a task's FPU state. We should never get - * here because the FPU state of a task using the FPU (task->thread.fpu.state) + * here because the FPU state of a task using the FPU (struct fpu::fpstate) * should always be valid. However, past bugs have allowed userspace to set * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn(). * These caused XRSTOR to fail when switching to the task, leaking the FPU diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 296d294142c8..998bd807fc7b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -13,7 +13,6 @@ #include <linux/mmiotrace.h> /* kmmio_handler, ... */ #include <linux/perf_event.h> /* perf_sw_event */ #include <linux/hugetlb.h> /* hstate_index_to_shift */ -#include <linux/prefetch.h> /* prefetchw */ #include <linux/context_tracking.h> /* exception_enter(), ... */ #include <linux/uaccess.h> /* faulthandler_disabled() */ #include <linux/efi.h> /* efi_crash_gracefully_on_page_fault()*/ @@ -38,7 +37,7 @@ #include <asm/sev.h> /* snp_dump_hva_rmpentry() */ #define CREATE_TRACE_POINTS -#include <asm/trace/exceptions.h> +#include <trace/events/exceptions.h> /* * Returns 0 if mmiotrace is disabled, or if the fault is not @@ -1455,9 +1454,6 @@ static __always_inline void trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code, unsigned long address) { - if (!trace_pagefault_enabled()) - return; - if (user_mode(regs)) trace_page_fault_user(address, regs, error_code); else @@ -1496,8 +1492,6 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault) address = cpu_feature_enabled(X86_FEATURE_FRED) ? 
fred_event_data(regs) : read_cr2(); - prefetchw(¤t->mm->mmap_lock); - /* * KVM uses #PF vector to deliver 'page not present' events to guests * (asynchronous page fault mechanism). The event happens when a diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index bfa444a7dbb0..7456df985d96 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -28,6 +28,7 @@ #include <asm/text-patching.h> #include <asm/memtype.h> #include <asm/paravirt.h> +#include <asm/mmu_context.h> /* * We need to define the tracepoints somewhere, and tlb.c @@ -173,11 +174,7 @@ __ref void *alloc_low_pages(unsigned int num) * randomization is enabled. */ -#ifndef CONFIG_X86_5LEVEL -#define INIT_PGD_PAGE_TABLES 3 -#else #define INIT_PGD_PAGE_TABLES 4 -#endif #ifndef CONFIG_RANDOMIZE_MEMORY #define INIT_PGD_PAGE_COUNT (2 * INIT_PGD_PAGE_TABLES) @@ -824,31 +821,33 @@ void __init poking_init(void) spinlock_t *ptl; pte_t *ptep; - poking_mm = mm_alloc(); - BUG_ON(!poking_mm); + text_poke_mm = mm_alloc(); + BUG_ON(!text_poke_mm); /* Xen PV guests need the PGD to be pinned. */ - paravirt_enter_mmap(poking_mm); + paravirt_enter_mmap(text_poke_mm); + + set_notrack_mm(text_poke_mm); /* * Randomize the poking address, but make sure that the following page * will be mapped at the same PMD. We need 2 pages, so find space for 3, * and adjust the address if the PMD ends after the first one. */ - poking_addr = TASK_UNMAPPED_BASE; + text_poke_mm_addr = TASK_UNMAPPED_BASE; if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) % + text_poke_mm_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) % (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE); - if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0) - poking_addr += PAGE_SIZE; + if (((text_poke_mm_addr + PAGE_SIZE) & ~PMD_MASK) == 0) + text_poke_mm_addr += PAGE_SIZE; /* * We need to trigger the allocation of the page-tables that will be * needed for poking now. Later, poking may be performed in an atomic * section, which might cause allocation to fail. 
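The randomized text_poke_mm_addr above needs two consecutive pages under the same PMD, and the ((addr + PAGE_SIZE) & ~PMD_MASK) == 0 test catches the single bad case: the second page starting exactly on a PMD boundary. A standalone illustration of that arithmetic, assuming the usual 4 KiB pages and 2 MiB PMDs:

/* The PMD-straddle check used when picking the poking address: two pages
 * starting at addr share a PMD unless the second page begins exactly on a
 * PMD boundary.  4 KiB pages and 2 MiB PMDs are assumed. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000ull
#define PMD_SIZE  0x200000ull
#define PMD_MASK  (~(PMD_SIZE - 1))

static uint64_t fixup_poking_addr(uint64_t addr)
{
	if (((addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		addr += PAGE_SIZE;      /* slide forward past the boundary */
	return addr;
}

int main(void)
{
	/* Second page would start on a 2 MiB boundary: must be adjusted. */
	assert(fixup_poking_addr(0x7f00001ff000ull) == 0x7f0000200000ull);
	/* Anywhere else both pages sit under the same PMD: left alone. */
	assert(fixup_poking_addr(0x7f0000123000ull) == 0x7f0000123000ull);

	puts("PMD straddle check OK");
	return 0;
}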
*/ - ptep = get_locked_pte(poking_mm, poking_addr, &ptl); + ptep = get_locked_pte(text_poke_mm, text_poke_mm_addr, &ptl); BUG_ON(!ptep); pte_unmap_unlock(ptep, ptl); } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bb8d99e717b9..607d6a2e66e2 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -613,7 +613,6 @@ void __init find_low_pfn_range(void) highmem_pfn_init(); } -#ifndef CONFIG_NUMA void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM @@ -634,12 +633,6 @@ void __init initmem_init(void) printk(KERN_NOTICE "%ldMB LOWMEM available.\n", pages_to_mb(max_low_pfn)); - setup_bootmem_allocator(); -} -#endif /* !CONFIG_NUMA */ - -void __init setup_bootmem_allocator(void) -{ printk(KERN_INFO " mapped low ram: 0 - %08lx\n", max_pfn_mapped<<PAGE_SHIFT); printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 949a447f75ec..66330fe4e18c 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -806,12 +806,17 @@ kernel_physical_mapping_change(unsigned long paddr_start, } #ifndef CONFIG_NUMA -void __init initmem_init(void) +static inline void x86_numa_init(void) { memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); } #endif +void __init initmem_init(void) +{ + x86_numa_init(); +} + void __init paging_init(void) { sparse_init(); @@ -828,7 +833,6 @@ void __init paging_init(void) zone_sizes_init(); } -#ifdef CONFIG_SPARSEMEM_VMEMMAP #define PAGE_UNUSED 0xFD /* @@ -927,7 +931,6 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long if (!IS_ALIGNED(end, PMD_SIZE)) unused_pmd_start = end; } -#endif /* * Memory hotplug specific functions @@ -1147,16 +1150,13 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); pages++; - } -#ifdef CONFIG_SPARSEMEM_VMEMMAP - else if (vmemmap_pmd_is_unused(addr, next)) { + } else if (vmemmap_pmd_is_unused(addr, next)) { free_hugepage_table(pmd_page(*pmd), altmap); spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); } -#endif continue; } @@ -1495,7 +1495,6 @@ unsigned long memory_block_size_bytes(void) return memory_block_size_probed; } -#ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Initialise the sparsemem vmemmap using huge-pages at the PMD level. */ @@ -1642,4 +1641,3 @@ void __meminit vmemmap_populate_print_last(void) node_start = 0; } } -#endif diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 7490ff6d83b1..faf3a13fb6ba 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -40,7 +40,9 @@ * section is later cleared. 
*/ u64 sme_me_mask __section(".data") = 0; +SYM_PIC_ALIAS(sme_me_mask); u64 sev_status __section(".data") = 0; +SYM_PIC_ALIAS(sev_status); u64 sev_check_data __section(".data") = 0; EXPORT_SYMBOL(sme_me_mask); diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h index 3f37b5c80bb3..097aadc250f7 100644 --- a/arch/x86/mm/mm_internal.h +++ b/arch/x86/mm/mm_internal.h @@ -25,4 +25,8 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache); extern unsigned long tlb_single_page_flush_ceiling; +#ifdef CONFIG_NUMA +void __init x86_numa_init(void); +#endif + #endif /* __X86_MM_INTERNAL_H */ diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 64e5cdb2460a..c24890c40138 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -18,9 +18,10 @@ #include <asm/e820/api.h> #include <asm/proto.h> #include <asm/dma.h> -#include <asm/amd_nb.h> +#include <asm/numa.h> +#include <asm/amd/nb.h> -#include "numa_internal.h" +#include "mm_internal.h" int numa_off; diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c deleted file mode 100644 index 65fda406e6f2..000000000000 --- a/arch/x86/mm/numa_32.c +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation - * August 2002: added remote node KVA remap - Martin J. Bligh - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/memblock.h> -#include <linux/init.h> -#include <linux/vmalloc.h> -#include <asm/pgtable_areas.h> - -#include "numa_internal.h" - -extern unsigned long highend_pfn, highstart_pfn; - -void __init initmem_init(void) -{ - x86_numa_init(); - -#ifdef CONFIG_HIGHMEM - highstart_pfn = highend_pfn = max_pfn; - if (max_pfn > max_low_pfn) - highstart_pfn = max_low_pfn; - printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", - pages_to_mb(highend_pfn - highstart_pfn)); - high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; -#else - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; -#endif - printk(KERN_NOTICE "%ldMB LOWMEM available.\n", - pages_to_mb(max_low_pfn)); - printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n", - max_low_pfn, highstart_pfn); - - printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n", - (ulong) pfn_to_kaddr(max_low_pfn)); - - printk(KERN_DEBUG "High memory starts at vaddr %08lx\n", - (ulong) pfn_to_kaddr(highstart_pfn)); - - __vmalloc_start_set = true; - setup_bootmem_allocator(); -} diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c deleted file mode 100644 index 59d80160fa5a..000000000000 --- a/arch/x86/mm/numa_64.c +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Generic VM initialization for x86-64 NUMA setups. - * Copyright 2002,2003 Andi Kleen, SuSE Labs. 
- */ -#include <linux/memblock.h> - -#include "numa_internal.h" - -void __init initmem_init(void) -{ - x86_numa_init(); -} diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h deleted file mode 100644 index 11e1ff370c10..000000000000 --- a/arch/x86/mm/numa_internal.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __X86_MM_NUMA_INTERNAL_H -#define __X86_MM_NUMA_INTERNAL_H - -#include <linux/types.h> -#include <asm/numa.h> - -void __init x86_numa_init(void); - -#endif /* __X86_MM_NUMA_INTERNAL_H */ diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 72d8cbc61158..c97b527c66fe 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -38,6 +38,7 @@ #include <linux/kernel.h> #include <linux/pfn_t.h> #include <linux/slab.h> +#include <linux/io.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/fs.h> @@ -232,7 +233,7 @@ void pat_cpu_init(void) panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n"); } - wrmsrl(MSR_IA32_CR_PAT, pat_msr_val); + wrmsrq(MSR_IA32_CR_PAT, pat_msr_val); __flush_tlb_all(); } @@ -256,7 +257,7 @@ void __init pat_bp_init(void) if (!cpu_feature_enabled(X86_FEATURE_PAT)) pat_disable("PAT not supported by the CPU."); else - rdmsrl(MSR_IA32_CR_PAT, pat_msr_val); + rdmsrq(MSR_IA32_CR_PAT, pat_msr_val); if (!pat_msr_val) { pat_disable("PAT support disabled by the firmware."); @@ -682,6 +683,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr) /** * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type * of @pfn cannot be overridden by UC MTRR memory type. + * @pfn: The page frame number to check. * * Only to be called when PAT is enabled. * @@ -773,38 +775,14 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, return vma_prot; } -#ifdef CONFIG_STRICT_DEVMEM -/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */ -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - return 1; -} -#else -/* This check is needed to avoid cache aliasing when PAT is enabled */ -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - u64 from = ((u64)pfn) << PAGE_SHIFT; - u64 to = from + size; - u64 cursor = from; - - if (!pat_enabled()) - return 1; - - while (cursor < to) { - if (!devmem_is_allowed(pfn)) - return 0; - cursor += PAGE_SIZE; - pfn++; - } - return 1; -} -#endif /* CONFIG_STRICT_DEVMEM */ - int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB; + if (!pat_enabled()) + return 1; + if (!range_is_allowed(pfn, size)) return 0; diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index def3d9284254..30ab4aced761 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -889,7 +889,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) /* change init_mm */ set_pte_atomic(kpte, pte); #ifdef CONFIG_X86_32 - if (!SHARED_KERNEL_PMD) { + { struct page *page; list_for_each_entry(page, &pgd_list, lru) { @@ -1293,7 +1293,7 @@ static int collapse_pmd_page(pmd_t *pmd, unsigned long addr, /* Queue the page table to be freed after TLB flush */ list_add(&page_ptdesc(pmd_page(old_pmd))->pt_list, pgtables); - if (IS_ENABLED(CONFIG_X86_32) && !SHARED_KERNEL_PMD) { + if (IS_ENABLED(CONFIG_X86_32)) { struct page *page; /* Update all PGD tables to use the same large page */ diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c 
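Back in the memtype.c hunk above, wrmsrq(MSR_IA32_CR_PAT, pat_msr_val) programs all eight PAT entries at once: the MSR packs one entry per byte, with the memory type in the low three bits. A decoding sketch for the power-on default value 0x0007040600070406; the encoding table (0=UC, 1=WC, 4=WT, 5=WP, 6=WB, 7=UC-) is taken from the SDM rather than from this patch:

/* Decode the eight entries of IA32_PAT: one entry per byte, memory type
 * in the low 3 bits.  0x0007040600070406 is the documented reset default. */
#include <stdint.h>
#include <stdio.h>

static const char *pat_type(unsigned int enc)
{
	switch (enc) {
	case 0: return "UC";
	case 1: return "WC";
	case 4: return "WT";
	case 5: return "WP";
	case 6: return "WB";
	case 7: return "UC-";
	default: return "reserved";
	}
}

int main(void)
{
	uint64_t pat = 0x0007040600070406ull;

	for (int i = 0; i < 8; i++)
		printf("PAT%d = %s\n", i, pat_type((pat >> (8 * i)) & 0x7));
	return 0;
}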
index f7ae44d3dd9e..62777ba4de1a 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -10,6 +10,7 @@ #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1; EXPORT_SYMBOL(physical_mask); +SYM_PIC_ALIAS(physical_mask); #endif pgtable_t pte_alloc_one(struct mm_struct *mm) @@ -68,12 +69,6 @@ static inline void pgd_list_del(pgd_t *pgd) list_del(&ptdesc->pt_list); } -#define UNSHARED_PTRS_PER_PGD \ - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) -#define MAX_UNSHARED_PTRS_PER_PGD \ - MAX_T(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD) - - static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) { virt_to_ptdesc(pgd)->pt_mm = mm; @@ -86,29 +81,19 @@ struct mm_struct *pgd_page_get_mm(struct page *page) static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) { - /* If the pgd points to a shared pagetable level (either the - ptes in non-PAE, or shared PMD in PAE), then just copy the - references from swapper_pg_dir. */ - if (CONFIG_PGTABLE_LEVELS == 2 || - (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) || - CONFIG_PGTABLE_LEVELS >= 4) { + /* PAE preallocates all its PMDs. No cloning needed. */ + if (!IS_ENABLED(CONFIG_X86_PAE)) clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS); - } - /* list required to sync kernel mapping updates */ - if (!SHARED_KERNEL_PMD) { - pgd_set_mm(pgd, mm); - pgd_list_add(pgd); - } + /* List used to sync kernel mapping updates */ + pgd_set_mm(pgd, mm); + pgd_list_add(pgd); } static void pgd_dtor(pgd_t *pgd) { - if (SHARED_KERNEL_PMD) - return; - spin_lock(&pgd_lock); pgd_list_del(pgd); spin_unlock(&pgd_lock); @@ -132,15 +117,15 @@ static void pgd_dtor(pgd_t *pgd) * processor notices the update. Since this is expensive, and * all 4 top-level entries are used almost immediately in a * new process's life, we just pre-populate them here. - * - * Also, if we're in a paravirt environment where the kernel pmd is - * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate - * and initialize the kernel pmds here. */ -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD -#define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD +#define PREALLOCATED_PMDS PTRS_PER_PGD /* + * "USER_PMDS" are the PMDs for the user copy of the page tables when + * PTI is enabled. They do not exist when PTI is disabled. Note that + * this is distinct from the user _portion_ of the kernel page tables + * which always exists. + * * We allocate separate PMDs for the kernel part of the user page-table * when PTI is enabled. We need them to map the per-process LDT into the * user-space page-table. @@ -169,7 +154,6 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) /* No need to prepopulate any pagetable entries in non-PAE modes. */ #define PREALLOCATED_PMDS 0 -#define MAX_PREALLOCATED_PMDS 0 #define PREALLOCATED_USER_PMDS 0 #define MAX_PREALLOCATED_USER_PMDS 0 #endif /* CONFIG_X86_PAE */ @@ -318,68 +302,15 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm, { } #endif -/* - * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also - * assumes that pgd should be in one page. - * - * But kernel with PAE paging that is not running as a Xen domain - * only needs to allocate 32 bytes for pgd instead of one page. 
- */ -#ifdef CONFIG_X86_PAE - -#include <linux/slab.h> - -#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) -#define PGD_ALIGN 32 - -static struct kmem_cache *pgd_cache; - -void __init pgtable_cache_init(void) -{ - /* - * When PAE kernel is running as a Xen domain, it does not use - * shared kernel pmd. And this requires a whole page for pgd. - */ - if (!SHARED_KERNEL_PMD) - return; - - /* - * when PAE kernel is not running as a Xen domain, it uses - * shared kernel pmd. Shared kernel pmd does not require a whole - * page for pgd. We are able to just allocate a 32-byte for pgd. - * During boot time, we create a 32-byte slab for pgd table allocation. - */ - pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN, - SLAB_PANIC, NULL); -} static inline pgd_t *_pgd_alloc(struct mm_struct *mm) { /* - * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain. - * We allocate one page for pgd. - */ - if (!SHARED_KERNEL_PMD) - return __pgd_alloc(mm, pgd_allocation_order()); - - /* - * Now PAE kernel is not running as a Xen domain. We can allocate - * a 32-byte slab for pgd to save memory space. + * PTI and Xen need a whole page for the PAE PGD + * even though the hardware only needs 32 bytes. + * + * For simplicity, allocate a page for all users. */ - return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER); -} - -static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - if (!SHARED_KERNEL_PMD) - __pgd_free(mm, pgd); - else - kmem_cache_free(pgd_cache, pgd); -} -#else - -static inline pgd_t *_pgd_alloc(struct mm_struct *mm) -{ return __pgd_alloc(mm, pgd_allocation_order()); } @@ -387,13 +318,12 @@ static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd) { __pgd_free(mm, pgd); } -#endif /* CONFIG_X86_PAE */ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS]; - pmd_t *pmds[MAX_PREALLOCATED_PMDS]; + pmd_t *pmds[PREALLOCATED_PMDS]; pgd = _pgd_alloc(mm); @@ -613,11 +543,11 @@ pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address, #endif /** - * reserve_top_address - reserves a hole in the top of kernel address space - * @reserve - size of hole to reserve + * reserve_top_address - Reserve a hole in the top of the kernel address space + * @reserve: Size of hole to reserve * * Can be used to relocate the fixmap area and poke a hole in the top - * of kernel address space to make room for a hypervisor. + * of the kernel address space to make room for a hypervisor. 
*/ void __init reserve_top_address(unsigned long reserve) { @@ -662,9 +592,12 @@ void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, } #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -#ifdef CONFIG_X86_5LEVEL +#if CONFIG_PGTABLE_LEVELS > 4 /** - * p4d_set_huge - setup kernel P4D mapping + * p4d_set_huge - Set up kernel P4D mapping + * @p4d: Pointer to the P4D entry + * @addr: Virtual address associated with the P4D entry + * @prot: Protection bits to use * * No 512GB pages yet -- always return 0 */ @@ -674,9 +607,10 @@ int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) } /** - * p4d_clear_huge - clear kernel P4D mapping when it is set + * p4d_clear_huge - Clear kernel P4D mapping when it is set + * @p4d: Pointer to the P4D entry to clear * - * No 512GB pages yet -- always return 0 + * No 512GB pages yet -- do nothing */ void p4d_clear_huge(p4d_t *p4d) { @@ -684,7 +618,10 @@ void p4d_clear_huge(p4d_t *p4d) #endif /** - * pud_set_huge - setup kernel PUD mapping + * pud_set_huge - Set up kernel PUD mapping + * @pud: Pointer to the PUD entry + * @addr: Virtual address associated with the PUD entry + * @prot: Protection bits to use * * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this * function sets up a huge page only if the complete range has the same MTRR @@ -715,7 +652,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) } /** - * pmd_set_huge - setup kernel PMD mapping + * pmd_set_huge - Set up kernel PMD mapping + * @pmd: Pointer to the PMD entry + * @addr: Virtual address associated with the PMD entry + * @prot: Protection bits to use * * See text over pud_set_huge() above. * @@ -744,7 +684,8 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) } /** - * pud_clear_huge - clear kernel PUD mapping when it is set + * pud_clear_huge - Clear kernel PUD mapping when it is set + * @pud: Pointer to the PUD entry to clear. * * Returns 1 on success and 0 on failure (no PUD map is found). */ @@ -759,7 +700,8 @@ int pud_clear_huge(pud_t *pud) } /** - * pmd_clear_huge - clear kernel PMD mapping when it is set + * pmd_clear_huge - Clear kernel PMD mapping when it is set + * @pmd: Pointer to the PMD entry to clear. * * Returns 1 on success and 0 on failure (no PMD map is found). */ @@ -775,11 +717,11 @@ int pmd_clear_huge(pmd_t *pmd) #ifdef CONFIG_X86_64 /** - * pud_free_pmd_page - Clear pud entry and free pmd page. - * @pud: Pointer to a PUD. - * @addr: Virtual address associated with pud. + * pud_free_pmd_page - Clear PUD entry and free PMD page + * @pud: Pointer to a PUD + * @addr: Virtual address associated with PUD * - * Context: The pud range has been unmapped and TLB purged. + * Context: The PUD range has been unmapped and TLB purged. * Return: 1 if clearing the entry succeeded. 0 otherwise. * * NOTE: Callers must allow a single page allocation. @@ -822,11 +764,11 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) } /** - * pmd_free_pte_page - Clear pmd entry and free pte page. - * @pmd: Pointer to a PMD. - * @addr: Virtual address associated with pmd. + * pmd_free_pte_page - Clear PMD entry and free PTE page. + * @pmd: Pointer to the PMD + * @addr: Virtual address associated with PMD * - * Context: The pmd range has been unmapped and TLB purged. + * Context: The PMD range has been unmapped and TLB purged. * Return: 1 if clearing the entry succeeded. 0 otherwise. 
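Much of the comment churn above is converting loose descriptions into well-formed kernel-doc: a /** opener, a one-line "name - summary", one @param: line per argument, and optional Context:/Return: sections. A minimal, made-up example of the shape these comments are converging on (nothing here is real kernel code):

/* A fabricated function carrying a kernel-doc comment in the same form. */
#include <stdio.h>

/**
 * example_clear_entry - Clear a table entry when it is set
 * @entry: Pointer to the entry to clear
 * @addr: Virtual address associated with the entry
 *
 * Context: The range covered by @entry has already been unmapped.
 * Return: 1 if the entry was cleared, 0 if it was not set.
 */
static int example_clear_entry(unsigned long *entry, unsigned long addr)
{
	(void)addr;
	if (!*entry)
		return 0;
	*entry = 0;
	return 1;
}

int main(void)
{
	unsigned long e = 42;
	int first = example_clear_entry(&e, 0);
	int second = example_clear_entry(&e, 0);

	printf("first=%d second=%d\n", first, second);
	return 0;
}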
*/ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) @@ -848,7 +790,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) /* * Disable free page handling on x86-PAE. This assures that ioremap() - * does not update sync'd pmd entries. See vmalloc_sync_one(). + * does not update sync'd PMD entries. See vmalloc_sync_one(). */ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 5f0d579932c6..190299834011 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -185,7 +185,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); } - BUILD_BUG_ON(pgd_leaf(*pgd) != 0); + BUILD_BUG_ON(pgd_leaf(*pgd)); return p4d_offset(pgd, address); } @@ -206,7 +206,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) if (!p4d) return NULL; - BUILD_BUG_ON(p4d_leaf(*p4d) != 0); + BUILD_BUG_ON(p4d_leaf(*p4d)); if (p4d_none(*p4d)) { unsigned long new_pud_page = __get_free_page(gfp); if (WARN_ON_ONCE(!new_pud_page)) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index b6d6750e4bd1..39f80111e6f1 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -19,6 +19,7 @@ #include <asm/cache.h> #include <asm/cacheflush.h> #include <asm/apic.h> +#include <asm/msr.h> #include <asm/perf_event.h> #include <asm/tlb.h> @@ -215,16 +216,20 @@ static void clear_asid_other(void) atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); +struct new_asid { + unsigned int asid : 16; + unsigned int need_flush : 1; +}; -static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, - u16 *new_asid, bool *need_flush) +static struct new_asid choose_new_asid(struct mm_struct *next, u64 next_tlb_gen) { + struct new_asid ns; u16 asid; if (!static_cpu_has(X86_FEATURE_PCID)) { - *new_asid = 0; - *need_flush = true; - return; + ns.asid = 0; + ns.need_flush = 1; + return ns; } /* @@ -235,9 +240,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, u16 global_asid = mm_global_asid(next); if (global_asid) { - *new_asid = global_asid; - *need_flush = false; - return; + ns.asid = global_asid; + ns.need_flush = 0; + return ns; } } @@ -249,22 +254,23 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, next->context.ctx_id) continue; - *new_asid = asid; - *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < - next_tlb_gen); - return; + ns.asid = asid; + ns.need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < next_tlb_gen); + return ns; } /* * We don't currently own an ASID slot on this CPU. * Allocate a slot. 
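choose_new_asid() is converted here from two out-parameters to returning a small two-field struct (a 16-bit asid plus a need_flush bit), which keeps the call sites readable and lets the result travel in a register. A userspace-sized sketch of the same interface shape; the selection logic is deliberately dummied out:

/* Returning a small struct by value instead of filling two out-parameters,
 * as in the choose_new_asid() conversion above.  Only the interface shape
 * is the point; the "selection" below is a stub. */
#include <stdio.h>

struct new_asid {
	unsigned int asid : 16;
	unsigned int need_flush : 1;
};

static struct new_asid choose_new_asid(unsigned int wanted_gen,
				       unsigned int cached_gen)
{
	struct new_asid ns;

	ns.asid = 1;                              /* pretend slot 1 matched */
	ns.need_flush = cached_gen < wanted_gen;  /* stale cached contents? */
	return ns;
}

int main(void)
{
	struct new_asid ns = choose_new_asid(7, 5);

	printf("asid=%u need_flush=%u\n", ns.asid, ns.need_flush);
	return 0;
}

Packing the two fields into one word also makes it harder to update one and forget the other, which is easy to do with separate out-pointers.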
*/ - *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1; - if (*new_asid >= TLB_NR_DYN_ASIDS) { - *new_asid = 0; + ns.asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1; + if (ns.asid >= TLB_NR_DYN_ASIDS) { + ns.asid = 0; this_cpu_write(cpu_tlbstate.next_asid, 1); } - *need_flush = true; + ns.need_flush = true; + + return ns; } /* @@ -623,7 +629,7 @@ static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm, { /* Flush L1D if the outgoing task requests it */ if (prev_mm & LAST_USER_MM_L1D_FLUSH) - wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); + wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH); /* Check whether the incoming task opted in for L1D flush */ if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH))) @@ -781,9 +787,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy); unsigned cpu = smp_processor_id(); unsigned long new_lam; + struct new_asid ns; u64 next_tlb_gen; - bool need_flush; - u16 new_asid; + /* We don't want flush_tlb_func() to run concurrently with us. */ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) @@ -847,14 +853,15 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, * mm_cpumask. The TLB shootdown code can figure out from * cpu_tlbstate_shared.is_lazy whether or not to send an IPI. */ - if (IS_ENABLED(CONFIG_DEBUG_VM) && WARN_ON_ONCE(prev != &init_mm && + if (IS_ENABLED(CONFIG_DEBUG_VM) && + WARN_ON_ONCE(prev != &init_mm && !is_notrack_mm(prev) && !cpumask_test_cpu(cpu, mm_cpumask(next)))) cpumask_set_cpu(cpu, mm_cpumask(next)); /* Check if the current mm is transitioning to a global ASID */ if (mm_needs_global_asid(next, prev_asid)) { next_tlb_gen = atomic64_read(&next->context.tlb_gen); - choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + ns = choose_new_asid(next, next_tlb_gen); goto reload_tlb; } @@ -889,8 +896,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, * TLB contents went out of date while we were in lazy * mode. Fall through to the TLB switching code below. */ - new_asid = prev_asid; - need_flush = true; + ns.asid = prev_asid; + ns.need_flush = true; } else { /* * Apply process to process speculation vulnerability @@ -906,34 +913,26 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); barrier(); - /* - * Leave this CPU in prev's mm_cpumask. Atomic writes to - * mm_cpumask can be expensive under contention. The CPU - * will be removed lazily at TLB flush time. 
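The refactor above replaces choose_new_asid()'s two output parameters with a small struct returned by value: the 16-bit ASID and the 1-bit flush flag fit in one register, and the callers read ns.asid / ns.need_flush directly instead of dereferencing pointers. A self-contained illustration of the idiom (not the kernel code itself):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct new_asid {
          unsigned int asid       : 16;
          unsigned int need_flush : 1;
  };

  /* Return both results by value instead of filling caller-provided pointers. */
  static struct new_asid pick_asid(uint16_t cached_asid, bool cache_is_stale)
  {
          struct new_asid ns = {
                  .asid       = cached_asid,
                  .need_flush = cache_is_stale,
          };
          return ns;
  }

  int main(void)
  {
          struct new_asid ns = pick_asid(3, true);

          printf("asid=%u need_flush=%u\n", (unsigned)ns.asid,
                 (unsigned)ns.need_flush);
          return 0;
  }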
- */ - VM_WARN_ON_ONCE(prev != &init_mm && !cpumask_test_cpu(cpu, - mm_cpumask(prev))); - /* Start receiving IPIs and then read tlb_gen (and LAM below) */ if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next))) cpumask_set_cpu(cpu, mm_cpumask(next)); next_tlb_gen = atomic64_read(&next->context.tlb_gen); - choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + ns = choose_new_asid(next, next_tlb_gen); } reload_tlb: new_lam = mm_lam_cr3_mask(next); - if (need_flush) { - VM_WARN_ON_ONCE(is_global_asid(new_asid)); - this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); - this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); - load_new_mm_cr3(next->pgd, new_asid, new_lam, true); + if (ns.need_flush) { + VM_WARN_ON_ONCE(is_global_asid(ns.asid)); + this_cpu_write(cpu_tlbstate.ctxs[ns.asid].ctx_id, next->context.ctx_id); + this_cpu_write(cpu_tlbstate.ctxs[ns.asid].tlb_gen, next_tlb_gen); + load_new_mm_cr3(next->pgd, ns.asid, new_lam, true); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ - load_new_mm_cr3(next->pgd, new_asid, new_lam, false); + load_new_mm_cr3(next->pgd, ns.asid, new_lam, false); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); } @@ -942,7 +941,7 @@ reload_tlb: barrier(); this_cpu_write(cpu_tlbstate.loaded_mm, next); - this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); + this_cpu_write(cpu_tlbstate.loaded_mm_asid, ns.asid); cpu_tlbstate_update_lam(new_lam, mm_untag_mask(next)); if (next != prev) { @@ -973,6 +972,77 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) } /* + * Using a temporary mm allows to set temporary mappings that are not accessible + * by other CPUs. Such mappings are needed to perform sensitive memory writes + * that override the kernel memory protections (e.g., W^X), without exposing the + * temporary page-table mappings that are required for these write operations to + * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the + * mapping is torn down. Temporary mms can also be used for EFI runtime service + * calls or similar functionality. + * + * It is illegal to schedule while using a temporary mm -- the context switch + * code is unaware of the temporary mm and does not know how to context switch. + * Use a real (non-temporary) mm in a kernel thread if you need to sleep. + * + * Note: For sensitive memory writes, the temporary mm needs to be used + * exclusively by a single core, and IRQs should be disabled while the + * temporary mm is loaded, thereby preventing interrupt handler bugs from + * overriding the kernel memory protection. + */ +struct mm_struct *use_temporary_mm(struct mm_struct *temp_mm) +{ + struct mm_struct *prev_mm; + + lockdep_assert_preemption_disabled(); + guard(irqsave)(); + + /* + * Make sure not to be in TLB lazy mode, as otherwise we'll end up + * with a stale address space WITHOUT being in lazy mode after + * restoring the previous mm. + */ + if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) + leave_mm(); + + prev_mm = this_cpu_read(cpu_tlbstate.loaded_mm); + switch_mm_irqs_off(NULL, temp_mm, current); + + /* + * If breakpoints are enabled, disable them while the temporary mm is + * used. Userspace might set up watchpoints on addresses that are used + * in the temporary mm, which would lead to wrong signals being sent or + * crashes. + * + * Note that breakpoints are not disabled selectively, which also causes + * kernel breakpoints (e.g., perf's) to be disabled. 
This might be + * undesirable, but still seems reasonable as the code that runs in the + * temporary mm should be short. + */ + if (hw_breakpoint_active()) + hw_breakpoint_disable(); + + return prev_mm; +} + +void unuse_temporary_mm(struct mm_struct *prev_mm) +{ + lockdep_assert_preemption_disabled(); + guard(irqsave)(); + + /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */ + cpumask_clear_cpu(smp_processor_id(), mm_cpumask(this_cpu_read(cpu_tlbstate.loaded_mm))); + + switch_mm_irqs_off(NULL, prev_mm, current); + + /* + * Restore the breakpoints if they were disabled before the temporary mm + * was loaded. + */ + if (hw_breakpoint_active()) + hw_breakpoint_restore(); +} + +/* * Call this when reinitializing a CPU. It fixes the following potential * problems: * diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index ea4dd5b393aa..15672cb926fc 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -631,7 +631,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, goto out; ret = 1; if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { - text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); + smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL); ret = 0; } out: diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 631512f7ec85..99b1727136c1 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -5,7 +5,7 @@ #include <linux/cpu.h> #include <linux/range.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/pci_x86.h> #include <asm/pci-direct.h> @@ -202,7 +202,7 @@ static int __init early_root_info_init(void) /* need to take out [0, TOM) for RAM*/ address = MSR_K8_TOP_MEM1; - rdmsrl(address, val); + rdmsrq(address, val); end = (val & 0xffffff800000ULL); printk(KERN_INFO "TOM: %016llx aka %lldM\n", end, end>>20); if (end < (1ULL<<32)) @@ -293,12 +293,12 @@ static int __init early_root_info_init(void) /* need to take out [4G, TOM2) for RAM*/ /* SYS_CFG */ address = MSR_AMD64_SYSCFG; - rdmsrl(address, val); + rdmsrq(address, val); /* TOP_MEM2 is enabled? 
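The long comment above use_temporary_mm() spells out the contract for the new pair of helpers: no sleeping while the temporary mm is loaded, exclusive use by a single CPU, and interrupts off for sensitive writes. A hedged sketch of the intended call pattern follows; poking_mm, poking_addr and the memcpy() are placeholders standing in for a pre-built temporary mm and whatever privileged write the caller needs to perform:

  /* Sketch only: bracket a short, non-sleeping write with the new helpers. */
  static void write_via_temporary_mm(struct mm_struct *poking_mm,
                                     void *poking_addr,
                                     const void *src, size_t len)
  {
          struct mm_struct *prev_mm;
          unsigned long flags;

          /*
           * Keep interrupts off for the whole window: this satisfies the
           * preemption-disabled assertion in use_temporary_mm() and keeps
           * interrupt handlers from observing the temporary mapping.
           */
          local_irq_save(flags);

          prev_mm = use_temporary_mm(poking_mm);
          memcpy(poking_addr, src, len);
          unuse_temporary_mm(prev_mm);

          local_irq_restore(flags);
  }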
*/ if (val & (1<<21)) { /* TOP_MEM2 */ address = MSR_K8_TOP_MEM2; - rdmsrl(address, val); + rdmsrq(address, val); end = (val & 0xffffff800000ULL); printk(KERN_INFO "TOM2: %016llx aka %lldM\n", end, end>>20); subtract_range(range, RANGE_NUM, 1ULL<<32, end); @@ -341,10 +341,10 @@ static int amd_bus_cpu_online(unsigned int cpu) { u64 reg; - rdmsrl(MSR_AMD64_NB_CFG, reg); + rdmsrq(MSR_AMD64_NB_CFG, reg); if (!(reg & ENABLE_CF8_EXT_CFG)) { reg |= ENABLE_CF8_EXT_CFG; - wrmsrl(MSR_AMD64_NB_CFG, reg); + wrmsrq(MSR_AMD64_NB_CFG, reg); } return 0; } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index efefeb82ab61..36336299596b 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -9,7 +9,7 @@ #include <linux/pci.h> #include <linux/suspend.h> #include <linux/vgaarb.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include <asm/hpet.h> #include <asm/pci_x86.h> diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 39255f0eb14d..1f4522325920 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -22,9 +22,10 @@ #include <linux/slab.h> #include <linux/mutex.h> #include <linux/rculist.h> +#include <asm/acpi.h> #include <asm/e820/api.h> +#include <asm/msr.h> #include <asm/pci_x86.h> -#include <asm/acpi.h> /* Indicate if the ECAM resources have been placed into the resource table */ static bool pci_mmcfg_running_state; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index a4b4ebd41b8f..e7e8f77f77f8 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -89,6 +89,7 @@ int __init efi_alloc_page_tables(void) efi_mm.pgd = efi_pgd; mm_init_cpumask(&efi_mm); init_new_context(NULL, &efi_mm); + set_notrack_mm(&efi_mm); return 0; @@ -434,15 +435,12 @@ void __init efi_dump_pagetable(void) */ static void efi_enter_mm(void) { - efi_prev_mm = current->active_mm; - current->active_mm = &efi_mm; - switch_mm(efi_prev_mm, &efi_mm, NULL); + efi_prev_mm = use_temporary_mm(&efi_mm); } static void efi_leave_mm(void) { - current->active_mm = efi_prev_mm; - switch_mm(&efi_mm, efi_prev_mm, NULL); + unuse_temporary_mm(efi_prev_mm); } void arch_efi_call_virt_setup(void) diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c index 57f210cda761..ee77d57bcab7 100644 --- a/arch/x86/platform/olpc/olpc-xo1-rtc.c +++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c @@ -64,9 +64,9 @@ static int __init xo1_rtc_init(void) of_node_put(node); pr_info("olpc-xo1-rtc: Initializing OLPC XO-1 RTC\n"); - rdmsrl(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm); - rdmsrl(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm); - rdmsrl(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century); + rdmsrq(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm); + rdmsrq(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm); + rdmsrq(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century); r = platform_device_register(&xo1_rtc_device); if (r) diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c index 63066e7c8517..30751b42d54e 100644 --- a/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -325,7 +325,7 @@ static int setup_sci_interrupt(struct platform_device *pdev) dev_info(&pdev->dev, "SCI unmapped. 
Mapping to IRQ 3\n"); sci_irq = 3; lo |= 0x00300000; - wrmsrl(0x51400020, lo); + wrmsrq(0x51400020, lo); } /* Select level triggered in PIC */ diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S index cfa18ec7d55f..1d78e5631bb8 100644 --- a/arch/x86/platform/pvh/head.S +++ b/arch/x86/platform/pvh/head.S @@ -87,8 +87,7 @@ SYM_CODE_START(pvh_start_xen) mov %ebx, %esi movl rva(pvh_start_info_sz)(%ebp), %ecx shr $2,%ecx - rep - movsl + rep movsl leal rva(early_stack_end)(%ebp), %esp diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 08e76a5ca155..916441f5e85c 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -27,6 +27,7 @@ #include <asm/mmu_context.h> #include <asm/cpu_device_id.h> #include <asm/microcode.h> +#include <asm/msr.h> #include <asm/fred.h> #ifdef CONFIG_X86_32 @@ -44,7 +45,7 @@ static void msr_save_context(struct saved_context *ctxt) while (msr < end) { if (msr->valid) - rdmsrl(msr->info.msr_no, msr->info.reg.q); + rdmsrq(msr->info.msr_no, msr->info.reg.q); msr++; } } @@ -56,7 +57,7 @@ static void msr_restore_context(struct saved_context *ctxt) while (msr < end) { if (msr->valid) - wrmsrl(msr->info.msr_no, msr->info.reg.q); + wrmsrq(msr->info.msr_no, msr->info.reg.q); msr++; } } @@ -110,12 +111,12 @@ static void __save_processor_state(struct saved_context *ctxt) savesegment(ds, ctxt->ds); savesegment(es, ctxt->es); - rdmsrl(MSR_FS_BASE, ctxt->fs_base); - rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); - rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); + rdmsrq(MSR_FS_BASE, ctxt->fs_base); + rdmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base); + rdmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); mtrr_save_fixed_ranges(NULL); - rdmsrl(MSR_EFER, ctxt->efer); + rdmsrq(MSR_EFER, ctxt->efer); #endif /* @@ -125,7 +126,7 @@ static void __save_processor_state(struct saved_context *ctxt) ctxt->cr2 = read_cr2(); ctxt->cr3 = __read_cr3(); ctxt->cr4 = __read_cr4(); - ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE, + ctxt->misc_enable_saved = !rdmsrq_safe(MSR_IA32_MISC_ENABLE, &ctxt->misc_enable); msr_save_context(ctxt); } @@ -198,7 +199,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) struct cpuinfo_x86 *c; if (ctxt->misc_enable_saved) - wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); + wrmsrq(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); /* * control registers */ @@ -208,7 +209,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) __write_cr4(ctxt->cr4); #else /* CONFIG X86_64 */ - wrmsrl(MSR_EFER, ctxt->efer); + wrmsrq(MSR_EFER, ctxt->efer); __write_cr4(ctxt->cr4); #endif write_cr3(ctxt->cr3); @@ -231,7 +232,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) * handlers or in complicated helpers like load_gs_index(). */ #ifdef CONFIG_X86_64 - wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); + wrmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base); /* * Reinitialize FRED to ensure the FRED MSRs contain the same values @@ -267,8 +268,8 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) * restoring the selectors clobbers the bases. Keep in mind * that MSR_KERNEL_GS_BASE is horribly misnamed. 
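Much of the remaining churn in these hunks is mechanical: the 64-bit MSR accessors move from the 'l' (long) suffix to 'q' (quadword), so rdmsrl()/wrmsrl() become rdmsrq()/wrmsrq() with identical semantics, and <asm/msr.h> is now included explicitly where the macros are used. A minimal before/after sketch of the usual read-modify-write; the MSR and feature bit are borrowed from the AMD SYSCFG hunks elsewhere in this series purely as an example:

  /* Old spelling (64-bit despite the 'l' suffix): */
  rdmsrl(MSR_AMD64_SYSCFG, val);
  val |= MSR_AMD64_SYSCFG_MFDM;
  wrmsrl(MSR_AMD64_SYSCFG, val);

  /* New spelling -- same semantics, clearer name: */
  rdmsrq(MSR_AMD64_SYSCFG, val);
  val |= MSR_AMD64_SYSCFG_MFDM;
  wrmsrq(MSR_AMD64_SYSCFG, val);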
*/ - wrmsrl(MSR_FS_BASE, ctxt->fs_base); - wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); + wrmsrq(MSR_FS_BASE, ctxt->fs_base); + wrmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); #else loadsegment(gs, ctxt->gs); #endif @@ -414,7 +415,7 @@ static int msr_build_context(const u32 *msr_id, const int num) u64 dummy; msr_array[i].info.msr_no = msr_id[j]; - msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy); + msr_array[i].valid = !rdmsrq_safe(msr_id[j], &dummy); msr_array[i].info.reg.q = 0; } saved_msrs->num = total_num; diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c index 5b81d19cd114..a7c23f2a58c9 100644 --- a/arch/x86/power/hibernate.c +++ b/arch/x86/power/hibernate.c @@ -42,6 +42,7 @@ unsigned long relocated_restore_code __visible; /** * pfn_is_nosave - check if given pfn is in the 'nosave' section + * @pfn: the page frame number to check. */ int pfn_is_nosave(unsigned long pfn) { @@ -86,7 +87,10 @@ static inline u32 compute_e820_crc32(struct e820_table *table) /** * arch_hibernation_header_save - populate the architecture specific part * of a hibernation image header - * @addr: address to save the data at + * @addr: address where architecture specific header data will be saved. + * @max_size: maximum size of architecture specific data in hibernation header. + * + * Return: 0 on success, -EOVERFLOW if max_size is insufficient. */ int arch_hibernation_header_save(void *addr, unsigned int max_size) { diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 5606a15cf9a1..fb910d9f8471 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -69,8 +69,7 @@ copy_loop: movl pbe_orig_address(%edx), %edi movl $(PAGE_SIZE >> 2), %ecx - rep - movsl + rep movsl movl pbe_next(%edx), %edx jmp copy_loop diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S index 66f066b8feda..c73be0a02a6c 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -138,8 +138,7 @@ SYM_FUNC_START(core_restore_code) movq pbe_address(%rdx), %rsi movq pbe_orig_address(%rdx), %rdi movq $(PAGE_SIZE >> 3), %rcx - rep - movsq + rep movsq /* progress to the next pbe */ movq pbe_next(%rdx), %rdx diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index f9bc444a3064..ed5c63c0b4e5 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -9,6 +9,7 @@ #include <asm/realmode.h> #include <asm/tlbflush.h> #include <asm/crash.h> +#include <asm/msr.h> #include <asm/sev.h> struct real_mode_header *real_mode_header; @@ -145,7 +146,7 @@ static void __init setup_real_mode(void) * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR * so we need to mask it out. 
*/ - rdmsrl(MSR_EFER, efer); + rdmsrq(MSR_EFER, efer); trampoline_header->efer = efer & ~EFER_LMA; trampoline_header->start = (u64) secondary_startup_64; diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 5770c8097f32..2c19d7fc8a85 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -64,6 +64,8 @@ BEGIN { modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])" force64_expr = "\\([df]64\\)" + invalid64_expr = "\\(i64\\)" + only64_expr = "\\(o64\\)" rex_expr = "^((REX(\\.[XRWB]+)+)|(REX$))" rex2_expr = "\\(REX2\\)" no_rex2_expr = "\\(!REX2\\)" @@ -319,6 +321,11 @@ function convert_operands(count,opnd, i,j,imm,mod) if (match(ext, force64_expr)) flags = add_flags(flags, "INAT_FORCE64") + # check invalid in 64-bit (and no only64) + if (match(ext, invalid64_expr) && + !match($0, only64_expr)) + flags = add_flags(flags, "INAT_INV64") + # check REX2 not allowed if (match(ext, no_rex2_expr)) flags = add_flags(flags, "INAT_NO_REX2") diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c index fc473ca12c44..942372e69b4d 100644 --- a/arch/x86/virt/svm/sev.c +++ b/arch/x86/virt/svm/sev.c @@ -27,9 +27,10 @@ #include <asm/smp.h> #include <asm/cpu.h> #include <asm/apic.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/cmdline.h> #include <asm/iommu.h> +#include <asm/msr.h> /* * The RMP entry information as returned by the RMPREAD instruction. @@ -136,11 +137,11 @@ static int __mfd_enable(unsigned int cpu) if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) return 0; - rdmsrl(MSR_AMD64_SYSCFG, val); + rdmsrq(MSR_AMD64_SYSCFG, val); val |= MSR_AMD64_SYSCFG_MFDM; - wrmsrl(MSR_AMD64_SYSCFG, val); + wrmsrq(MSR_AMD64_SYSCFG, val); return 0; } @@ -157,12 +158,12 @@ static int __snp_enable(unsigned int cpu) if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) return 0; - rdmsrl(MSR_AMD64_SYSCFG, val); + rdmsrq(MSR_AMD64_SYSCFG, val); val |= MSR_AMD64_SYSCFG_SNP_EN; val |= MSR_AMD64_SYSCFG_SNP_VMPL_EN; - wrmsrl(MSR_AMD64_SYSCFG, val); + wrmsrq(MSR_AMD64_SYSCFG, val); return 0; } @@ -522,7 +523,7 @@ int __init snp_rmptable_init(void) * Check if SEV-SNP is already enabled, this can happen in case of * kexec boot. 
*/ - rdmsrl(MSR_AMD64_SYSCFG, val); + rdmsrq(MSR_AMD64_SYSCFG, val); if (val & MSR_AMD64_SYSCFG_SNP_EN) goto skip_enable; @@ -576,8 +577,8 @@ static bool probe_contiguous_rmptable_info(void) { u64 rmp_sz, rmp_base, rmp_end; - rdmsrl(MSR_AMD64_RMP_BASE, rmp_base); - rdmsrl(MSR_AMD64_RMP_END, rmp_end); + rdmsrq(MSR_AMD64_RMP_BASE, rmp_base); + rdmsrq(MSR_AMD64_RMP_END, rmp_end); if (!(rmp_base & RMP_ADDR_MASK) || !(rmp_end & RMP_ADDR_MASK)) { pr_err("Memory for the RMP table has not been reserved by BIOS\n"); @@ -610,13 +611,13 @@ static bool probe_segmented_rmptable_info(void) unsigned int eax, ebx, segment_shift, segment_shift_min, segment_shift_max; u64 rmp_base, rmp_end; - rdmsrl(MSR_AMD64_RMP_BASE, rmp_base); + rdmsrq(MSR_AMD64_RMP_BASE, rmp_base); if (!(rmp_base & RMP_ADDR_MASK)) { pr_err("Memory for the RMP table has not been reserved by BIOS\n"); return false; } - rdmsrl(MSR_AMD64_RMP_END, rmp_end); + rdmsrq(MSR_AMD64_RMP_END, rmp_end); WARN_ONCE(rmp_end & RMP_ADDR_MASK, "Segmented RMP enabled but RMP_END MSR is non-zero\n"); @@ -652,7 +653,7 @@ static bool probe_segmented_rmptable_info(void) bool snp_probe_rmptable_info(void) { if (cpu_feature_enabled(X86_FEATURE_SEGMENTED_RMP)) - rdmsrl(MSR_AMD64_RMP_CFG, rmp_cfg); + rdmsrq(MSR_AMD64_RMP_CFG, rmp_cfg); if (rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED) return probe_segmented_rmptable_info(); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 846b5737d320..26bbaf4b7330 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -49,7 +49,7 @@ #include <xen/hvc-console.h> #include <xen/acpi.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/paravirt.h> #include <asm/apic.h> #include <asm/page.h> @@ -61,6 +61,7 @@ #include <asm/processor.h> #include <asm/proto.h> #include <asm/msr-index.h> +#include <asm/msr.h> #include <asm/traps.h> #include <asm/setup.h> #include <asm/desc.h> @@ -1086,15 +1087,15 @@ static void xen_write_cr4(unsigned long cr4) native_write_cr4(cr4); } -static u64 xen_do_read_msr(unsigned int msr, int *err) +static u64 xen_do_read_msr(u32 msr, int *err) { u64 val = 0; /* Avoid uninitialized value for safe variant. */ - if (pmu_msr_read(msr, &val, err)) + if (pmu_msr_chk_emulated(msr, &val, true)) return val; if (err) - val = native_read_msr_safe(msr, err); + *err = native_read_msr_safe(msr, &val); else val = native_read_msr(msr); @@ -1110,17 +1111,9 @@ static u64 xen_do_read_msr(unsigned int msr, int *err) return val; } -static void set_seg(unsigned int which, unsigned int low, unsigned int high, - int *err) +static void set_seg(u32 which, u64 base) { - u64 base = ((u64)high << 32) | low; - - if (HYPERVISOR_set_segment_base(which, base) == 0) - return; - - if (err) - *err = -EIO; - else + if (HYPERVISOR_set_segment_base(which, base)) WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base); } @@ -1129,20 +1122,19 @@ static void set_seg(unsigned int which, unsigned int low, unsigned int high, * With err == NULL write_msr() semantics are selected. * Supplying an err pointer requires err to be pre-initialized with 0. 
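Note the changed calling convention visible in xen_do_read_msr() above: native_read_msr_safe() now returns the error code and stores the value through a pointer, rather than returning the value and reporting the error through a pointer. A hedged sketch of a caller written against the new convention; the helper name and default value are placeholders:

  /* Sketch: safely read an MSR that may not exist on this CPU. */
  static u64 read_msr_or_default(u32 msr, u64 dflt)
  {
          u64 val;

          /* A non-zero return means the RDMSR faulted. */
          if (native_read_msr_safe(msr, &val))
                  return dflt;

          return val;
  }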
*/ -static void xen_do_write_msr(unsigned int msr, unsigned int low, - unsigned int high, int *err) +static void xen_do_write_msr(u32 msr, u64 val, int *err) { switch (msr) { case MSR_FS_BASE: - set_seg(SEGBASE_FS, low, high, err); + set_seg(SEGBASE_FS, val); break; case MSR_KERNEL_GS_BASE: - set_seg(SEGBASE_GS_USER, low, high, err); + set_seg(SEGBASE_GS_USER, val); break; case MSR_GS_BASE: - set_seg(SEGBASE_GS_KERNEL, low, high, err); + set_seg(SEGBASE_GS_KERNEL, val); break; case MSR_STAR: @@ -1158,42 +1150,45 @@ static void xen_do_write_msr(unsigned int msr, unsigned int low, break; default: - if (!pmu_msr_write(msr, low, high, err)) { - if (err) - *err = native_write_msr_safe(msr, low, high); - else - native_write_msr(msr, low, high); - } + if (pmu_msr_chk_emulated(msr, &val, false)) + return; + + if (err) + *err = native_write_msr_safe(msr, val); + else + native_write_msr(msr, val); } } -static u64 xen_read_msr_safe(unsigned int msr, int *err) +static int xen_read_msr_safe(u32 msr, u64 *val) { - return xen_do_read_msr(msr, err); + int err = 0; + + *val = xen_do_read_msr(msr, &err); + return err; } -static int xen_write_msr_safe(unsigned int msr, unsigned int low, - unsigned int high) +static int xen_write_msr_safe(u32 msr, u64 val) { int err = 0; - xen_do_write_msr(msr, low, high, &err); + xen_do_write_msr(msr, val, &err); return err; } -static u64 xen_read_msr(unsigned int msr) +static u64 xen_read_msr(u32 msr) { - int err; + int err = 0; return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL); } -static void xen_write_msr(unsigned int msr, unsigned low, unsigned high) +static void xen_write_msr(u32 msr, u64 val) { int err; - xen_do_write_msr(msr, low, high, xen_msr_safe ? &err : NULL); + xen_do_write_msr(msr, val, xen_msr_safe ? &err : NULL); } /* This is called once we have the cpu_possible_mask */ diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 38971c6dcd4b..2a4a8deaf612 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -578,7 +578,6 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val) xen_mc_issue(XEN_LAZY_MMU); } -#if CONFIG_PGTABLE_LEVELS >= 5 __visible p4dval_t xen_p4d_val(p4d_t p4d) { return pte_mfn_to_pfn(p4d.p4d); @@ -592,7 +591,6 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d) return native_make_p4d(p4d); } PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d); -#endif /* CONFIG_PGTABLE_LEVELS >= 5 */ static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, void (*func)(struct mm_struct *mm, struct page *, @@ -2222,10 +2220,8 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = { .alloc_pud = xen_alloc_pmd_init, .release_pud = xen_release_pmd_init, -#if CONFIG_PGTABLE_LEVELS >= 5 .p4d_val = PV_CALLEE_SAVE(xen_p4d_val), .make_p4d = PV_CALLEE_SAVE(xen_make_p4d), -#endif .enter_mmap = xen_enter_mmap, .exit_mmap = xen_exit_mmap, diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index f06987b0efc3..8f89ce0b67e3 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -2,6 +2,7 @@ #include <linux/types.h> #include <linux/interrupt.h> +#include <asm/msr.h> #include <asm/xen/hypercall.h> #include <xen/xen.h> #include <xen/page.h> @@ -128,7 +129,7 @@ static inline uint32_t get_fam15h_addr(u32 addr) return addr; } -static inline bool is_amd_pmu_msr(unsigned int msr) +static bool is_amd_pmu_msr(u32 msr) { if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) @@ -194,8 +195,7 @@ static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index) } } -static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int 
type, - int index, bool is_read) +static bool xen_intel_pmu_emulate(u32 msr, u64 *val, int type, int index, bool is_read) { uint64_t *reg = NULL; struct xen_pmu_intel_ctxt *ctxt; @@ -257,7 +257,7 @@ static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type, return false; } -static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) +static bool xen_amd_pmu_emulate(u32 msr, u64 *val, bool is_read) { uint64_t *reg = NULL; int i, off = 0; @@ -298,55 +298,20 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) return false; } -static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read, - bool *emul) +bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read) { int type, index = 0; if (is_amd_pmu_msr(msr)) - *emul = xen_amd_pmu_emulate(msr, val, is_read); - else if (is_intel_pmu_msr(msr, &type, &index)) - *emul = xen_intel_pmu_emulate(msr, val, type, index, is_read); - else - return false; - - return true; -} - -bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err) -{ - bool emulated; + return xen_amd_pmu_emulate(msr, val, is_read); - if (!pmu_msr_chk_emulated(msr, val, true, &emulated)) - return false; + if (is_intel_pmu_msr(msr, &type, &index)) + return xen_intel_pmu_emulate(msr, val, type, index, is_read); - if (!emulated) { - *val = err ? native_read_msr_safe(msr, err) - : native_read_msr(msr); - } - - return true; -} - -bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err) -{ - uint64_t val = ((uint64_t)high << 32) | low; - bool emulated; - - if (!pmu_msr_chk_emulated(msr, &val, false, &emulated)) - return false; - - if (!emulated) { - if (err) - *err = native_write_msr_safe(msr, low, high); - else - native_write_msr(msr, low, high); - } - - return true; + return false; } -static unsigned long long xen_amd_read_pmc(int counter) +static u64 xen_amd_read_pmc(int counter) { struct xen_pmu_amd_ctxt *ctxt; uint64_t *counter_regs; @@ -354,11 +319,12 @@ static unsigned long long xen_amd_read_pmc(int counter) uint8_t xenpmu_flags = get_xenpmu_flags(); if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) { - uint32_t msr; - int err; + u32 msr; + u64 val; msr = amd_counters_base + (counter * amd_msr_step); - return native_read_msr_safe(msr, &err); + native_read_msr_safe(msr, &val); + return val; } ctxt = &xenpmu_data->pmu.c.amd; @@ -366,7 +332,7 @@ static unsigned long long xen_amd_read_pmc(int counter) return counter_regs[counter]; } -static unsigned long long xen_intel_read_pmc(int counter) +static u64 xen_intel_read_pmc(int counter) { struct xen_pmu_intel_ctxt *ctxt; uint64_t *fixed_counters; @@ -375,15 +341,16 @@ static unsigned long long xen_intel_read_pmc(int counter) uint8_t xenpmu_flags = get_xenpmu_flags(); if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) { - uint32_t msr; - int err; + u32 msr; + u64 val; if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff); else msr = MSR_IA32_PERFCTR0 + counter; - return native_read_msr_safe(msr, &err); + native_read_msr_safe(msr, &val); + return val; } ctxt = &xenpmu_data->pmu.c.intel; @@ -396,7 +363,7 @@ static unsigned long long xen_intel_read_pmc(int counter) return arch_cntr_pair[counter].counter; } -unsigned long long xen_read_pmc(int counter) +u64 xen_read_pmc(int counter) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return xen_amd_read_pmc(counter); diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 77a6ea1c60e4..ba2f17e64321 100644 --- a/arch/x86/xen/suspend.c +++ 
b/arch/x86/xen/suspend.c @@ -13,6 +13,7 @@ #include <asm/xen/hypercall.h> #include <asm/xen/page.h> #include <asm/fixmap.h> +#include <asm/msr.h> #include "xen-ops.h" @@ -39,7 +40,7 @@ void xen_arch_post_suspend(int cancelled) static void xen_vcpu_notify_restore(void *data) { if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) - wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); + wrmsrq(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); /* Boot processor notified via generic timekeeping_resume() */ if (smp_processor_id() == 0) @@ -55,9 +56,9 @@ static void xen_vcpu_notify_suspend(void *data) tick_suspend_local(); if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { - rdmsrl(MSR_IA32_SPEC_CTRL, tmp); + rdmsrq(MSR_IA32_SPEC_CTRL, tmp); this_cpu_write(spec_ctrl, tmp); - wrmsrl(MSR_IA32_SPEC_CTRL, 0); + wrmsrq(MSR_IA32_SPEC_CTRL, 0); } } diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 25e318ef27d6..090349baec09 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -271,10 +271,9 @@ void xen_pmu_finish(int cpu); static inline void xen_pmu_init(int cpu) {} static inline void xen_pmu_finish(int cpu) {} #endif -bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); -bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); +bool pmu_msr_chk_emulated(u32 msr, u64 *val, bool is_read); int pmu_apic_update(uint32_t reg); -unsigned long long xen_read_pmc(int counter); +u64 xen_read_pmc(int counter); #ifdef CONFIG_SMP diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig index 91c4c4cae8a7..49f50d1bd724 100644 --- a/arch/xtensa/configs/cadence_csp_defconfig +++ b/arch/xtensa/configs/cadence_csp_defconfig @@ -1,6 +1,5 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_USELIB=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IRQ_TIME_ACCOUNTING=y diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index 183618090d05..223f1d452310 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c @@ -388,8 +388,7 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id) struct pt_regs *regs = get_irq_regs(); perf_sample_data_init(&data, 0, last_period); - if (perf_event_overflow(event, &data, regs)) - xtensa_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } rc = IRQ_HANDLED; diff --git a/block/Kconfig b/block/Kconfig index df8973bc0539..15027963472d 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -211,14 +211,6 @@ config BLK_INLINE_ENCRYPTION_FALLBACK source "block/partitions/Kconfig" -config BLK_MQ_PCI - def_bool PCI - -config BLK_MQ_VIRTIO - bool - depends on VIRTIO - default y - config BLK_PM def_bool PM diff --git a/block/Makefile b/block/Makefile index 3a941dc0d27f..c65f4da93702 100644 --- a/block/Makefile +++ b/block/Makefile @@ -5,13 +5,12 @@ obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ - blk-merge.o blk-timeout.o \ - blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ + blk-merge.o blk-timeout.o blk-lib.o blk-mq.o \ + blk-mq-tag.o blk-mq-dma.o blk-stat.o \ blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \ genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \ disk-events.o blk-ia-ranges.o early-lookup.o -obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o diff --git a/block/bdev.c b/block/bdev.c index 889ec6e002d7..b77ddd12dc06 100644 --- 
a/block/bdev.c +++ b/block/bdev.c @@ -1335,7 +1335,8 @@ void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask) generic_fill_statx_atomic_writes(stat, queue_atomic_write_unit_min_bytes(bd_queue), - queue_atomic_write_unit_max_bytes(bd_queue)); + queue_atomic_write_unit_max_bytes(bd_queue), + 0); } stat->blksize = bdev_io_min(bdev); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index abd80dc13562..0cb1e9873aab 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -7210,8 +7210,8 @@ static void bfq_exit_queue(struct elevator_queue *e) #endif blk_stat_disable_accounting(bfqd->queue); - clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags); - wbt_enable_default(bfqd->queue->disk); + blk_queue_flag_clear(QUEUE_FLAG_DISABLE_WBT_DEF, bfqd->queue); + set_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT, &e->flags); kfree(bfqd); } @@ -7397,7 +7397,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) /* We dispatch from request queue wide instead of hw queue */ blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q); - set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags); + blk_queue_flag_set(QUEUE_FLAG_DISABLE_WBT_DEF, q); wbt_disable_default(q->disk); blk_stat_enable_accounting(q); diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 43ef6bd06c85..cb94e9be26dc 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -127,10 +127,8 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, if (bip->bip_vcnt > 0) { struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1]; - bool same_page = false; - if (bvec_try_merge_hw_page(q, bv, page, len, offset, - &same_page)) { + if (bvec_try_merge_hw_page(q, bv, page, len, offset)) { bip->bip_iter.bi_size += len; return len; } diff --git a/block/bio.c b/block/bio.c index 4be592d37fb6..3c0a558c90f5 100644 --- a/block/bio.c +++ b/block/bio.c @@ -251,6 +251,7 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, bio->bi_flags = 0; bio->bi_ioprio = 0; bio->bi_write_hint = 0; + bio->bi_write_stream = 0; bio->bi_status = 0; bio->bi_iter.bi_sector = 0; bio->bi_iter.bi_size = 0; @@ -827,6 +828,7 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) bio_set_flag(bio, BIO_CLONED); bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; + bio->bi_write_stream = bio_src->bi_write_stream; bio->bi_iter = bio_src->bi_iter; if (bio->bi_bdev) { @@ -918,7 +920,7 @@ static inline bool bio_full(struct bio *bio, unsigned len) } static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, - unsigned int len, unsigned int off, bool *same_page) + unsigned int len, unsigned int off) { size_t bv_end = bv->bv_offset + bv->bv_len; phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; @@ -931,9 +933,7 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) return false; - *same_page = ((vec_end_addr & PAGE_MASK) == ((page_addr + off) & - PAGE_MASK)); - if (!*same_page) { + if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) { if (IS_ENABLED(CONFIG_KMSAN)) return false; if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE) @@ -953,8 +953,7 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, * helpers to split. Hopefully this will go away soon. 
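The new bi_write_stream field introduced here travels with the bio exactly like bi_write_hint: it is zeroed in bio_init(), copied by the clone paths, and the merge code further down refuses to combine bios carrying different streams. A hedged sketch of a submitter tagging a write; the stream number is a placeholder, and 0 (the bio_init() default) keeps today's untagged behaviour:

  /* Sketch: associate a write bio with a device write stream before
   * submission.  Valid non-zero values are bounded by the queue's
   * advertised write-stream limit. */
  static void submit_streamed_write(struct bio *bio, unsigned int stream)
  {
          bio->bi_write_stream = stream;
          submit_bio(bio);
  }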
*/ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, - struct page *page, unsigned len, unsigned offset, - bool *same_page) + struct page *page, unsigned len, unsigned offset) { unsigned long mask = queue_segment_boundary(q); phys_addr_t addr1 = bvec_phys(bv); @@ -964,7 +963,7 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, return false; if (len > queue_max_segment_size(q) - bv->bv_len) return false; - return bvec_try_merge_page(bv, page, len, offset, same_page); + return bvec_try_merge_page(bv, page, len, offset); } /** @@ -990,6 +989,22 @@ void __bio_add_page(struct bio *bio, struct page *page, EXPORT_SYMBOL_GPL(__bio_add_page); /** + * bio_add_virt_nofail - add data in the direct kernel mapping to a bio + * @bio: destination bio + * @vaddr: data to add + * @len: length of the data to add, may cross pages + * + * Add the data at @vaddr to @bio. The caller must have ensure a segment + * is available for the added data. No merging into an existing segment + * will be performed. + */ +void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len) +{ + __bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr)); +} +EXPORT_SYMBOL_GPL(bio_add_virt_nofail); + +/** * bio_add_page - attempt to add page(s) to bio * @bio: destination bio * @page: start page to add @@ -1002,8 +1017,6 @@ EXPORT_SYMBOL_GPL(__bio_add_page); int bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { - bool same_page = false; - if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return 0; if (bio->bi_iter.bi_size > UINT_MAX - len) @@ -1011,7 +1024,7 @@ int bio_add_page(struct bio *bio, struct page *page, if (bio->bi_vcnt > 0 && bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], - page, len, offset, &same_page)) { + page, len, offset)) { bio->bi_iter.bi_size += len; return len; } @@ -1058,6 +1071,61 @@ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, } EXPORT_SYMBOL(bio_add_folio); +/** + * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio + * @bio: destination bio + * @vaddr: vmalloc address to add + * @len: total length in bytes of the data to add + * + * Add data starting at @vaddr to @bio and return how many bytes were added. + * This may be less than the amount originally asked. Returns 0 if no data + * could be added to @bio. + * + * This helper calls flush_kernel_vmap_range() for the range added. For reads + * the caller still needs to manually call invalidate_kernel_vmap_range() in + * the completion handler. + */ +unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len) +{ + unsigned int offset = offset_in_page(vaddr); + + len = min(len, PAGE_SIZE - offset); + if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len) + return 0; + if (op_is_write(bio_op(bio))) + flush_kernel_vmap_range(vaddr, len); + return len; +} +EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk); + +/** + * bio_add_vmalloc - add a vmalloc region to a bio + * @bio: destination bio + * @vaddr: vmalloc address to add + * @len: total length in bytes of the data to add + * + * Add data starting at @vaddr to @bio. Return %true on success or %false if + * @bio does not have enough space for the payload. + * + * This helper calls flush_kernel_vmap_range() for the range added. For reads + * the caller still needs to manually call invalidate_kernel_vmap_range() in + * the completion handler. 
+ */ +bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len) +{ + do { + unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len); + + if (!added) + return false; + vaddr += added; + len -= added; + } while (len); + + return true; +} +EXPORT_SYMBOL_GPL(bio_add_vmalloc); + void __bio_release_pages(struct bio *bio, bool mark_dirty) { struct folio_iter fi; @@ -1088,27 +1156,6 @@ void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter) bio_set_flag(bio, BIO_CLONED); } -static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len, - size_t offset) -{ - bool same_page = false; - - if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len)) - return -EIO; - - if (bio->bi_vcnt > 0 && - bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], - folio_page(folio, 0), len, offset, - &same_page)) { - bio->bi_iter.bi_size += len; - if (same_page && bio_flagged(bio, BIO_PAGE_PINNED)) - unpin_user_folio(folio, 1); - return 0; - } - bio_add_folio_nofail(bio, folio, len, offset); - return 0; -} - static unsigned int get_contig_folio_len(unsigned int *num_pages, struct page **pages, unsigned int i, struct folio *folio, size_t left, @@ -1203,6 +1250,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) for (left = size, i = 0; left > 0; left -= len, i += num_pages) { struct page *page = pages[i]; struct folio *folio = page_folio(page); + unsigned int old_vcnt = bio->bi_vcnt; folio_offset = ((size_t)folio_page_idx(folio, page) << PAGE_SHIFT) + offset; @@ -1215,7 +1263,23 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) len = get_contig_folio_len(&num_pages, pages, i, folio, left, offset); - bio_iov_add_folio(bio, folio, len, folio_offset); + if (!bio_add_folio(bio, folio, len, folio_offset)) { + WARN_ON_ONCE(1); + ret = -EINVAL; + goto out; + } + + if (bio_flagged(bio, BIO_PAGE_PINNED)) { + /* + * We're adding another fragment of a page that already + * was part of the last segment. Undo our pin as the + * page was pinned when an earlier fragment of it was + * added to the bio and __bio_release_pages expects a + * single pin per page. + */ + if (offset && bio->bi_vcnt == old_vcnt) + unpin_user_folio(folio, 1); + } offset = 0; } @@ -1301,6 +1365,36 @@ int submit_bio_wait(struct bio *bio) } EXPORT_SYMBOL(submit_bio_wait); +/** + * bdev_rw_virt - synchronously read into / write from kernel mapping + * @bdev: block device to access + * @sector: sector to access + * @data: data to read/write + * @len: length in byte to read/write + * @op: operation (e.g. REQ_OP_READ/REQ_OP_WRITE) + * + * Performs synchronous I/O to @bdev for @data/@len. @data must be in + * the kernel direct mapping and not a vmalloc address. 
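bio_add_vmalloc() above flushes the vmap alias for writes, but as its kernel-doc says, reads still require the caller to invalidate the alias once the data has landed. A hedged sketch of a synchronous read that honours that rule; names and error handling are simplified, and bio_add_max_vecs() is assumed to be the sizing helper that the reworked bio_map_kern() below relies on:

  /* Sketch only: read len bytes from bdev into a vmalloc'ed buffer. */
  static int read_into_vmalloc(struct block_device *bdev, sector_t sector,
                               void *vbuf, unsigned int len)
  {
          struct bio *bio;
          int ret;

          bio = bio_alloc(bdev, bio_add_max_vecs(vbuf, len), REQ_OP_READ,
                          GFP_KERNEL);
          bio->bi_iter.bi_sector = sector;

          if (!bio_add_vmalloc(bio, vbuf, len)) {
                  bio_put(bio);
                  return -EINVAL;         /* not enough bio slots */
          }

          ret = submit_bio_wait(bio);

          /* Reads: drop stale cache lines in the vmap alias before use. */
          invalidate_kernel_vmap_range(vbuf, len);

          bio_put(bio);
          return ret;
  }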
+ */ +int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data, + size_t len, enum req_op op) +{ + struct bio_vec bv; + struct bio bio; + int error; + + if (WARN_ON_ONCE(is_vmalloc_addr(data))) + return -EIO; + + bio_init(&bio, bdev, &bv, 1, op); + bio.bi_iter.bi_sector = sector; + bio_add_virt_nofail(&bio, data, len); + error = submit_bio_wait(&bio); + bio_uninit(&bio); + return error; +} +EXPORT_SYMBOL_GPL(bdev_rw_virt); + static void bio_wait_end_io(struct bio *bio) { complete(bio->bi_private); diff --git a/block/blk-core.c b/block/blk-core.c index e8cc270a453f..b862c66018f2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1018,7 +1018,7 @@ again: stamp = READ_ONCE(part->bd_stamp); if (unlikely(time_after(now, stamp)) && likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) && - (end || part_in_flight(part))) + (end || bdev_count_inflight(part))) __part_stat_add(part, io_ticks, now - stamp); if (bdev_is_partition(part)) { diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index f154be0b575a..005c9157ffb3 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -173,6 +173,7 @@ static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src) bio_set_flag(bio, BIO_REMAPPED); bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; + bio->bi_write_stream = bio_src->bi_write_stream; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; diff --git a/block/blk-map.c b/block/blk-map.c index d2f22744b3d1..23e5d5ebe59e 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -317,64 +317,26 @@ static void bio_map_kern_endio(struct bio *bio) kfree(bio); } -/** - * bio_map_kern - map kernel address into bio - * @q: the struct request_queue for the bio - * @data: pointer to buffer to map - * @len: length in bytes - * @gfp_mask: allocation flags for bio allocation - * - * Map the kernel address into a bio suitable for io to a block - * device. Returns an error pointer in case of error. 
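For the even more common case of a buffer in the kernel direct mapping, bdev_rw_virt() added above rolls the on-stack bio, submit_bio_wait() and cleanup into a single call. A hedged usage sketch; the sector and length are placeholders:

  /* Sketch: synchronously read 4KiB from the start of the device into a
   * kmalloc'ed buffer.  vmalloc memory is rejected by bdev_rw_virt(). */
  static int read_first_block(struct block_device *bdev, void *buf)
  {
          return bdev_rw_virt(bdev, 0, buf, 4096, REQ_OP_READ);
  }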
- */ -static struct bio *bio_map_kern(struct request_queue *q, void *data, - unsigned int len, gfp_t gfp_mask) +static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op, + gfp_t gfp_mask) { - unsigned long kaddr = (unsigned long)data; - unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - unsigned long start = kaddr >> PAGE_SHIFT; - const int nr_pages = end - start; - bool is_vmalloc = is_vmalloc_addr(data); - struct page *page; - int offset, i; + unsigned int nr_vecs = bio_add_max_vecs(data, len); struct bio *bio; - bio = bio_kmalloc(nr_pages, gfp_mask); + bio = bio_kmalloc(nr_vecs, gfp_mask); if (!bio) return ERR_PTR(-ENOMEM); - bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0); - - if (is_vmalloc) { - flush_kernel_vmap_range(data, len); + bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, op); + if (is_vmalloc_addr(data)) { bio->bi_private = data; - } - - offset = offset_in_page(kaddr); - for (i = 0; i < nr_pages; i++) { - unsigned int bytes = PAGE_SIZE - offset; - - if (len <= 0) - break; - - if (bytes > len) - bytes = len; - - if (!is_vmalloc) - page = virt_to_page(data); - else - page = vmalloc_to_page(data); - if (bio_add_page(bio, page, bytes, offset) < bytes) { - /* we don't support partial mappings */ + if (!bio_add_vmalloc(bio, data, len)) { bio_uninit(bio); kfree(bio); return ERR_PTR(-EINVAL); } - - data += bytes; - len -= bytes; - offset = 0; + } else { + bio_add_virt_nofail(bio, data, len); } - bio->bi_end_io = bio_map_kern_endio; return bio; } @@ -402,17 +364,16 @@ static void bio_copy_kern_endio_read(struct bio *bio) /** * bio_copy_kern - copy kernel address into bio - * @q: the struct request_queue for the bio * @data: pointer to buffer to copy * @len: length in bytes + * @op: bio/request operation * @gfp_mask: allocation flags for bio and page allocation - * @reading: data direction is READ * * copy the kernel address into a bio suitable for io to a block * device. Returns an error pointer in case of error. 
*/ -static struct bio *bio_copy_kern(struct request_queue *q, void *data, - unsigned int len, gfp_t gfp_mask, int reading) +static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op, + gfp_t gfp_mask) { unsigned long kaddr = (unsigned long)data; unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -431,7 +392,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data, bio = bio_kmalloc(nr_pages, gfp_mask); if (!bio) return ERR_PTR(-ENOMEM); - bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0); + bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, op); while (len) { struct page *page; @@ -444,7 +405,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data, if (!page) goto cleanup; - if (!reading) + if (op_is_write(op)) memcpy(page_address(page), p, bytes); if (bio_add_page(bio, page, bytes, 0) < bytes) @@ -454,11 +415,11 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data, p += bytes; } - if (reading) { + if (op_is_write(op)) { + bio->bi_end_io = bio_copy_kern_endio; + } else { bio->bi_end_io = bio_copy_kern_endio_read; bio->bi_private = data; - } else { - bio->bi_end_io = bio_copy_kern_endio; } return bio; @@ -556,8 +517,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, if (map_data) copy = true; - else if (blk_queue_may_bounce(q)) - copy = true; else if (iov_iter_alignment(iter) & align) copy = true; else if (iov_iter_is_bvec(iter)) @@ -689,7 +648,6 @@ EXPORT_SYMBOL(blk_rq_unmap_user); /** * blk_rq_map_kern - map kernel data to a request, for passthrough requests - * @q: request queue where request should be inserted * @rq: request to fill * @kbuf: the kernel buffer * @len: length of user data @@ -700,31 +658,26 @@ EXPORT_SYMBOL(blk_rq_unmap_user); * buffer is used. Can be called multiple times to append multiple * buffers. */ -int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, - unsigned int len, gfp_t gfp_mask) +int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len, + gfp_t gfp_mask) { - int reading = rq_data_dir(rq) == READ; unsigned long addr = (unsigned long) kbuf; struct bio *bio; int ret; - if (len > (queue_max_hw_sectors(q) << 9)) + if (len > (queue_max_hw_sectors(rq->q) << SECTOR_SHIFT)) return -EINVAL; if (!len || !kbuf) return -EINVAL; - if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) || - blk_queue_may_bounce(q)) - bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); + if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf)) + bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask); else - bio = bio_map_kern(q, kbuf, len, gfp_mask); + bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask); if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_opf &= ~REQ_OP_MASK; - bio->bi_opf |= req_op(rq); - ret = blk_rq_append_bio(rq, bio); if (unlikely(ret)) { bio_uninit(bio); diff --git a/block/blk-merge.c b/block/blk-merge.c index fdd4efb54c6c..3af1d284add5 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -7,7 +7,6 @@ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/blk-integrity.h> -#include <linux/scatterlist.h> #include <linux/part_stat.h> #include <linux/blk-cgroup.h> @@ -226,27 +225,6 @@ static inline unsigned get_max_io_size(struct bio *bio, } /** - * get_max_segment_size() - maximum number of bytes to add as a single segment - * @lim: Request queue limits. 
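With the kernel-mapping helpers reworked, blk_rq_map_kern() also drops its request_queue argument, since the queue is always reachable as rq->q. Callers simply lose the first parameter; roughly, and only as an illustration:

  /* Before this series (illustrative caller): */
  ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);

  /* After: the helper uses rq->q internally. */
  ret = blk_rq_map_kern(rq, buffer, bufflen, GFP_NOIO);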
- * @paddr: address of the range to add - * @len: maximum length available to add at @paddr - * - * Returns the maximum number of bytes of the range starting at @paddr that can - * be added to a single segment. - */ -static inline unsigned get_max_segment_size(const struct queue_limits *lim, - phys_addr_t paddr, unsigned int len) -{ - /* - * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1 - * after having calculated the minimum. - */ - return min_t(unsigned long, len, - min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr), - (unsigned long)lim->max_segment_size - 1) + 1); -} - -/** * bvec_split_segs - verify whether or not a bvec should be split in the middle * @lim: [in] queue limits to split based on * @bv: [in] bvec to examine @@ -473,117 +451,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq) return nr_phys_segs; } -struct phys_vec { - phys_addr_t paddr; - u32 len; -}; - -static bool blk_map_iter_next(struct request *req, - struct req_iterator *iter, struct phys_vec *vec) -{ - unsigned int max_size; - struct bio_vec bv; - - if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { - if (!iter->bio) - return false; - vec->paddr = bvec_phys(&req->special_vec); - vec->len = req->special_vec.bv_len; - iter->bio = NULL; - return true; - } - - if (!iter->iter.bi_size) - return false; - - bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); - vec->paddr = bvec_phys(&bv); - max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX); - bv.bv_len = min(bv.bv_len, max_size); - bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len); - - /* - * If we are entirely done with this bi_io_vec entry, check if the next - * one could be merged into it. This typically happens when moving to - * the next bio, but some callers also don't pack bvecs tight. - */ - while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) { - struct bio_vec next; - - if (!iter->iter.bi_size) { - if (!iter->bio->bi_next) - break; - iter->bio = iter->bio->bi_next; - iter->iter = iter->bio->bi_iter; - } - - next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); - if (bv.bv_len + next.bv_len > max_size || - !biovec_phys_mergeable(req->q, &bv, &next)) - break; - - bv.bv_len += next.bv_len; - bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len); - } - - vec->len = bv.bv_len; - return true; -} - -static inline struct scatterlist *blk_next_sg(struct scatterlist **sg, - struct scatterlist *sglist) -{ - if (!*sg) - return sglist; - - /* - * If the driver previously mapped a shorter list, we could see a - * termination bit prematurely unless it fully inits the sg table - * on each mapping. We KNOW that there must be more entries here - * or the driver would be buggy, so force clear the termination bit - * to avoid doing a full sg_init_table() in drivers for each command. - */ - sg_unmark_end(*sg); - return sg_next(*sg); -} - -/* - * Map a request to scatterlist, return number of sg entries setup. Caller - * must make sure sg can hold rq->nr_phys_segments entries. 
- */ -int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist, - struct scatterlist **last_sg) -{ - struct req_iterator iter = { - .bio = rq->bio, - }; - struct phys_vec vec; - int nsegs = 0; - - /* the internal flush request may not have bio attached */ - if (iter.bio) - iter.iter = iter.bio->bi_iter; - - while (blk_map_iter_next(rq, &iter, &vec)) { - *last_sg = blk_next_sg(last_sg, sglist); - sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len, - offset_in_page(vec.paddr)); - nsegs++; - } - - if (*last_sg) - sg_mark_end(*last_sg); - - /* - * Something must have been wrong if the figured number of - * segment is bigger than number of req's physical segments - */ - WARN_ON(nsegs > blk_rq_nr_phys_segments(rq)); - - return nsegs; -} -EXPORT_SYMBOL(__blk_rq_map_sg); - static inline unsigned int blk_rq_get_max_sectors(struct request *rq, sector_t offset) { @@ -832,6 +699,8 @@ static struct request *attempt_merge(struct request_queue *q, if (req->bio->bi_write_hint != next->bio->bi_write_hint) return NULL; + if (req->bio->bi_write_stream != next->bio->bi_write_stream) + return NULL; if (req->bio->bi_ioprio != next->bio->bi_ioprio) return NULL; if (!blk_atomic_write_mergeable_rqs(req, next)) @@ -953,6 +822,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) return false; if (rq->bio->bi_write_hint != bio->bi_write_hint) return false; + if (rq->bio->bi_write_stream != bio->bi_write_stream) + return false; if (rq->bio->bi_ioprio != bio->bi_ioprio) return false; if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 3421b5521fe2..29b3540dd180 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -93,6 +93,8 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(RQ_ALLOC_TIME), QUEUE_FLAG_NAME(HCTX_ACTIVE), QUEUE_FLAG_NAME(SQ_SCHED), + QUEUE_FLAG_NAME(DISABLE_WBT_DEF), + QUEUE_FLAG_NAME(NO_ELV_SWITCH), }; #undef QUEUE_FLAG_NAME @@ -624,20 +626,9 @@ void blk_mq_debugfs_register(struct request_queue *q) debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs); - /* - * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir - * didn't exist yet (because we don't know what to name the directory - * until the queue is registered to a gendisk). - */ - if (q->elevator && !q->sched_debugfs_dir) - blk_mq_debugfs_register_sched(q); - - /* Similarly, blk_mq_init_hctx() couldn't do this previously. 
*/ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx->debugfs_dir) blk_mq_debugfs_register_hctx(q, hctx); - if (q->elevator && !hctx->sched_debugfs_dir) - blk_mq_debugfs_register_sched_hctx(q, hctx); } if (q->rq_qos) { diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c new file mode 100644 index 000000000000..82bae475dfa4 --- /dev/null +++ b/block/blk-mq-dma.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Christoph Hellwig + */ +#include "blk.h" + +struct phys_vec { + phys_addr_t paddr; + u32 len; +}; + +static bool blk_map_iter_next(struct request *req, struct req_iterator *iter, + struct phys_vec *vec) +{ + unsigned int max_size; + struct bio_vec bv; + + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { + if (!iter->bio) + return false; + vec->paddr = bvec_phys(&req->special_vec); + vec->len = req->special_vec.bv_len; + iter->bio = NULL; + return true; + } + + if (!iter->iter.bi_size) + return false; + + bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); + vec->paddr = bvec_phys(&bv); + max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX); + bv.bv_len = min(bv.bv_len, max_size); + bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len); + + /* + * If we are entirely done with this bi_io_vec entry, check if the next + * one could be merged into it. This typically happens when moving to + * the next bio, but some callers also don't pack bvecs tight. + */ + while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) { + struct bio_vec next; + + if (!iter->iter.bi_size) { + if (!iter->bio->bi_next) + break; + iter->bio = iter->bio->bi_next; + iter->iter = iter->bio->bi_iter; + } + + next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter); + if (bv.bv_len + next.bv_len > max_size || + !biovec_phys_mergeable(req->q, &bv, &next)) + break; + + bv.bv_len += next.bv_len; + bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len); + } + + vec->len = bv.bv_len; + return true; +} + +static inline struct scatterlist * +blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist) +{ + if (!*sg) + return sglist; + + /* + * If the driver previously mapped a shorter list, we could see a + * termination bit prematurely unless it fully inits the sg table + * on each mapping. We KNOW that there must be more entries here + * or the driver would be buggy, so force clear the termination bit + * to avoid doing a full sg_init_table() in drivers for each command. + */ + sg_unmark_end(*sg); + return sg_next(*sg); +} + +/* + * Map a request to scatterlist, return number of sg entries setup. Caller + * must make sure sg can hold rq->nr_phys_segments entries. 
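Moving the scatterlist mapping into the new blk-mq-dma.c does not change its contract: the caller still sizes the table from the request's physical segment count, as the comment above requires, and then hands the result to the DMA API. A hedged sketch of that driver-side pattern; the driver and device names are placeholders:

  /* Sketch: map a request into a driver-owned scatterlist that has room
   * for blk_rq_nr_phys_segments(rq) entries. */
  static int my_driver_map_rq(struct device *dma_dev, struct request *rq,
                              struct scatterlist *sgl)
  {
          struct scatterlist *last = NULL;
          int nsegs;

          sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
          nsegs = __blk_rq_map_sg(rq, sgl, &last);
          if (!nsegs)
                  return -EIO;

          /* Hand the physical segments to the IOMMU/DMA layer; 0 mapped
           * entries indicates a mapping failure. */
          nsegs = dma_map_sg(dma_dev, sgl, nsegs, rq_dma_dir(rq));
          if (!nsegs)
                  return -EIO;

          return nsegs;
  }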
+ */ +int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist, + struct scatterlist **last_sg) +{ + struct req_iterator iter = { + .bio = rq->bio, + }; + struct phys_vec vec; + int nsegs = 0; + + /* the internal flush request may not have bio attached */ + if (iter.bio) + iter.iter = iter.bio->bi_iter; + + while (blk_map_iter_next(rq, &iter, &vec)) { + *last_sg = blk_next_sg(last_sg, sglist); + sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len, + offset_in_page(vec.paddr)); + nsegs++; + } + + if (*last_sg) + sg_mark_end(*last_sg); + + /* + * Something must have been wrong if the figured number of + * segment is bigger than number of req's physical segments + */ + WARN_ON(nsegs > blk_rq_nr_phys_segments(rq)); + + return nsegs; +} +EXPORT_SYMBOL(__blk_rq_map_sg); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 109611445d40..55a0fd105147 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -59,19 +59,17 @@ static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list) list_first_entry(rq_list, struct request, queuelist)->mq_hctx; struct request *rq; LIST_HEAD(hctx_list); - unsigned int count = 0; list_for_each_entry(rq, rq_list, queuelist) { if (rq->mq_hctx != hctx) { list_cut_before(&hctx_list, rq_list, &rq->queuelist); goto dispatch; } - count++; } list_splice_tail_init(rq_list, &hctx_list); dispatch: - return blk_mq_dispatch_rq_list(hctx, &hctx_list, count); + return blk_mq_dispatch_rq_list(hctx, &hctx_list, false); } #define BLK_MQ_BUDGET_DELAY 3 /* ms units */ @@ -167,7 +165,7 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) dispatched |= blk_mq_dispatch_hctx_list(&rq_list); } while (!list_empty(&rq_list)); } else { - dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count); + dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, false); } if (busy) @@ -261,7 +259,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) /* round robin for fair dispatch */ ctx = blk_mq_next_ctx(hctx, rq->mq_ctx); - } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1)); + } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, false)); WRITE_ONCE(hctx->dispatch_from, ctx); return ret; @@ -298,7 +296,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); - if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) + if (!blk_mq_dispatch_rq_list(hctx, &rq_list, true)) return 0; need_dispatch = true; } else { @@ -312,7 +310,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) if (need_dispatch) return blk_mq_do_dispatch_ctx(hctx); blk_mq_flush_busy_ctxs(hctx, &rq_list); - blk_mq_dispatch_rq_list(hctx, &rq_list, 0); + blk_mq_dispatch_rq_list(hctx, &rq_list, true); return 0; } @@ -436,6 +434,30 @@ static int blk_mq_init_sched_shared_tags(struct request_queue *queue) return 0; } +void blk_mq_sched_reg_debugfs(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned long i; + + mutex_lock(&q->debugfs_mutex); + blk_mq_debugfs_register_sched(q); + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_debugfs_register_sched_hctx(q, hctx); + mutex_unlock(&q->debugfs_mutex); +} + +void blk_mq_sched_unreg_debugfs(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned long i; + + mutex_lock(&q->debugfs_mutex); + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_debugfs_unregister_sched_hctx(hctx); + blk_mq_debugfs_unregister_sched(q); + mutex_unlock(&q->debugfs_mutex); +} + /* caller must have a reference to @e, will grab 
another one if successful */ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) { @@ -469,10 +491,6 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) if (ret) goto err_free_map_and_rqs; - mutex_lock(&q->debugfs_mutex); - blk_mq_debugfs_register_sched(q); - mutex_unlock(&q->debugfs_mutex); - queue_for_each_hw_ctx(q, hctx, i) { if (e->ops.init_hctx) { ret = e->ops.init_hctx(hctx, i); @@ -484,11 +502,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return ret; } } - mutex_lock(&q->debugfs_mutex); - blk_mq_debugfs_register_sched_hctx(q, hctx); - mutex_unlock(&q->debugfs_mutex); } - return 0; err_free_map_and_rqs: @@ -527,10 +541,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) unsigned int flags = 0; queue_for_each_hw_ctx(q, hctx, i) { - mutex_lock(&q->debugfs_mutex); - blk_mq_debugfs_unregister_sched_hctx(hctx); - mutex_unlock(&q->debugfs_mutex); - if (e->type->ops.exit_hctx && hctx->sched_data) { e->type->ops.exit_hctx(hctx, i); hctx->sched_data = NULL; @@ -538,12 +548,9 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) flags = hctx->flags; } - mutex_lock(&q->debugfs_mutex); - blk_mq_debugfs_unregister_sched(q); - mutex_unlock(&q->debugfs_mutex); - if (e->type->ops.exit_sched) e->type->ops.exit_sched(e); blk_mq_sched_tags_teardown(q, flags); + set_bit(ELEVATOR_FLAG_DYING, &q->elevator->flags); q->elevator = NULL; } diff --git a/block/blk-mq.c b/block/blk-mq.c index c2697db59109..4806b867e37d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -89,7 +89,7 @@ struct mq_inflight { unsigned int inflight[2]; }; -static bool blk_mq_check_inflight(struct request *rq, void *priv) +static bool blk_mq_check_in_driver(struct request *rq, void *priv) { struct mq_inflight *mi = priv; @@ -101,24 +101,14 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv) return true; } -unsigned int blk_mq_in_flight(struct request_queue *q, - struct block_device *part) +void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2]) { struct mq_inflight mi = { .part = part }; - blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); - - return mi.inflight[0] + mi.inflight[1]; -} - -void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, - unsigned int inflight[2]) -{ - struct mq_inflight mi = { .part = part }; - - blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); - inflight[0] = mi.inflight[0]; - inflight[1] = mi.inflight[1]; + blk_mq_queue_tag_busy_iter(bdev_get_queue(part), blk_mq_check_in_driver, + &mi); + inflight[READ] = mi.inflight[READ]; + inflight[WRITE] = mi.inflight[WRITE]; } #ifdef CONFIG_LOCKDEP @@ -584,9 +574,13 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q, struct blk_mq_alloc_data data = { .q = q, .flags = flags, + .shallow_depth = 0, .cmd_flags = opf, + .rq_flags = 0, .nr_tags = plug->nr_ios, .cached_rqs = &plug->cached_rqs, + .ctx = NULL, + .hctx = NULL }; struct request *rq; @@ -646,8 +640,13 @@ struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, struct blk_mq_alloc_data data = { .q = q, .flags = flags, + .shallow_depth = 0, .cmd_flags = opf, + .rq_flags = 0, .nr_tags = 1, + .cached_rqs = NULL, + .ctx = NULL, + .hctx = NULL }; int ret; @@ -675,8 +674,13 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, struct blk_mq_alloc_data data = { .q = q, .flags = flags, + .shallow_depth = 0, .cmd_flags = opf, + .rq_flags = 0, .nr_tags = 1, + .cached_rqs = 
NULL, + .ctx = NULL, + .hctx = NULL }; u64 alloc_time_ns = 0; struct request *rq; @@ -2080,7 +2084,7 @@ static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, * Returns true if we did some work AND can potentially do more. */ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, - unsigned int nr_budgets) + bool get_budget) { enum prep_dispatch prep; struct request_queue *q = hctx->queue; @@ -2102,7 +2106,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, rq = list_first_entry(list, struct request, queuelist); WARN_ON_ONCE(hctx != rq->mq_hctx); - prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); + prep = blk_mq_prep_dispatch_rq(rq, get_budget); if (prep != PREP_DISPATCH_OK) break; @@ -2111,12 +2115,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, bd.rq = rq; bd.last = list_empty(list); - /* - * once the request is queued to lld, no need to cover the - * budget any more - */ - if (nr_budgets) - nr_budgets--; ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { case BLK_STS_OK: @@ -2150,7 +2148,11 @@ out: ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) || blk_mq_is_shared_tags(hctx->flags)); - if (nr_budgets) + /* + * If the caller allocated budgets, free the budgets of the + * requests that have not yet been passed to the block driver. + */ + if (!get_budget) blk_mq_release_budgets(q, list); spin_lock(&hctx->lock); @@ -2778,15 +2780,15 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) return __blk_mq_issue_directly(hctx, rq, last); } -static void blk_mq_plug_issue_direct(struct blk_plug *plug) +static void blk_mq_issue_direct(struct rq_list *rqs) { struct blk_mq_hw_ctx *hctx = NULL; struct request *rq; int queued = 0; blk_status_t ret = BLK_STS_OK; - while ((rq = rq_list_pop(&plug->mq_list))) { - bool last = rq_list_empty(&plug->mq_list); + while ((rq = rq_list_pop(rqs))) { + bool last = rq_list_empty(rqs); if (hctx != rq->mq_hctx) { if (hctx) { @@ -2817,15 +2819,64 @@ out: blk_mq_commit_rqs(hctx, queued, false); } -static void __blk_mq_flush_plug_list(struct request_queue *q, - struct blk_plug *plug) +static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs) { if (blk_queue_quiesced(q)) return; - q->mq_ops->queue_rqs(&plug->mq_list); + q->mq_ops->queue_rqs(rqs); +} + +static unsigned blk_mq_extract_queue_requests(struct rq_list *rqs, + struct rq_list *queue_rqs) +{ + struct request *rq = rq_list_pop(rqs); + struct request_queue *this_q = rq->q; + struct request **prev = &rqs->head; + struct rq_list matched_rqs = {}; + struct request *last = NULL; + unsigned depth = 1; + + rq_list_add_tail(&matched_rqs, rq); + while ((rq = *prev)) { + if (rq->q == this_q) { + /* move rq from rqs to matched_rqs */ + *prev = rq->rq_next; + rq_list_add_tail(&matched_rqs, rq); + depth++; + } else { + /* leave rq in rqs */ + prev = &rq->rq_next; + last = rq; + } + } + + rqs->tail = last; + *queue_rqs = matched_rqs; + return depth; +} + +static void blk_mq_dispatch_queue_requests(struct rq_list *rqs, unsigned depth) +{ + struct request_queue *q = rq_list_peek(rqs)->q; + + trace_block_unplug(q, depth, true); + + /* + * Peek first request and see if we have a ->queue_rqs() hook. + * If we do, we can dispatch the whole list in one go. + * We already know at this point that all requests belong to the + * same queue, caller must ensure that's the case. 
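blk_mq_extract_queue_requests() above peels every request belonging to the first request's queue out of a singly linked rq_list, using a pointer-to-pointer cursor so unlinking needs no back pointer. A standalone sketch of that unlink pattern on a toy list (struct node and extract_matching() are illustrative names only):

#include <stdio.h>

struct node {
	int key;              /* stands in for rq->q */
	struct node *next;
};

/*
 * Move every node whose key matches the head's key from *list into a
 * returned list; remaining nodes keep their order.  The struct node **
 * cursor lets us unlink without a prev pointer, as in
 * blk_mq_extract_queue_requests().
 */
static struct node *extract_matching(struct node **list, int *depth)
{
	struct node *matched = NULL, **mtail = &matched;
	struct node **prev = list;
	int key = (*list)->key;

	*depth = 0;
	while (*prev) {
		struct node *n = *prev;

		if (n->key == key) {
			*prev = n->next;      /* unlink from source list */
			n->next = NULL;
			*mtail = n;           /* append to matched list */
			mtail = &n->next;
			(*depth)++;
		} else {
			prev = &n->next;      /* leave in place */
		}
	}
	return matched;
}

int main(void)
{
	struct node c = { 1, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *list = &a, *same;
	int depth;

	same = extract_matching(&list, &depth);
	printf("extracted %d node(s) with key %d\n", depth, same->key);
	return 0;
}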
+ */ + if (q->mq_ops->queue_rqs) { + blk_mq_run_dispatch_ops(q, __blk_mq_flush_list(q, rqs)); + if (rq_list_empty(rqs)) + return; + } + + blk_mq_run_dispatch_ops(q, blk_mq_issue_direct(rqs)); } -static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) +static void blk_mq_dispatch_list(struct rq_list *rqs, bool from_sched) { struct blk_mq_hw_ctx *this_hctx = NULL; struct blk_mq_ctx *this_ctx = NULL; @@ -2835,7 +2886,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) LIST_HEAD(list); do { - struct request *rq = rq_list_pop(&plug->mq_list); + struct request *rq = rq_list_pop(rqs); if (!this_hctx) { this_hctx = rq->mq_hctx; @@ -2848,9 +2899,9 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) } list_add_tail(&rq->queuelist, &list); depth++; - } while (!rq_list_empty(&plug->mq_list)); + } while (!rq_list_empty(rqs)); - plug->mq_list = requeue_list; + *rqs = requeue_list; trace_block_unplug(this_hctx->queue, depth, !from_sched); percpu_ref_get(&this_hctx->queue->q_usage_counter); @@ -2870,9 +2921,21 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) percpu_ref_put(&this_hctx->queue->q_usage_counter); } +static void blk_mq_dispatch_multiple_queue_requests(struct rq_list *rqs) +{ + do { + struct rq_list queue_rqs; + unsigned depth; + + depth = blk_mq_extract_queue_requests(rqs, &queue_rqs); + blk_mq_dispatch_queue_requests(&queue_rqs, depth); + while (!rq_list_empty(&queue_rqs)) + blk_mq_dispatch_list(&queue_rqs, false); + } while (!rq_list_empty(rqs)); +} + void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { - struct request *rq; unsigned int depth; /* @@ -2887,34 +2950,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) depth = plug->rq_count; plug->rq_count = 0; - if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { - struct request_queue *q; - - rq = rq_list_peek(&plug->mq_list); - q = rq->q; - trace_block_unplug(q, depth, true); - - /* - * Peek first request and see if we have a ->queue_rqs() hook. - * If we do, we can dispatch the whole plug list in one go. We - * already know at this point that all requests belong to the - * same queue, caller must ensure that's the case. - */ - if (q->mq_ops->queue_rqs) { - blk_mq_run_dispatch_ops(q, - __blk_mq_flush_plug_list(q, plug)); - if (rq_list_empty(&plug->mq_list)) - return; + if (!plug->has_elevator && !from_schedule) { + if (plug->multiple_queues) { + blk_mq_dispatch_multiple_queue_requests(&plug->mq_list); + return; } - blk_mq_run_dispatch_ops(q, - blk_mq_plug_issue_direct(plug)); + blk_mq_dispatch_queue_requests(&plug->mq_list, depth); if (rq_list_empty(&plug->mq_list)) return; } do { - blk_mq_dispatch_plug_list(plug, from_schedule); + blk_mq_dispatch_list(&plug->mq_list, from_schedule); } while (!rq_list_empty(&plug->mq_list)); } @@ -2969,8 +3017,14 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q, { struct blk_mq_alloc_data data = { .q = q, - .nr_tags = 1, + .flags = 0, + .shallow_depth = 0, .cmd_flags = bio->bi_opf, + .rq_flags = 0, + .nr_tags = 1, + .cached_rqs = NULL, + .ctx = NULL, + .hctx = NULL }; struct request *rq; @@ -3080,8 +3134,6 @@ void blk_mq_submit_bio(struct bio *bio) goto new_request; } - bio = blk_queue_bounce(bio, q); - /* * The cached request already holds a q_usage_counter reference and we * don't have to acquire a new one if we use it. 
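blk_mq_dispatch_queue_requests() and blk_mq_dispatch_multiple_queue_requests() above split a plug's request list per queue and prefer the driver's bulk ->queue_rqs() hook, falling back to one-at-a-time issue for whatever the driver left on the list. A compressed, hypothetical sketch of that policy with toy types (toy_list, toy_ops, bulk_take_two are not kernel names):

#include <stdio.h>

/* Toy request list and driver ops; names are illustrative only. */
struct toy_list { int remaining; };

struct toy_ops {
	/* optional bulk hook: consumes as much of the list as it can */
	void (*queue_rqs)(struct toy_list *l);
};

static void issue_one_by_one(struct toy_list *l)
{
	while (l->remaining) {
		printf("issue single request\n");
		l->remaining--;
	}
}

/* Mirrors the "bulk first, then per-request fallback" policy. */
static void dispatch(struct toy_ops *ops, struct toy_list *l)
{
	if (ops->queue_rqs) {
		ops->queue_rqs(l);
		if (!l->remaining)
			return;       /* driver took everything */
	}
	issue_one_by_one(l);          /* leftovers, or no bulk hook at all */
}

static void bulk_take_two(struct toy_list *l)
{
	int take = l->remaining > 2 ? 2 : l->remaining;

	printf("bulk issue %d request(s)\n", take);
	l->remaining -= take;
}

int main(void)
{
	struct toy_ops ops = { .queue_rqs = bulk_take_two };
	struct toy_list list = { .remaining = 3 };

	dispatch(&ops, &list);
	return 0;
}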
@@ -4094,8 +4146,6 @@ static void blk_mq_map_swqueue(struct request_queue *q) struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; - mutex_lock(&q->elevator_lock); - queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; @@ -4200,8 +4250,6 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } - - mutex_unlock(&q->elevator_lock); } /* @@ -4505,16 +4553,9 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, } static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, - struct request_queue *q, bool lock) + struct request_queue *q) { - if (lock) { - /* protect against switching io scheduler */ - mutex_lock(&q->elevator_lock); - __blk_mq_realloc_hw_ctxs(set, q); - mutex_unlock(&q->elevator_lock); - } else { - __blk_mq_realloc_hw_ctxs(set, q); - } + __blk_mq_realloc_hw_ctxs(set, q); /* unregister cpuhp callbacks for exited hctxs */ blk_mq_remove_hw_queues_cpuhp(q); @@ -4546,7 +4587,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, xa_init(&q->hctx_table); - blk_mq_realloc_hw_ctxs(set, q, false); + blk_mq_realloc_hw_ctxs(set, q); if (!q->nr_hw_queues) goto err_hctxs; @@ -4563,8 +4604,8 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, q->nr_requests = set->queue_depth; blk_mq_init_cpu_queues(q, set->nr_hw_queues); - blk_mq_add_queue_tag_set(set, q); blk_mq_map_swqueue(q); + blk_mq_add_queue_tag_set(set, q); return 0; err_hctxs: @@ -4784,6 +4825,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) goto out_free_srcu; } + init_rwsem(&set->update_nr_hwq_lock); + ret = -ENOMEM; set->tags = kcalloc_node(set->nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, @@ -4923,88 +4966,10 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) return ret; } -/* - * request_queue and elevator_type pair. - * It is just used by __blk_mq_update_nr_hw_queues to cache - * the elevator_type associated with a request_queue. - */ -struct blk_mq_qe_pair { - struct list_head node; - struct request_queue *q; - struct elevator_type *type; -}; - -/* - * Cache the elevator_type in qe pair list and switch the - * io scheduler to 'none' - */ -static bool blk_mq_elv_switch_none(struct list_head *head, - struct request_queue *q) -{ - struct blk_mq_qe_pair *qe; - - qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); - if (!qe) - return false; - - /* Accessing q->elevator needs protection from ->elevator_lock. 
*/ - mutex_lock(&q->elevator_lock); - - if (!q->elevator) { - kfree(qe); - goto unlock; - } - - INIT_LIST_HEAD(&qe->node); - qe->q = q; - qe->type = q->elevator->type; - /* keep a reference to the elevator module as we'll switch back */ - __elevator_get(qe->type); - list_add(&qe->node, head); - elevator_disable(q); -unlock: - mutex_unlock(&q->elevator_lock); - - return true; -} - -static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head, - struct request_queue *q) -{ - struct blk_mq_qe_pair *qe; - - list_for_each_entry(qe, head, node) - if (qe->q == q) - return qe; - - return NULL; -} - -static void blk_mq_elv_switch_back(struct list_head *head, - struct request_queue *q) -{ - struct blk_mq_qe_pair *qe; - struct elevator_type *t; - - qe = blk_lookup_qe_pair(head, q); - if (!qe) - return; - t = qe->type; - list_del(&qe->node); - kfree(qe); - - mutex_lock(&q->elevator_lock); - elevator_switch(q, t); - /* drop the reference acquired in blk_mq_elv_switch_none */ - elevator_put(t); - mutex_unlock(&q->elevator_lock); -} - static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) { struct request_queue *q; - LIST_HEAD(head); int prev_nr_hw_queues = set->nr_hw_queues; unsigned int memflags; int i; @@ -5019,30 +4984,24 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, return; memflags = memalloc_noio_save(); - list_for_each_entry(q, &set->tag_list, tag_set_list) - blk_mq_freeze_queue_nomemsave(q); - - /* - * Switch IO scheduler to 'none', cleaning up the data associated - * with the previous scheduler. We will switch back once we are done - * updating the new sw to hw queue mappings. - */ - list_for_each_entry(q, &set->tag_list, tag_set_list) - if (!blk_mq_elv_switch_none(&head, q)) - goto switch_back; - list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_debugfs_unregister_hctxs(q); blk_mq_sysfs_unregister_hctxs(q); } - if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_freeze_queue_nomemsave(q); + + if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) { + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_unfreeze_queue_nomemrestore(q); goto reregister; + } fallback: blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { - blk_mq_realloc_hw_ctxs(set, q, true); + __blk_mq_realloc_hw_ctxs(set, q); if (q->nr_hw_queues != set->nr_hw_queues) { int i = prev_nr_hw_queues; @@ -5058,18 +5017,18 @@ fallback: blk_mq_map_swqueue(q); } + /* elv_update_nr_hw_queues() unfreeze queue for us */ + list_for_each_entry(q, &set->tag_list, tag_set_list) + elv_update_nr_hw_queues(q); + reregister: list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_sysfs_register_hctxs(q); blk_mq_debugfs_register_hctxs(q); - } - -switch_back: - list_for_each_entry(q, &set->tag_list, tag_set_list) - blk_mq_elv_switch_back(&head, q); - list_for_each_entry(q, &set->tag_list, tag_set_list) - blk_mq_unfreeze_queue_nomemrestore(q); + blk_mq_remove_hw_queues_cpuhp(q); + blk_mq_add_hw_queues_cpuhp(q); + } memalloc_noio_restore(memflags); /* Free the excess tags when nr_hw_queues shrink. 
*/ @@ -5079,9 +5038,11 @@ switch_back: void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) { + down_write(&set->update_nr_hwq_lock); mutex_lock(&set->tag_list_lock); __blk_mq_update_nr_hw_queues(set, nr_hw_queues); mutex_unlock(&set->tag_list_lock); + up_write(&set->update_nr_hwq_lock); } EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); diff --git a/block/blk-mq.h b/block/blk-mq.h index 3011a78cf16a..affb2e14b56e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -48,7 +48,7 @@ void blk_mq_exit_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *, - unsigned int); + bool); void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start); @@ -246,10 +246,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) return hctx->nr_ctx && hctx->tags; } -unsigned int blk_mq_in_flight(struct request_queue *q, - struct block_device *part); -void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, - unsigned int inflight[2]); +void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2]); static inline void blk_mq_put_dispatch_budget(struct request_queue *q, int budget_token) diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c index 95982bc46ba1..848591fb3c57 100644 --- a/block/blk-rq-qos.c +++ b/block/blk-rq-qos.c @@ -2,6 +2,8 @@ #include "blk-rq-qos.h" +__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos); + /* * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded, * false if 'v' + 1 would be bigger than 'below'. 
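The blk-rq-qos.c/h hunks introduce a global static key, block_rq_qos, incremented in rq_qos_add() and decremented in rq_qos_exit(), so the per-I/O hooks cost only a patched branch when no policy is attached anywhere. Userspace has no static_branch_unlikely(); a rough analogue of the pattern, hedged and simplified, just keeps an enable count in front of the per-queue check:

#include <stdio.h>

/*
 * Userspace stand-in for DEFINE_STATIC_KEY_FALSE(block_rq_qos): in the
 * kernel the "enabled" test is a patched branch, here it is a plain
 * counter checked before the per-I/O work.
 */
static int rq_qos_users;                 /* static_branch_inc/dec */

struct queue { void *rq_qos; };          /* per-queue policy list */

static void qos_enable(struct queue *q, void *policy)
{
	q->rq_qos = policy;
	rq_qos_users++;                  /* static_branch_inc(&block_rq_qos) */
}

static void qos_disable(struct queue *q)
{
	q->rq_qos = NULL;
	rq_qos_users--;                  /* static_branch_dec(&block_rq_qos) */
}

/* Hot path: global fast check first, per-queue pointer second. */
static void qos_throttle(struct queue *q)
{
	if (rq_qos_users && q->rq_qos)
		printf("throttling hook runs\n");
	else
		printf("fast path, no qos work\n");
}

int main(void)
{
	struct queue q = { 0 };
	int policy = 1;

	qos_throttle(&q);
	qos_enable(&q, &policy);
	qos_throttle(&q);
	qos_disable(&q);
	return 0;
}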
@@ -317,6 +319,7 @@ void rq_qos_exit(struct request_queue *q) struct rq_qos *rqos = q->rq_qos; q->rq_qos = rqos->next; rqos->ops->exit(rqos); + static_branch_dec(&block_rq_qos); } mutex_unlock(&q->rq_qos_mutex); } @@ -343,6 +346,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id, goto ebusy; rqos->next = q->rq_qos; q->rq_qos = rqos; + static_branch_inc(&block_rq_qos); blk_mq_unfreeze_queue(q, memflags); diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index 37245c97ee61..39749f4066fb 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -12,6 +12,7 @@ #include "blk-mq-debugfs.h" struct blk_mq_debugfs_attr; +extern struct static_key_false block_rq_qos; enum rq_qos_id { RQ_QOS_WBT, @@ -112,31 +113,33 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos); static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio) { - if (q->rq_qos) + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) __rq_qos_cleanup(q->rq_qos, bio); } static inline void rq_qos_done(struct request_queue *q, struct request *rq) { - if (q->rq_qos && !blk_rq_is_passthrough(rq)) + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos && + !blk_rq_is_passthrough(rq)) __rq_qos_done(q->rq_qos, rq); } static inline void rq_qos_issue(struct request_queue *q, struct request *rq) { - if (q->rq_qos) + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) __rq_qos_issue(q->rq_qos, rq); } static inline void rq_qos_requeue(struct request_queue *q, struct request *rq) { - if (q->rq_qos) + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) __rq_qos_requeue(q->rq_qos, rq); } static inline void rq_qos_done_bio(struct bio *bio) { - if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) || + if (static_branch_unlikely(&block_rq_qos) && + bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) || bio_flagged(bio, BIO_QOS_MERGED))) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); if (q->rq_qos) @@ -146,7 +149,7 @@ static inline void rq_qos_done_bio(struct bio *bio) static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio) { - if (q->rq_qos) { + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) { bio_set_flag(bio, BIO_QOS_THROTTLED); __rq_qos_throttle(q->rq_qos, bio); } @@ -155,14 +158,14 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio) static inline void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio) { - if (q->rq_qos) + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) __rq_qos_track(q->rq_qos, rq, bio); } static inline void rq_qos_merge(struct request_queue *q, struct request *rq, struct bio *bio) { - if (q->rq_qos) { + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) { bio_set_flag(bio, BIO_QOS_MERGED); __rq_qos_merge(q->rq_qos, rq, bio); } @@ -170,7 +173,7 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq, static inline void rq_qos_queue_depth_changed(struct request_queue *q) { - if (q->rq_qos) + if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) __rq_qos_queue_depth_changed(q->rq_qos); } diff --git a/block/blk-settings.c b/block/blk-settings.c index 4817e7ca03f8..a000daafbfb4 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -124,11 +124,6 @@ static int blk_validate_integrity_limits(struct queue_limits *lim) return 0; } - if (lim->features & BLK_FEAT_BOUNCE_HIGH) { - pr_warn("no bounce buffer support for integrity metadata\n"); - return -EINVAL; - } - if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { pr_warn("integrity 
support disabled.\n"); return -EINVAL; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 1f9b45b0b9ee..b2b9b89d6967 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -134,6 +134,8 @@ QUEUE_SYSFS_LIMIT_SHOW(max_segments) QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments) QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments) QUEUE_SYSFS_LIMIT_SHOW(max_segment_size) +QUEUE_SYSFS_LIMIT_SHOW(max_write_streams) +QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity) QUEUE_SYSFS_LIMIT_SHOW(logical_block_size) QUEUE_SYSFS_LIMIT_SHOW(physical_block_size) QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors) @@ -488,6 +490,8 @@ QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb"); QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments"); QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments"); QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size"); +QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams"); +QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity"); QUEUE_RW_ENTRY(elv_iosched, "scheduler"); QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size"); @@ -560,7 +564,7 @@ static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page) ssize_t ret; struct request_queue *q = disk->queue; - mutex_lock(&q->elevator_lock); + mutex_lock(&disk->rqos_state_mutex); if (!wbt_rq_qos(q)) { ret = -EINVAL; goto out; @@ -573,7 +577,7 @@ static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page) ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000)); out: - mutex_unlock(&q->elevator_lock); + mutex_unlock(&disk->rqos_state_mutex); return ret; } @@ -593,7 +597,6 @@ static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page, return -EINVAL; memflags = blk_mq_freeze_queue(q); - mutex_lock(&q->elevator_lock); rqos = wbt_rq_qos(q); if (!rqos) { @@ -618,11 +621,12 @@ static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page, */ blk_mq_quiesce_queue(q); + mutex_lock(&disk->rqos_state_mutex); wbt_set_min_lat(q, val); + mutex_unlock(&disk->rqos_state_mutex); blk_mq_unquiesce_queue(q); out: - mutex_unlock(&q->elevator_lock); blk_mq_unfreeze_queue(q, memflags); return ret; @@ -642,6 +646,8 @@ static struct attribute *queue_attrs[] = { &queue_max_discard_segments_entry.attr, &queue_max_integrity_segments_entry.attr, &queue_max_segment_size_entry.attr, + &queue_max_write_streams_entry.attr, + &queue_write_stream_granularity_entry.attr, &queue_hw_sector_size_entry.attr, &queue_logical_block_size_entry.attr, &queue_physical_block_size_entry.attr, @@ -869,16 +875,9 @@ int blk_register_queue(struct gendisk *disk) if (ret) goto out_unregister_ia_ranges; - mutex_lock(&q->elevator_lock); - if (q->elevator) { - ret = elv_register_queue(q, false); - if (ret) { - mutex_unlock(&q->elevator_lock); - goto out_crypto_sysfs_unregister; - } - } + if (queue_is_mq(q)) + elevator_set_default(q); wbt_enable_default(disk); - mutex_unlock(&q->elevator_lock); blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); @@ -902,8 +901,6 @@ int blk_register_queue(struct gendisk *disk) return ret; -out_crypto_sysfs_unregister: - blk_crypto_sysfs_unregister(disk); out_unregister_ia_ranges: disk_unregister_independent_access_ranges(disk); out_debugfs_remove: @@ -951,10 +948,6 @@ void blk_unregister_queue(struct gendisk *disk) blk_mq_sysfs_unregister(disk); blk_crypto_sysfs_unregister(disk); - mutex_lock(&q->elevator_lock); - elv_unregister_queue(q); - mutex_unlock(&q->elevator_lock); - mutex_lock(&q->sysfs_lock); 
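The blk-sysfs.c hunk above exposes max_write_streams and write_stream_granularity as read-only queue attributes. A small illustrative userspace reader, assuming the standard /sys/block/<disk>/queue/ layout; the device name used below is only an example, not part of the patch:

#include <stdio.h>

/* Read one numeric sysfs queue attribute; returns -1 on any failure. */
static long read_queue_attr(const char *disk, const char *attr)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* "nvme0n1" is only an example device. */
	printf("max_write_streams: %ld\n",
	       read_queue_attr("nvme0n1", "max_write_streams"));
	printf("write_stream_granularity: %ld\n",
	       read_queue_attr("nvme0n1", "write_stream_granularity"));
	return 0;
}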
disk_unregister_independent_access_ranges(disk); mutex_unlock(&q->sysfs_lock); @@ -963,5 +956,8 @@ void blk_unregister_queue(struct gendisk *disk) kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE); kobject_del(&disk->queue_kobj); + if (queue_is_mq(q)) + elevator_set_none(q); + blk_debugfs_remove(disk); } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index d6dd2e047874..bd15357f23bd 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -143,7 +143,8 @@ static inline unsigned int throtl_bio_data_size(struct bio *bio) static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) { INIT_LIST_HEAD(&qn->node); - bio_list_init(&qn->bios); + bio_list_init(&qn->bios_bps); + bio_list_init(&qn->bios_iops); qn->tg = tg; } @@ -151,18 +152,32 @@ static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it * @bio: bio being added * @qn: qnode to add bio to - * @queued: the service_queue->queued[] list @qn belongs to + * @sq: the service_queue @qn belongs to * - * Add @bio to @qn and put @qn on @queued if it's not already on. + * Add @bio to @qn and put @qn on @sq->queued if it's not already on. * @qn->tg's reference count is bumped when @qn is activated. See the * comment on top of throtl_qnode definition for details. */ static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, - struct list_head *queued) + struct throtl_service_queue *sq) { - bio_list_add(&qn->bios, bio); + bool rw = bio_data_dir(bio); + + /* + * Split bios have already been throttled by bps, so they are + * directly queued into the iops path. + */ + if (bio_flagged(bio, BIO_TG_BPS_THROTTLED) || + bio_flagged(bio, BIO_BPS_THROTTLED)) { + bio_list_add(&qn->bios_iops, bio); + sq->nr_queued_iops[rw]++; + } else { + bio_list_add(&qn->bios_bps, bio); + sq->nr_queued_bps[rw]++; + } + if (list_empty(&qn->node)) { - list_add_tail(&qn->node, queued); + list_add_tail(&qn->node, &sq->queued[rw]); blkg_get(tg_to_blkg(qn->tg)); } } @@ -170,6 +185,10 @@ static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, /** * throtl_peek_queued - peek the first bio on a qnode list * @queued: the qnode list to peek + * + * Always take a bio from the head of the iops queue first. If the queue is + * empty, we then take it from the bps queue to maintain the overall idea of + * fetching bios from the head. */ static struct bio *throtl_peek_queued(struct list_head *queued) { @@ -180,28 +199,33 @@ static struct bio *throtl_peek_queued(struct list_head *queued) return NULL; qn = list_first_entry(queued, struct throtl_qnode, node); - bio = bio_list_peek(&qn->bios); + bio = bio_list_peek(&qn->bios_iops); + if (!bio) + bio = bio_list_peek(&qn->bios_bps); WARN_ON_ONCE(!bio); return bio; } /** * throtl_pop_queued - pop the first bio form a qnode list - * @queued: the qnode list to pop a bio from + * @sq: the service_queue to pop a bio from * @tg_to_put: optional out argument for throtl_grp to put + * @rw: read/write * - * Pop the first bio from the qnode list @queued. After popping, the first - * qnode is removed from @queued if empty or moved to the end of @queued so - * that the popping order is round-robin. + * Pop the first bio from the qnode list @sq->queued. Note that we firstly + * focus on the iops list because bios are ultimately dispatched from it. + * After popping, the first qnode is removed from @sq->queued if empty or moved + * to the end of @sq->queued so that the popping order is round-robin. 
* * When the first qnode is removed, its associated throtl_grp should be put * too. If @tg_to_put is NULL, this function automatically puts it; * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is * responsible for putting it. */ -static struct bio *throtl_pop_queued(struct list_head *queued, - struct throtl_grp **tg_to_put) +static struct bio *throtl_pop_queued(struct throtl_service_queue *sq, + struct throtl_grp **tg_to_put, bool rw) { + struct list_head *queued = &sq->queued[rw]; struct throtl_qnode *qn; struct bio *bio; @@ -209,10 +233,17 @@ static struct bio *throtl_pop_queued(struct list_head *queued, return NULL; qn = list_first_entry(queued, struct throtl_qnode, node); - bio = bio_list_pop(&qn->bios); + bio = bio_list_pop(&qn->bios_iops); + if (bio) { + sq->nr_queued_iops[rw]--; + } else { + bio = bio_list_pop(&qn->bios_bps); + if (bio) + sq->nr_queued_bps[rw]--; + } WARN_ON_ONCE(!bio); - if (bio_list_empty(&qn->bios)) { + if (bio_list_empty(&qn->bios_bps) && bio_list_empty(&qn->bios_iops)) { list_del_init(&qn->node); if (tg_to_put) *tg_to_put = qn->tg; @@ -520,6 +551,9 @@ static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, unsigned long jiffy_end) { + if (!time_before(tg->slice_end[rw], jiffy_end)) + return; + throtl_set_slice_end(tg, rw, jiffy_end); throtl_log(&tg->service_queue, "[%c] extend slice start=%lu end=%lu jiffies=%lu", @@ -536,6 +570,11 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw) return true; } +static unsigned int sq_queued(struct throtl_service_queue *sq, int type) +{ + return sq->nr_queued_bps[type] + sq->nr_queued_iops[type]; +} + static unsigned int calculate_io_allowed(u32 iops_limit, unsigned long jiffy_elapsed) { @@ -571,6 +610,48 @@ static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed) return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ); } +static long long throtl_trim_bps(struct throtl_grp *tg, bool rw, + unsigned long time_elapsed) +{ + u64 bps_limit = tg_bps_limit(tg, rw); + long long bytes_trim; + + if (bps_limit == U64_MAX) + return 0; + + /* Need to consider the case of bytes_allowed overflow. */ + bytes_trim = calculate_bytes_allowed(bps_limit, time_elapsed); + if (bytes_trim <= 0 || tg->bytes_disp[rw] < bytes_trim) { + bytes_trim = tg->bytes_disp[rw]; + tg->bytes_disp[rw] = 0; + } else { + tg->bytes_disp[rw] -= bytes_trim; + } + + return bytes_trim; +} + +static int throtl_trim_iops(struct throtl_grp *tg, bool rw, + unsigned long time_elapsed) +{ + u32 iops_limit = tg_iops_limit(tg, rw); + int io_trim; + + if (iops_limit == UINT_MAX) + return 0; + + /* Need to consider the case of io_allowed overflow. */ + io_trim = calculate_io_allowed(iops_limit, time_elapsed); + if (io_trim <= 0 || tg->io_disp[rw] < io_trim) { + io_trim = tg->io_disp[rw]; + tg->io_disp[rw] = 0; + } else { + tg->io_disp[rw] -= io_trim; + } + + return io_trim; +} + /* Trim the used slices and adjust slice start accordingly */ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) { @@ -612,22 +693,11 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) * one extra slice is preserved for deviation. 
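throtl_peek_queued() and throtl_pop_queued() above now service two bio lists per qnode: bios that already passed the bps gate sit on bios_iops and are always taken first, with bios_bps as the fallback, so dispatch still looks FIFO from the outside. A toy two-list helper showing the same pick order (toy_bio/toy_qnode are stand-in types):

#include <stdio.h>
#include <stddef.h>

struct toy_bio { int id; struct toy_bio *next; };

struct toy_qnode {
	struct toy_bio *bios_iops;   /* already charged for bps */
	struct toy_bio *bios_bps;    /* still waiting on the bps gate */
};

/* Pop from the iops list first, then fall back to the bps list. */
static struct toy_bio *qnode_pop(struct toy_qnode *qn)
{
	struct toy_bio **head = qn->bios_iops ? &qn->bios_iops : &qn->bios_bps;
	struct toy_bio *bio = *head;

	if (bio)
		*head = bio->next;
	return bio;
}

int main(void)
{
	struct toy_bio b1 = { 1, NULL }, b2 = { 2, NULL };
	struct toy_qnode qn = { .bios_iops = &b2, .bios_bps = &b1 };
	struct toy_bio *bio;

	while ((bio = qnode_pop(&qn)))
		printf("dispatch bio %d\n", bio->id);   /* 2 then 1 */
	return 0;
}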
*/ time_elapsed -= tg->td->throtl_slice; - bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw), - time_elapsed); - io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed); - if (bytes_trim <= 0 && io_trim <= 0) + bytes_trim = throtl_trim_bps(tg, rw, time_elapsed); + io_trim = throtl_trim_iops(tg, rw, time_elapsed); + if (!bytes_trim && !io_trim) return; - if ((long long)tg->bytes_disp[rw] >= bytes_trim) - tg->bytes_disp[rw] -= bytes_trim; - else - tg->bytes_disp[rw] = 0; - - if ((int)tg->io_disp[rw] >= io_trim) - tg->io_disp[rw] -= io_trim; - else - tg->io_disp[rw] = 0; - tg->slice_start[rw] += time_elapsed; throtl_log(&tg->service_queue, @@ -643,21 +713,41 @@ static void __tg_update_carryover(struct throtl_grp *tg, bool rw, unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw]; u64 bps_limit = tg_bps_limit(tg, rw); u32 iops_limit = tg_iops_limit(tg, rw); + long long bytes_allowed; + int io_allowed; + + /* + * If the queue is empty, carryover handling is not needed. In such cases, + * tg->[bytes/io]_disp should be reset to 0 to avoid impacting the dispatch + * of subsequent bios. The same handling applies when the previous BPS/IOPS + * limit was set to max. + */ + if (sq_queued(&tg->service_queue, rw) == 0) { + tg->bytes_disp[rw] = 0; + tg->io_disp[rw] = 0; + return; + } /* * If config is updated while bios are still throttled, calculate and - * accumulate how many bytes/ios are waited across changes. And - * carryover_bytes/ios will be used to calculate new wait time under new - * configuration. + * accumulate how many bytes/ios are waited across changes. And use the + * calculated carryover (@bytes/@ios) to update [bytes/io]_disp, which + * will be used to calculate new wait time under new configuration. + * And we need to consider the case of bytes/io_allowed overflow. */ - if (bps_limit != U64_MAX) - *bytes = calculate_bytes_allowed(bps_limit, jiffy_elapsed) - - tg->bytes_disp[rw]; - if (iops_limit != UINT_MAX) - *ios = calculate_io_allowed(iops_limit, jiffy_elapsed) - - tg->io_disp[rw]; - tg->bytes_disp[rw] -= *bytes; - tg->io_disp[rw] -= *ios; + if (bps_limit != U64_MAX) { + bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed); + if (bytes_allowed > 0) + *bytes = bytes_allowed - tg->bytes_disp[rw]; + } + if (iops_limit != UINT_MAX) { + io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed); + if (io_allowed > 0) + *ios = io_allowed - tg->io_disp[rw]; + } + + tg->bytes_disp[rw] = -*bytes; + tg->io_disp[rw] = -*ios; } static void tg_update_carryover(struct throtl_grp *tg) @@ -665,12 +755,10 @@ static void tg_update_carryover(struct throtl_grp *tg) long long bytes[2] = {0}; int ios[2] = {0}; - if (tg->service_queue.nr_queued[READ]) - __tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]); - if (tg->service_queue.nr_queued[WRITE]) - __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]); + __tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]); + __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]); - /* see comments in struct throtl_grp for meaning of these fields. */ + /* see comments in struct throtl_grp for meaning of carryover. 
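__tg_update_carryover() above drops the separate carryover_bytes/ios fields: when limits change while bios are queued, bytes_disp/io_disp are set to the negative of the budget already earned but not used, so the next wait-time calculation under the new limit automatically honours that credit. A small worked sketch of the arithmetic, with made-up numbers and a simplified bytes_allowed() helper:

#include <stdio.h>
#include <stdint.h>

/* bytes allowed by a bps limit over an elapsed time, in whole seconds here */
static int64_t bytes_allowed(int64_t bps, int64_t seconds)
{
	return bps * seconds;
}

int main(void)
{
	int64_t bytes_disp = 100 * 1024;            /* already dispatched */
	int64_t old_bps = 1024 * 1024, new_bps = 512 * 1024;
	int64_t elapsed = 1;                        /* seconds in this slice */

	/* Carryover: budget earned under the old limit but not yet used. */
	int64_t carry = bytes_allowed(old_bps, elapsed) - bytes_disp;

	/* New scheme: represent the carryover as a negative dispatch count. */
	bytes_disp = -carry;

	/* Wait until bytes_allowed(new_bps, t) >= bytes_disp + next_bio_size. */
	int64_t next_bio = 256 * 1024;
	int64_t need = bytes_disp + next_bio;       /* <= 0 means no wait */

	printf("carryover=%lld need=%lld -> %s (new_bps=%lld)\n",
	       (long long)carry, (long long)need,
	       need <= 0 ? "dispatch now" : "wait",
	       (long long)new_bps);
	return 0;
}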
*/ throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__, bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]); } @@ -682,10 +770,6 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio int io_allowed; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; - if (iops_limit == UINT_MAX) { - return 0; - } - jiffy_elapsed = jiffies - tg->slice_start[rw]; /* Round up to the next throttle slice, wait time must be nonzero */ @@ -711,11 +795,6 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio, unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; unsigned int bio_size = throtl_bio_data_size(bio); - /* no need to throttle if this bio's bytes have been accounted */ - if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) { - return 0; - } - jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; /* Slice has just started. Consider one slice interval */ @@ -724,7 +803,9 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio, jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd); - if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed) + /* Need to consider the case of bytes_allowed overflow. */ + if ((bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed) + || bytes_allowed < 0) return 0; /* Calc approx time to dispatch */ @@ -742,17 +823,82 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio, return jiffy_wait; } +static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio) +{ + unsigned int bio_size = throtl_bio_data_size(bio); + + /* Charge the bio to the group */ + if (!bio_flagged(bio, BIO_BPS_THROTTLED) && + !bio_flagged(bio, BIO_TG_BPS_THROTTLED)) { + bio_set_flag(bio, BIO_TG_BPS_THROTTLED); + tg->bytes_disp[bio_data_dir(bio)] += bio_size; + } +} + +static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio) +{ + bio_clear_flag(bio, BIO_TG_BPS_THROTTLED); + tg->io_disp[bio_data_dir(bio)]++; +} + /* - * Returns whether one can dispatch a bio or not. Also returns approx number - * of jiffies to wait before this bio is with-in IO rate and can be dispatched + * If previous slice expired, start a new one otherwise renew/extend existing + * slice to make sure it is at least throtl_slice interval long since now. New + * slice is started only for empty throttle group. If there is queued bio, that + * means there should be an active slice and it should be extended instead. 
*/ -static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, - unsigned long *wait) +static void tg_update_slice(struct throtl_grp *tg, bool rw) +{ + if (throtl_slice_used(tg, rw) && + sq_queued(&tg->service_queue, rw) == 0) + throtl_start_new_slice(tg, rw, true); + else + throtl_extend_slice(tg, rw, jiffies + tg->td->throtl_slice); +} + +static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); - unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0; u64 bps_limit = tg_bps_limit(tg, rw); + unsigned long bps_wait; + + /* no need to throttle if this bio's bytes have been accounted */ + if (bps_limit == U64_MAX || tg->flags & THROTL_TG_CANCELING || + bio_flagged(bio, BIO_BPS_THROTTLED) || + bio_flagged(bio, BIO_TG_BPS_THROTTLED)) + return 0; + + tg_update_slice(tg, rw); + bps_wait = tg_within_bps_limit(tg, bio, bps_limit); + throtl_extend_slice(tg, rw, jiffies + bps_wait); + + return bps_wait; +} + +static unsigned long tg_dispatch_iops_time(struct throtl_grp *tg, struct bio *bio) +{ + bool rw = bio_data_dir(bio); u32 iops_limit = tg_iops_limit(tg, rw); + unsigned long iops_wait; + + if (iops_limit == UINT_MAX || tg->flags & THROTL_TG_CANCELING) + return 0; + + tg_update_slice(tg, rw); + iops_wait = tg_within_iops_limit(tg, bio, iops_limit); + throtl_extend_slice(tg, rw, jiffies + iops_wait); + + return iops_wait; +} + +/* + * Returns approx number of jiffies to wait before this bio is with-in IO rate + * and can be moved to other queue or dispatched. + */ +static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio) +{ + bool rw = bio_data_dir(bio); + unsigned long wait; /* * Currently whole state machine of group depends on first bio @@ -760,62 +906,20 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, * this function with a different bio if there are other bios * queued. */ - BUG_ON(tg->service_queue.nr_queued[rw] && + BUG_ON(sq_queued(&tg->service_queue, rw) && bio != throtl_peek_queued(&tg->service_queue.queued[rw])); - /* If tg->bps = -1, then BW is unlimited */ - if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) || - tg->flags & THROTL_TG_CANCELING) { - if (wait) - *wait = 0; - return true; - } + wait = tg_dispatch_bps_time(tg, bio); + if (wait != 0) + return wait; /* - * If previous slice expired, start a new one otherwise renew/extend - * existing slice to make sure it is at least throtl_slice interval - * long since now. New slice is started only for empty throttle group. - * If there is queued bio, that means there should be an active - * slice and it should be extended instead. + * Charge bps here because @bio will be directly placed into the + * iops queue afterward. 
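tg_dispatch_time() above replaces tg_may_dispatch(): it first asks the bps gate for a wait, and only once that is zero charges the bio's bytes and consults the iops gate, because from that point the bio lives on the iops queue. A compressed sketch of the two-stage decision, using hypothetical per-group fields rather than the real slice bookkeeping:

#include <stdio.h>

/*
 * Hypothetical per-group state: how long each gate still wants the
 * next bio to wait, plus the running byte count.
 */
struct toy_tg {
	int bps_wait;
	int iops_wait;
	long bytes_disp;
};

static int dispatch_time(struct toy_tg *tg, int bio_bytes)
{
	/* Stage 1: bytes-per-second gate. */
	if (tg->bps_wait)
		return tg->bps_wait;

	/*
	 * Passed the bps gate: charge bytes now, since the bio moves on to
	 * the iops queue regardless of what the second stage answers.
	 */
	tg->bytes_disp += bio_bytes;

	/* Stage 2: io-per-second gate. */
	return tg->iops_wait;
}

int main(void)
{
	struct toy_tg tg = { .bps_wait = 0, .iops_wait = 4 };
	int wait = dispatch_time(&tg, 4096);

	printf("bytes_disp=%ld wait=%d\n", tg.bytes_disp, wait);
	return 0;
}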
*/ - if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) - throtl_start_new_slice(tg, rw, true); - else { - if (time_before(tg->slice_end[rw], - jiffies + tg->td->throtl_slice)) - throtl_extend_slice(tg, rw, - jiffies + tg->td->throtl_slice); - } - - bps_wait = tg_within_bps_limit(tg, bio, bps_limit); - iops_wait = tg_within_iops_limit(tg, bio, iops_limit); - if (bps_wait + iops_wait == 0) { - if (wait) - *wait = 0; - return true; - } - - max_wait = max(bps_wait, iops_wait); - - if (wait) - *wait = max_wait; - - if (time_before(tg->slice_end[rw], jiffies + max_wait)) - throtl_extend_slice(tg, rw, jiffies + max_wait); - - return false; -} - -static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) -{ - bool rw = bio_data_dir(bio); - unsigned int bio_size = throtl_bio_data_size(bio); + throtl_charge_bps_bio(tg, bio); - /* Charge the bio to the group */ - if (!bio_flagged(bio, BIO_BPS_THROTTLED)) - tg->bytes_disp[rw] += bio_size; - - tg->io_disp[rw]++; + return tg_dispatch_iops_time(tg, bio); } /** @@ -842,28 +946,36 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, * dispatched. Mark that @tg was empty. This is automatically * cleared on the next tg_update_disptime(). */ - if (!sq->nr_queued[rw]) + if (sq_queued(sq, rw) == 0) tg->flags |= THROTL_TG_WAS_EMPTY; - throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); + throtl_qnode_add_bio(bio, qn, sq); + + /* + * Since we have split the queues, when the iops queue is + * previously empty and a new @bio is added into the first @qn, + * we also need to update the @tg->disptime. + */ + if (bio_flagged(bio, BIO_BPS_THROTTLED) && + bio == throtl_peek_queued(&sq->queued[rw])) + tg->flags |= THROTL_TG_IOPS_WAS_EMPTY; - sq->nr_queued[rw]++; throtl_enqueue_tg(tg); } static void tg_update_disptime(struct throtl_grp *tg) { struct throtl_service_queue *sq = &tg->service_queue; - unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime; + unsigned long read_wait = -1, write_wait = -1, min_wait, disptime; struct bio *bio; bio = throtl_peek_queued(&sq->queued[READ]); if (bio) - tg_may_dispatch(tg, bio, &read_wait); + read_wait = tg_dispatch_time(tg, bio); bio = throtl_peek_queued(&sq->queued[WRITE]); if (bio) - tg_may_dispatch(tg, bio, &write_wait); + write_wait = tg_dispatch_time(tg, bio); min_wait = min(read_wait, write_wait); disptime = jiffies + min_wait; @@ -875,6 +987,7 @@ static void tg_update_disptime(struct throtl_grp *tg) /* see throtl_add_bio_tg() */ tg->flags &= ~THROTL_TG_WAS_EMPTY; + tg->flags &= ~THROTL_TG_IOPS_WAS_EMPTY; } static void start_parent_slice_with_credit(struct throtl_grp *child_tg, @@ -901,10 +1014,9 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) * getting released prematurely. Remember the tg to put and put it * after @bio is transferred to @parent_sq. 
*/ - bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); - sq->nr_queued[rw]--; + bio = throtl_pop_queued(sq, &tg_to_put, rw); - throtl_charge_bio(tg, bio); + throtl_charge_iops_bio(tg, bio); /* * If our parent is another tg, we just need to transfer @bio to @@ -919,7 +1031,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) } else { bio_set_flag(bio, BIO_BPS_THROTTLED); throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], - &parent_sq->queued[rw]); + parent_sq); BUG_ON(tg->td->nr_queued[rw] <= 0); tg->td->nr_queued[rw]--; } @@ -941,7 +1053,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg) /* Try to dispatch 75% READS and 25% WRITES */ while ((bio = throtl_peek_queued(&sq->queued[READ])) && - tg_may_dispatch(tg, bio, NULL)) { + tg_dispatch_time(tg, bio) == 0) { tg_dispatch_one_bio(tg, READ); nr_reads++; @@ -951,7 +1063,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg) } while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && - tg_may_dispatch(tg, bio, NULL)) { + tg_dispatch_time(tg, bio) == 0) { tg_dispatch_one_bio(tg, WRITE); nr_writes++; @@ -984,7 +1096,7 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq) nr_disp += throtl_dispatch_tg(tg); sq = &tg->service_queue; - if (sq->nr_queued[READ] || sq->nr_queued[WRITE]) + if (sq_queued(sq, READ) || sq_queued(sq, WRITE)) tg_update_disptime(tg); else throtl_dequeue_tg(tg); @@ -1037,9 +1149,11 @@ again: dispatched = false; while (true) { + unsigned int __maybe_unused bio_cnt_r = sq_queued(sq, READ); + unsigned int __maybe_unused bio_cnt_w = sq_queued(sq, WRITE); + throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u", - sq->nr_queued[READ] + sq->nr_queued[WRITE], - sq->nr_queued[READ], sq->nr_queued[WRITE]); + bio_cnt_r + bio_cnt_w, bio_cnt_r, bio_cnt_w); ret = throtl_select_dispatch(sq); if (ret) { @@ -1061,7 +1175,8 @@ again: if (parent_sq) { /* @parent_sq is another throl_grp, propagate dispatch */ - if (tg->flags & THROTL_TG_WAS_EMPTY) { + if (tg->flags & THROTL_TG_WAS_EMPTY || + tg->flags & THROTL_TG_IOPS_WAS_EMPTY) { tg_update_disptime(tg); if (!throtl_schedule_next_dispatch(parent_sq, false)) { /* window is already open, repeat dispatching */ @@ -1101,7 +1216,7 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work) spin_lock_irq(&q->queue_lock); for (rw = READ; rw <= WRITE; rw++) - while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) + while ((bio = throtl_pop_queued(td_sq, NULL, rw))) bio_list_add(&bio_list_on_stack, bio); spin_unlock_irq(&q->queue_lock); @@ -1606,11 +1721,30 @@ void blk_throtl_cancel_bios(struct gendisk *disk) static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw) { - /* throtl is FIFO - if bios are already queued, should queue */ - if (tg->service_queue.nr_queued[rw]) + struct throtl_service_queue *sq = &tg->service_queue; + + /* + * For a split bio, we need to specifically distinguish whether the + * iops queue is empty. + */ + if (bio_flagged(bio, BIO_BPS_THROTTLED)) + return sq->nr_queued_iops[rw] == 0 && + tg_dispatch_iops_time(tg, bio) == 0; + + /* + * Throtl is FIFO - if bios are already queued, should queue. + * If the bps queue is empty and @bio is within the bps limit, charge + * bps here for direct placement into the iops queue. 
+ */ + if (sq_queued(&tg->service_queue, rw)) { + if (sq->nr_queued_bps[rw] == 0 && + tg_dispatch_bps_time(tg, bio) == 0) + throtl_charge_bps_bio(tg, bio); + return false; + } - return tg_may_dispatch(tg, bio, NULL); + return tg_dispatch_time(tg, bio) == 0; } bool __blk_throtl_bio(struct bio *bio) @@ -1631,7 +1765,7 @@ bool __blk_throtl_bio(struct bio *bio) while (true) { if (tg_within_limit(tg, bio, rw)) { /* within limits, let's charge and dispatch directly */ - throtl_charge_bio(tg, bio); + throtl_charge_iops_bio(tg, bio); /* * We need to trim slice even when bios are not being @@ -1654,7 +1788,8 @@ bool __blk_throtl_bio(struct bio *bio) * control algorithm is adaptive, and extra IO bytes * will be throttled for paying the debt */ - throtl_charge_bio(tg, bio); + throtl_charge_bps_bio(tg, bio); + throtl_charge_iops_bio(tg, bio); } else { /* if above limits, break to queue */ break; @@ -1680,7 +1815,7 @@ bool __blk_throtl_bio(struct bio *bio) tg->bytes_disp[rw], bio->bi_iter.bi_size, tg_bps_limit(tg, rw), tg->io_disp[rw], tg_iops_limit(tg, rw), - sq->nr_queued[READ], sq->nr_queued[WRITE]); + sq_queued(sq, READ), sq_queued(sq, WRITE)); td->nr_queued[rw]++; throtl_add_bio_tg(bio, qn, tg); @@ -1688,11 +1823,13 @@ bool __blk_throtl_bio(struct bio *bio) /* * Update @tg's dispatch time and force schedule dispatch if @tg - * was empty before @bio. The forced scheduling isn't likely to - * cause undue delay as @bio is likely to be dispatched directly if - * its @tg's disptime is not in the future. + * was empty before @bio, or the iops queue is empty and @bio will + * add to. The forced scheduling isn't likely to cause undue + * delay as @bio is likely to be dispatched directly if its @tg's + * disptime is not in the future. */ - if (tg->flags & THROTL_TG_WAS_EMPTY) { + if (tg->flags & THROTL_TG_WAS_EMPTY || + tg->flags & THROTL_TG_IOPS_WAS_EMPTY) { tg_update_disptime(tg); throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); } diff --git a/block/blk-throttle.h b/block/blk-throttle.h index f9f8666891ab..3b27755bfbff 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -29,7 +29,8 @@ */ struct throtl_qnode { struct list_head node; /* service_queue->queued[] */ - struct bio_list bios; /* queued bios */ + struct bio_list bios_bps; /* queued bios for bps limit */ + struct bio_list bios_iops; /* queued bios for iops limit */ struct throtl_grp *tg; /* tg this qnode belongs to */ }; @@ -41,7 +42,8 @@ struct throtl_service_queue { * children throtl_grp's. */ struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ - unsigned int nr_queued[2]; /* number of queued bios */ + unsigned int nr_queued_bps[2]; /* number of queued bps bios */ + unsigned int nr_queued_iops[2]; /* number of queued iops bios */ /* * RB tree of active children throtl_grp's, which are sorted by @@ -54,9 +56,14 @@ struct throtl_service_queue { }; enum tg_state_flags { - THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ - THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */ - THROTL_TG_CANCELING = 1 << 2, /* starts to cancel bio */ + THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ + THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */ + /* + * The sq's iops queue is empty, and a bio is about to be enqueued + * to the first qnode's bios_iops list. 
+ */ + THROTL_TG_IOPS_WAS_EMPTY = 1 << 2, + THROTL_TG_CANCELING = 1 << 3, /* starts to cancel bio */ }; struct throtl_grp { @@ -102,19 +109,16 @@ struct throtl_grp { /* IOPS limits */ unsigned int iops[2]; - /* Number of bytes dispatched in current slice */ - int64_t bytes_disp[2]; - /* Number of bio's dispatched in current slice */ - int io_disp[2]; - /* - * The following two fields are updated when new configuration is - * submitted while some bios are still throttled, they record how many - * bytes/ios are waited already in previous configuration, and they will - * be used to calculate wait time under new configuration. + * Number of bytes/bio's dispatched in current slice. + * When new configuration is submitted while some bios are still throttled, + * first calculate the carryover: the amount of bytes/IOs already waited + * under the previous configuration. Then, [bytes/io]_disp are represented + * as the negative of the carryover, and they will be used to calculate the + * wait time under the new configuration. */ - long long carryover_bytes[2]; - int carryover_ios[2]; + int64_t bytes_disp[2]; + int io_disp[2]; unsigned long last_check_time; diff --git a/block/blk-wbt.c b/block/blk-wbt.c index f1754d07f7e0..a50d4cd55f41 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -37,7 +37,7 @@ enum wbt_flags { WBT_TRACKED = 1, /* write, tracked for throttling */ WBT_READ = 2, /* read */ - WBT_SWAP = 4, /* write, from swap_writepage() */ + WBT_SWAP = 4, /* write, from swap_writeout() */ WBT_DISCARD = 8, /* discard */ WBT_NR_BITS = 4, /* number of bits */ @@ -704,8 +704,9 @@ void wbt_enable_default(struct gendisk *disk) struct rq_qos *rqos; bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ); - if (q->elevator && - test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags)) + mutex_lock(&disk->rqos_state_mutex); + + if (blk_queue_disable_wbt(q)) enable = false; /* Throttling already enabled? */ @@ -713,8 +714,10 @@ void wbt_enable_default(struct gendisk *disk) if (rqos) { if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT) RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT; + mutex_unlock(&disk->rqos_state_mutex); return; } + mutex_unlock(&disk->rqos_state_mutex); /* Queue not registered? Maybe shutting down... 
*/ if (!blk_queue_registered(q)) @@ -774,11 +777,13 @@ void wbt_disable_default(struct gendisk *disk) struct rq_wb *rwb; if (!rqos) return; + mutex_lock(&disk->rqos_state_mutex); rwb = RQWB(rqos); if (rwb->enable_state == WBT_STATE_ON_DEFAULT) { blk_stat_deactivate(rwb->cb); rwb->enable_state = WBT_STATE_OFF_DEFAULT; } + mutex_unlock(&disk->rqos_state_mutex); } EXPORT_SYMBOL_GPL(wbt_disable_default); diff --git a/block/blk.h b/block/blk.h index 594eeba7b949..37ec459fe656 100644 --- a/block/blk.h +++ b/block/blk.h @@ -103,8 +103,7 @@ struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs); bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, - struct page *page, unsigned len, unsigned offset, - bool *same_page); + struct page *page, unsigned len, unsigned offset); static inline bool biovec_phys_mergeable(struct request_queue *q, struct bio_vec *vec1, struct bio_vec *vec2) @@ -322,11 +321,9 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, bool blk_insert_flush(struct request *rq); -int elevator_switch(struct request_queue *q, struct elevator_type *new_e); -void elevator_disable(struct request_queue *q); -void elevator_exit(struct request_queue *q); -int elv_register_queue(struct request_queue *q, bool uevent); -void elv_unregister_queue(struct request_queue *q); +void elv_update_nr_hw_queues(struct request_queue *q); +void elevator_set_default(struct request_queue *q); +void elevator_set_none(struct request_queue *q); ssize_t part_size_show(struct device *dev, struct device_attribute *attr, char *buf); @@ -407,6 +404,27 @@ static inline struct bio *__bio_split_to_limits(struct bio *bio, } } +/** + * get_max_segment_size() - maximum number of bytes to add as a single segment + * @lim: Request queue limits. + * @paddr: address of the range to add + * @len: maximum length available to add at @paddr + * + * Returns the maximum number of bytes of the range starting at @paddr that can + * be added to a single segment. + */ +static inline unsigned get_max_segment_size(const struct queue_limits *lim, + phys_addr_t paddr, unsigned int len) +{ + /* + * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1 + * after having calculated the minimum. 
+ */ + return min_t(unsigned long, len, + min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr), + (unsigned long)lim->max_segment_size - 1) + 1); +} + int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs); bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, @@ -421,7 +439,6 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi, int blk_dev_init(void); void update_io_ticks(struct block_device *part, unsigned long now, bool end); -unsigned int part_in_flight(struct block_device *part); static inline void req_set_nomerge(struct request_queue *q, struct request *req) { @@ -443,23 +460,6 @@ static inline void ioc_clear_queue(struct request_queue *q) } #endif /* CONFIG_BLK_ICQ */ -struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q); - -static inline bool blk_queue_may_bounce(struct request_queue *q) -{ - return IS_ENABLED(CONFIG_BOUNCE) && - (q->limits.features & BLK_FEAT_BOUNCE_HIGH) && - max_low_pfn >= max_pfn; -} - -static inline struct bio *blk_queue_bounce(struct bio *bio, - struct request_queue *q) -{ - if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio))) - return __blk_queue_bounce(bio, q); - return bio; -} - #ifdef CONFIG_BLK_DEV_ZONED void disk_init_zone_resources(struct gendisk *disk); void disk_free_zone_resources(struct gendisk *disk); diff --git a/block/bounce.c b/block/bounce.c deleted file mode 100644 index 09a9616cf209..000000000000 --- a/block/bounce.c +++ /dev/null @@ -1,267 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* bounce buffer handling for block devices - * - * - Split from highmem.c - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/mm.h> -#include <linux/export.h> -#include <linux/swap.h> -#include <linux/gfp.h> -#include <linux/bio-integrity.h> -#include <linux/pagemap.h> -#include <linux/mempool.h> -#include <linux/blkdev.h> -#include <linux/backing-dev.h> -#include <linux/init.h> -#include <linux/hash.h> -#include <linux/highmem.h> -#include <linux/printk.h> -#include <asm/tlbflush.h> - -#include <trace/events/block.h> -#include "blk.h" -#include "blk-cgroup.h" - -#define POOL_SIZE 64 -#define ISA_POOL_SIZE 16 - -static struct bio_set bounce_bio_set, bounce_bio_split; -static mempool_t page_pool; - -static void init_bounce_bioset(void) -{ - static bool bounce_bs_setup; - int ret; - - if (bounce_bs_setup) - return; - - ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); - BUG_ON(ret); - - ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0); - BUG_ON(ret); - bounce_bs_setup = true; -} - -static __init int init_emergency_pool(void) -{ - int ret; - -#ifndef CONFIG_MEMORY_HOTPLUG - if (max_pfn <= max_low_pfn) - return 0; -#endif - - ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0); - BUG_ON(ret); - pr_info("pool size: %d pages\n", POOL_SIZE); - - init_bounce_bioset(); - return 0; -} - -__initcall(init_emergency_pool); - -/* - * Simple bounce buffer support for highmem pages. Depending on the - * queue gfp mask set, *to may or may not be a highmem page. kmap it - * always, it will do the Right Thing - */ -static void copy_to_high_bio_irq(struct bio *to, struct bio *from) -{ - struct bio_vec tovec, fromvec; - struct bvec_iter iter; - /* - * The bio of @from is created by bounce, so we can iterate - * its bvec from start to end, but the @from->bi_iter can't be - * trusted because it might be changed by splitting. 
- */ - struct bvec_iter from_iter = BVEC_ITER_ALL_INIT; - - bio_for_each_segment(tovec, to, iter) { - fromvec = bio_iter_iovec(from, from_iter); - if (tovec.bv_page != fromvec.bv_page) { - /* - * fromvec->bv_offset and fromvec->bv_len might have - * been modified by the block layer, so use the original - * copy, bounce_copy_vec already uses tovec->bv_len - */ - memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) + - tovec.bv_offset); - } - bio_advance_iter(from, &from_iter, tovec.bv_len); - } -} - -static void bounce_end_io(struct bio *bio) -{ - struct bio *bio_orig = bio->bi_private; - struct bio_vec *bvec, orig_vec; - struct bvec_iter orig_iter = bio_orig->bi_iter; - struct bvec_iter_all iter_all; - - /* - * free up bounce indirect pages used - */ - bio_for_each_segment_all(bvec, bio, iter_all) { - orig_vec = bio_iter_iovec(bio_orig, orig_iter); - if (bvec->bv_page != orig_vec.bv_page) { - dec_zone_page_state(bvec->bv_page, NR_BOUNCE); - mempool_free(bvec->bv_page, &page_pool); - } - bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len); - } - - bio_orig->bi_status = bio->bi_status; - bio_endio(bio_orig); - bio_put(bio); -} - -static void bounce_end_io_write(struct bio *bio) -{ - bounce_end_io(bio); -} - -static void bounce_end_io_read(struct bio *bio) -{ - struct bio *bio_orig = bio->bi_private; - - if (!bio->bi_status) - copy_to_high_bio_irq(bio_orig, bio); - - bounce_end_io(bio); -} - -static struct bio *bounce_clone_bio(struct bio *bio_src) -{ - struct bvec_iter iter; - struct bio_vec bv; - struct bio *bio; - - /* - * Pre immutable biovecs, __bio_clone() used to just do a memcpy from - * bio_src->bi_io_vec to bio->bi_io_vec. - * - * We can't do that anymore, because: - * - * - The point of cloning the biovec is to produce a bio with a biovec - * the caller can modify: bi_idx and bi_bvec_done should be 0. - * - * - The original bio could've had more than BIO_MAX_VECS biovecs; if - * we tried to clone the whole thing bio_alloc_bioset() would fail. - * But the clone should succeed as long as the number of biovecs we - * actually need to allocate is fewer than BIO_MAX_VECS. - * - * - Lastly, bi_vcnt should not be looked at or relied upon by code - * that does not own the bio - reason being drivers don't use it for - * iterating over the biovec anymore, so expecting it to be kept up - * to date (i.e. for clones that share the parent biovec) is just - * asking for trouble and would force extra work. 
- */ - bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src), - bio_src->bi_opf, GFP_NOIO, &bounce_bio_set); - if (bio_flagged(bio_src, BIO_REMAPPED)) - bio_set_flag(bio, BIO_REMAPPED); - bio->bi_ioprio = bio_src->bi_ioprio; - bio->bi_write_hint = bio_src->bi_write_hint; - bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; - bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; - - switch (bio_op(bio)) { - case REQ_OP_DISCARD: - case REQ_OP_SECURE_ERASE: - case REQ_OP_WRITE_ZEROES: - break; - default: - bio_for_each_segment(bv, bio_src, iter) - bio->bi_io_vec[bio->bi_vcnt++] = bv; - break; - } - - if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0) - goto err_put; - - if (bio_integrity(bio_src) && - bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0) - goto err_put; - - bio_clone_blkg_association(bio, bio_src); - - return bio; - -err_put: - bio_put(bio); - return NULL; -} - -struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q) -{ - struct bio *bio; - int rw = bio_data_dir(bio_orig); - struct bio_vec *to, from; - struct bvec_iter iter; - unsigned i = 0, bytes = 0; - bool bounce = false; - int sectors; - - bio_for_each_segment(from, bio_orig, iter) { - if (i++ < BIO_MAX_VECS) - bytes += from.bv_len; - if (PageHighMem(from.bv_page)) - bounce = true; - } - if (!bounce) - return bio_orig; - - /* - * Individual bvecs might not be logical block aligned. Round down - * the split size so that each bio is properly block size aligned, - * even if we do not use the full hardware limits. - */ - sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >> - SECTOR_SHIFT; - if (sectors < bio_sectors(bio_orig)) { - bio = bio_split(bio_orig, sectors, GFP_NOIO, &bounce_bio_split); - bio_chain(bio, bio_orig); - submit_bio_noacct(bio_orig); - bio_orig = bio; - } - bio = bounce_clone_bio(bio_orig); - - /* - * Bvec table can't be updated by bio_for_each_segment_all(), - * so retrieve bvec from the table directly. This way is safe - * because the 'bio' is single-page bvec. 
- */ - for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) { - struct page *bounce_page; - - if (!PageHighMem(to->bv_page)) - continue; - - bounce_page = mempool_alloc(&page_pool, GFP_NOIO); - inc_zone_page_state(bounce_page, NR_BOUNCE); - - if (rw == WRITE) { - flush_dcache_page(to->bv_page); - memcpy_from_bvec(page_address(bounce_page), to); - } - to->bv_page = bounce_page; - } - - trace_block_bio_bounce(bio_orig); - - bio->bi_flags |= (1 << BIO_BOUNCED); - - if (rw == READ) - bio->bi_end_io = bounce_end_io_read; - else - bio->bi_end_io = bounce_end_io_write; - - bio->bi_private = bio_orig; - return bio; -} diff --git a/block/elevator.c b/block/elevator.c index b4d08026b02c..ab22542e6cf0 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -45,6 +45,17 @@ #include "blk-wbt.h" #include "blk-cgroup.h" +/* Holding context data for changing elevator */ +struct elv_change_ctx { + const char *name; + bool no_uevent; + + /* for unregistering old elevator */ + struct elevator_queue *old; + /* for registering new elevator */ + struct elevator_queue *new; +}; + static DEFINE_SPINLOCK(elv_list_lock); static LIST_HEAD(elv_list); @@ -148,18 +159,18 @@ static void elevator_release(struct kobject *kobj) kfree(e); } -void elevator_exit(struct request_queue *q) +static void elevator_exit(struct request_queue *q) { struct elevator_queue *e = q->elevator; + lockdep_assert_held(&q->elevator_lock); + ioc_clear_queue(q); blk_mq_sched_free_rqs(q); mutex_lock(&e->sysfs_lock); blk_mq_exit_sched(q, e); mutex_unlock(&e->sysfs_lock); - - kobject_put(&e->kobj); } static inline void __elv_rqhash_del(struct request *rq) @@ -412,14 +423,15 @@ elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { const struct elv_fs_entry *entry = to_elv(attr); struct elevator_queue *e; - ssize_t error; + ssize_t error = -ENODEV; if (!entry->show) return -EIO; e = container_of(kobj, struct elevator_queue, kobj); mutex_lock(&e->sysfs_lock); - error = e->type ? entry->show(e, page) : -ENOENT; + if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags)) + error = entry->show(e, page); mutex_unlock(&e->sysfs_lock); return error; } @@ -430,14 +442,15 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr, { const struct elv_fs_entry *entry = to_elv(attr); struct elevator_queue *e; - ssize_t error; + ssize_t error = -ENODEV; if (!entry->store) return -EIO; e = container_of(kobj, struct elevator_queue, kobj); mutex_lock(&e->sysfs_lock); - error = e->type ? 
entry->store(e, page, length) : -ENOENT; + if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags)) + error = entry->store(e, page, length); mutex_unlock(&e->sysfs_lock); return error; } @@ -452,13 +465,12 @@ static const struct kobj_type elv_ktype = { .release = elevator_release, }; -int elv_register_queue(struct request_queue *q, bool uevent) +static int elv_register_queue(struct request_queue *q, + struct elevator_queue *e, + bool uevent) { - struct elevator_queue *e = q->elevator; int error; - lockdep_assert_held(&q->elevator_lock); - error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched"); if (!error) { const struct elv_fs_entry *attr = e->type->elevator_attrs; @@ -472,20 +484,25 @@ int elv_register_queue(struct request_queue *q, bool uevent) if (uevent) kobject_uevent(&e->kobj, KOBJ_ADD); + /* + * Sched is initialized, it is ready to export it via + * debugfs + */ + blk_mq_sched_reg_debugfs(q); set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags); } return error; } -void elv_unregister_queue(struct request_queue *q) +static void elv_unregister_queue(struct request_queue *q, + struct elevator_queue *e) { - struct elevator_queue *e = q->elevator; - - lockdep_assert_held(&q->elevator_lock); - if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) { kobject_uevent(&e->kobj, KOBJ_REMOVE); kobject_del(&e->kobj); + + /* unexport via debugfs before exiting sched */ + blk_mq_sched_unreg_debugfs(q); } } @@ -548,159 +565,194 @@ void elv_unregister(struct elevator_type *e) EXPORT_SYMBOL_GPL(elv_unregister); /* - * For single queue devices, default to using mq-deadline. If we have multiple - * queues or mq-deadline is not available, default to "none". - */ -static struct elevator_type *elevator_get_default(struct request_queue *q) -{ - if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) - return NULL; - - if (q->nr_hw_queues != 1 && - !blk_mq_is_shared_tags(q->tag_set->flags)) - return NULL; - - return elevator_find_get("mq-deadline"); -} - -/* - * Use the default elevator settings. If the chosen elevator initialization - * fails, fall back to the "none" elevator (no elevator). - */ -void elevator_init_mq(struct request_queue *q) -{ - struct elevator_type *e; - unsigned int memflags; - int err; - - WARN_ON_ONCE(blk_queue_registered(q)); - - if (unlikely(q->elevator)) - return; - - e = elevator_get_default(q); - if (!e) - return; - - /* - * We are called before adding disk, when there isn't any FS I/O, - * so freezing queue plus canceling dispatch work is enough to - * drain any dispatch activities originated from passthrough - * requests, then no need to quiesce queue which may add long boot - * latency, especially when lots of disks are involved. - * - * Disk isn't added yet, so verifying queue lock only manually. - */ - memflags = blk_mq_freeze_queue(q); - - blk_mq_cancel_work_sync(q); - - err = blk_mq_init_sched(q, e); - - blk_mq_unfreeze_queue(q, memflags); - - if (err) { - pr_warn("\"%s\" elevator initialization failed, " - "falling back to \"none\"\n", e->elevator_name); - } - - elevator_put(e); -} - -/* * Switch to new_e io scheduler. * * If switching fails, we are most likely running out of memory and not able * to restore the old io scheduler, so leaving the io scheduler being none. 
*/ -int elevator_switch(struct request_queue *q, struct elevator_type *new_e) +static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx) { - unsigned int memflags; - int ret; + struct elevator_type *new_e = NULL; + int ret = 0; + WARN_ON_ONCE(q->mq_freeze_depth == 0); lockdep_assert_held(&q->elevator_lock); - memflags = blk_mq_freeze_queue(q); + if (strncmp(ctx->name, "none", 4)) { + new_e = elevator_find_get(ctx->name); + if (!new_e) + return -EINVAL; + } + blk_mq_quiesce_queue(q); if (q->elevator) { - elv_unregister_queue(q); + ctx->old = q->elevator; elevator_exit(q); } - ret = blk_mq_init_sched(q, new_e); - if (ret) - goto out_unfreeze; - - ret = elv_register_queue(q, true); - if (ret) { - elevator_exit(q); - goto out_unfreeze; + if (new_e) { + ret = blk_mq_init_sched(q, new_e); + if (ret) + goto out_unfreeze; + ctx->new = q->elevator; + } else { + blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q); + q->elevator = NULL; + q->nr_requests = q->tag_set->queue_depth; } - blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); + blk_add_trace_msg(q, "elv switch: %s", ctx->name); out_unfreeze: blk_mq_unquiesce_queue(q); - blk_mq_unfreeze_queue(q, memflags); if (ret) { pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n", new_e->elevator_name); } + if (new_e) + elevator_put(new_e); return ret; } -void elevator_disable(struct request_queue *q) +static void elv_exit_and_release(struct request_queue *q) { - unsigned int memflags; - - lockdep_assert_held(&q->elevator_lock); + struct elevator_queue *e; + unsigned memflags; memflags = blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); - - elv_unregister_queue(q); + mutex_lock(&q->elevator_lock); + e = q->elevator; elevator_exit(q); - blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q); - q->elevator = NULL; - q->nr_requests = q->tag_set->queue_depth; - blk_add_trace_msg(q, "elv switch: none"); - - blk_mq_unquiesce_queue(q); + mutex_unlock(&q->elevator_lock); blk_mq_unfreeze_queue(q, memflags); + if (e) + kobject_put(&e->kobj); +} + +static int elevator_change_done(struct request_queue *q, + struct elv_change_ctx *ctx) +{ + int ret = 0; + + if (ctx->old) { + bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT, + &ctx->old->flags); + + elv_unregister_queue(q, ctx->old); + kobject_put(&ctx->old->kobj); + if (enable_wbt) + wbt_enable_default(q->disk); + } + if (ctx->new) { + ret = elv_register_queue(q, ctx->new, !ctx->no_uevent); + if (ret) + elv_exit_and_release(q); + } + return ret; } /* * Switch this queue to the given IO scheduler. */ -static int elevator_change(struct request_queue *q, const char *elevator_name) +static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx) { - struct elevator_type *e; - int ret; + unsigned int memflags; + int ret = 0; - /* Make sure queue is not in the middle of being removed */ - if (!blk_queue_registered(q)) - return -ENOENT; + lockdep_assert_held(&q->tag_set->update_nr_hwq_lock); + + memflags = blk_mq_freeze_queue(q); + /* + * May be called before adding disk, when there isn't any FS I/O, + * so freezing queue plus canceling dispatch work is enough to + * drain any dispatch activities originated from passthrough + * requests, then no need to quiesce queue which may add long boot + * latency, especially when lots of disks are involved. + * + * Disk isn't added yet, so verifying queue lock only manually. 
+ */ + blk_mq_cancel_work_sync(q); + mutex_lock(&q->elevator_lock); + if (!(q->elevator && elevator_match(q->elevator->type, ctx->name))) + ret = elevator_switch(q, ctx); + mutex_unlock(&q->elevator_lock); + blk_mq_unfreeze_queue(q, memflags); + if (!ret) + ret = elevator_change_done(q, ctx); + + return ret; +} + +/* + * The I/O scheduler depends on the number of hardware queues, this forces a + * reattachment when nr_hw_queues changes. + */ +void elv_update_nr_hw_queues(struct request_queue *q) +{ + struct elv_change_ctx ctx = {}; + int ret = -ENODEV; + + WARN_ON_ONCE(q->mq_freeze_depth == 0); - if (!strncmp(elevator_name, "none", 4)) { - if (q->elevator) - elevator_disable(q); - return 0; + mutex_lock(&q->elevator_lock); + if (q->elevator && !blk_queue_dying(q) && blk_queue_registered(q)) { + ctx.name = q->elevator->type->elevator_name; + + /* force to reattach elevator after nr_hw_queue is updated */ + ret = elevator_switch(q, &ctx); } + mutex_unlock(&q->elevator_lock); + blk_mq_unfreeze_queue_nomemrestore(q); + if (!ret) + WARN_ON_ONCE(elevator_change_done(q, &ctx)); +} - if (q->elevator && elevator_match(q->elevator->type, elevator_name)) - return 0; +/* + * Use the default elevator settings. If the chosen elevator initialization + * fails, fall back to the "none" elevator (no elevator). + */ +void elevator_set_default(struct request_queue *q) +{ + struct elv_change_ctx ctx = { + .name = "mq-deadline", + .no_uevent = true, + }; + int err = 0; - e = elevator_find_get(elevator_name); - if (!e) - return -EINVAL; - ret = elevator_switch(q, e); - elevator_put(e); - return ret; + /* now we allow to switch elevator */ + blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q); + + if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) + return; + + /* + * For single queue devices, default to using mq-deadline. If we + * have multiple queues or mq-deadline is not available, default + * to "none". + */ + if (elevator_find_get(ctx.name) && (q->nr_hw_queues == 1 || + blk_mq_is_shared_tags(q->tag_set->flags))) + err = elevator_change(q, &ctx); + if (err < 0) + pr_warn("\"%s\" elevator initialization, failed %d, " + "falling back to \"none\"\n", ctx.name, err); } -static void elv_iosched_load_module(char *elevator_name) +void elevator_set_none(struct request_queue *q) +{ + struct elv_change_ctx ctx = { + .name = "none", + }; + int err; + + err = elevator_change(q, &ctx); + if (err < 0) + pr_warn("%s: set none elevator failed %d\n", __func__, err); +} + +static void elv_iosched_load_module(const char *elevator_name) { struct elevator_type *found; @@ -716,10 +768,14 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf, size_t count) { char elevator_name[ELV_NAME_MAX]; - char *name; + struct elv_change_ctx ctx = {}; int ret; - unsigned int memflags; struct request_queue *q = disk->queue; + struct blk_mq_tag_set *set = q->tag_set; + + /* Make sure queue is not in the middle of being removed */ + if (!blk_queue_registered(q)) + return -ENOENT; /* * If the attribute needs to load a module, do it before freezing the @@ -727,24 +783,25 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf, * queue is the one for the device storing the module file. 
*/ strscpy(elevator_name, buf, sizeof(elevator_name)); - name = strstrip(elevator_name); + ctx.name = strstrip(elevator_name); - elv_iosched_load_module(name); + elv_iosched_load_module(ctx.name); - memflags = blk_mq_freeze_queue(q); - mutex_lock(&q->elevator_lock); - ret = elevator_change(q, name); - if (!ret) - ret = count; - mutex_unlock(&q->elevator_lock); - blk_mq_unfreeze_queue(q, memflags); + down_read(&set->update_nr_hwq_lock); + if (!blk_queue_no_elv_switch(q)) { + ret = elevator_change(q, &ctx); + if (!ret) + ret = count; + } else { + ret = -ENOENT; + } + up_read(&set->update_nr_hwq_lock); return ret; } ssize_t elv_iosched_show(struct gendisk *disk, char *name) { struct request_queue *q = disk->queue; - struct elevator_queue *eq = q->elevator; struct elevator_type *cur = NULL, *e; int len = 0; @@ -753,7 +810,7 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name) len += sprintf(name+len, "[none] "); } else { len += sprintf(name+len, "none "); - cur = eq->type; + cur = q->elevator->type; } spin_lock(&elv_list_lock); diff --git a/block/elevator.h b/block/elevator.h index e4e44dfac503..a07ce773a38f 100644 --- a/block/elevator.h +++ b/block/elevator.h @@ -121,7 +121,8 @@ struct elevator_queue }; #define ELEVATOR_FLAG_REGISTERED 0 -#define ELEVATOR_FLAG_DISABLE_WBT 1 +#define ELEVATOR_FLAG_DYING 1 +#define ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT 2 /* * block elevator interface @@ -182,4 +183,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) +void blk_mq_sched_reg_debugfs(struct request_queue *q); +void blk_mq_sched_unreg_debugfs(struct request_queue *q); + #endif /* _ELEVATOR_H */ diff --git a/block/fops.c b/block/fops.c index 82b672d15ea4..1309861d4c2c 100644 --- a/block/fops.c +++ b/block/fops.c @@ -73,6 +73,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, } bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint; + bio.bi_write_stream = iocb->ki_write_stream; bio.bi_ioprio = iocb->ki_ioprio; if (iocb->ki_flags & IOCB_ATOMIC) bio.bi_opf |= REQ_ATOMIC; @@ -206,6 +207,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, for (;;) { bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint; + bio->bi_write_stream = iocb->ki_write_stream; bio->bi_private = dio; bio->bi_end_io = blkdev_bio_end_io; bio->bi_ioprio = iocb->ki_ioprio; @@ -333,6 +335,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, dio->iocb = iocb; bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint; + bio->bi_write_stream = iocb->ki_write_stream; bio->bi_end_io = blkdev_bio_end_io_async; bio->bi_ioprio = iocb->ki_ioprio; @@ -398,6 +401,26 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter) if (blkdev_dio_invalid(bdev, iocb, iter)) return -EINVAL; + if (iov_iter_rw(iter) == WRITE) { + u16 max_write_streams = bdev_max_write_streams(bdev); + + if (iocb->ki_write_stream) { + if (iocb->ki_write_stream > max_write_streams) + return -EINVAL; + } else if (max_write_streams) { + enum rw_hint write_hint = + file_inode(iocb->ki_filp)->i_write_hint; + + /* + * Just use the write hint as write stream for block + * device writes. This assumes no file system is + * mounted that would use the streams differently. 
+ */ + if (write_hint <= max_write_streams) + iocb->ki_write_stream = write_hint; + } + } + nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1); if (likely(nr_pages <= BIO_MAX_VECS)) { if (is_sync_kiocb(iocb)) @@ -451,12 +474,13 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock, static int blkdev_writepages(struct address_space *mapping, struct writeback_control *wbc) { + struct folio *folio = NULL; struct blk_plug plug; int err; blk_start_plug(&plug); - err = write_cache_pages(mapping, wbc, block_write_full_folio, - blkdev_get_block); + while ((folio = writeback_iter(mapping, wbc, folio, &err))) + err = block_write_full_folio(folio, wbc, blkdev_get_block); blk_finish_plug(&plug); return err; diff --git a/block/genhd.c b/block/genhd.c index c2bd86cd09de..8171a6bc3210 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -125,37 +125,46 @@ static void part_stat_read_all(struct block_device *part, } } -unsigned int part_in_flight(struct block_device *part) +static void bdev_count_inflight_rw(struct block_device *part, + unsigned int inflight[2], bool mq_driver) { - unsigned int inflight = 0; int cpu; - for_each_possible_cpu(cpu) { - inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) + - part_stat_local_read_cpu(part, in_flight[1], cpu); + if (mq_driver) { + blk_mq_in_driver_rw(part, inflight); + } else { + for_each_possible_cpu(cpu) { + inflight[READ] += part_stat_local_read_cpu( + part, in_flight[READ], cpu); + inflight[WRITE] += part_stat_local_read_cpu( + part, in_flight[WRITE], cpu); + } } - if ((int)inflight < 0) - inflight = 0; - return inflight; + if (WARN_ON_ONCE((int)inflight[READ] < 0)) + inflight[READ] = 0; + if (WARN_ON_ONCE((int)inflight[WRITE] < 0)) + inflight[WRITE] = 0; } -static void part_in_flight_rw(struct block_device *part, - unsigned int inflight[2]) +/** + * bdev_count_inflight - get the number of inflight IOs for a block device. + * + * @part: the block device. + * + * Inflight here means started IO accounting, from bdev_start_io_acct() for + * bio-based block device, and from blk_account_io_start() for rq-based block + * device. + */ +unsigned int bdev_count_inflight(struct block_device *part) { - int cpu; + unsigned int inflight[2] = {0}; - inflight[0] = 0; - inflight[1] = 0; - for_each_possible_cpu(cpu) { - inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu); - inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu); - } - if ((int)inflight[0] < 0) - inflight[0] = 0; - if ((int)inflight[1] < 0) - inflight[1] = 0; + bdev_count_inflight_rw(part, inflight, false); + + return inflight[READ] + inflight[WRITE]; } +EXPORT_SYMBOL_GPL(bdev_count_inflight); /* * Can be deleted altogether. Later. @@ -389,19 +398,35 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode) return ret; } -/** - * add_disk_fwnode - add disk information to kernel list with fwnode - * @parent: parent device for the disk - * @disk: per-device partitioning information - * @groups: Additional per-device sysfs groups - * @fwnode: attached disk fwnode - * - * This function registers the partitioning information in @disk - * with the kernel. Also attach a fwnode to the disk device. 
- */ -int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, - const struct attribute_group **groups, - struct fwnode_handle *fwnode) +static void add_disk_final(struct gendisk *disk) +{ + struct device *ddev = disk_to_dev(disk); + + if (!(disk->flags & GENHD_FL_HIDDEN)) { + /* Make sure the first partition scan will be proceed */ + if (get_capacity(disk) && disk_has_partscan(disk)) + set_bit(GD_NEED_PART_SCAN, &disk->state); + + bdev_add(disk->part0, ddev->devt); + if (get_capacity(disk)) + disk_scan_partitions(disk, BLK_OPEN_READ); + + /* + * Announce the disk and partitions after all partitions are + * created. (for hidden disks uevents remain suppressed forever) + */ + dev_set_uevent_suppress(ddev, 0); + disk_uevent(disk, KOBJ_ADD); + } + + blk_apply_bdi_limits(disk->bdi, &disk->queue->limits); + disk_add_events(disk); + set_bit(GD_ADDED, &disk->state); +} + +static int __add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups, + struct fwnode_handle *fwnode) { struct device *ddev = disk_to_dev(disk); @@ -416,12 +441,6 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, */ if (disk->fops->submit_bio || disk->fops->poll_bio) return -EINVAL; - - /* - * Initialize the I/O scheduler code and pick a default one if - * needed. - */ - elevator_init_mq(disk->queue); } else { if (!disk->fops->submit_bio) return -EINVAL; @@ -438,7 +457,7 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, ret = -EINVAL; if (disk->major) { if (WARN_ON(!disk->minors)) - goto out_exit_elevator; + goto out; if (disk->minors > DISK_MAX_PARTS) { pr_err("block: can't allocate more than %d partitions\n", @@ -448,14 +467,14 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, if (disk->first_minor > MINORMASK || disk->minors > MINORMASK + 1 || disk->first_minor + disk->minors > MINORMASK + 1) - goto out_exit_elevator; + goto out; } else { if (WARN_ON(disk->minors)) - goto out_exit_elevator; + goto out; ret = blk_alloc_ext_minor(); if (ret < 0) - goto out_exit_elevator; + goto out; disk->major = BLOCK_EXT_MAJOR; disk->first_minor = ret; } @@ -516,21 +535,6 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, &disk->bdi->dev->kobj, "bdi"); if (ret) goto out_unregister_bdi; - - /* Make sure the first partition scan will be proceed */ - if (get_capacity(disk) && disk_has_partscan(disk)) - set_bit(GD_NEED_PART_SCAN, &disk->state); - - bdev_add(disk->part0, ddev->devt); - if (get_capacity(disk)) - disk_scan_partitions(disk, BLK_OPEN_READ); - - /* - * Announce the disk and partitions after all partitions are - * created. 
(for hidden disks uevents remain suppressed forever) - */ - dev_set_uevent_suppress(ddev, 0); - disk_uevent(disk, KOBJ_ADD); } else { /* * Even if the block_device for a hidden gendisk is not @@ -539,10 +543,6 @@ int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, */ disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor); } - - blk_apply_bdi_limits(disk->bdi, &disk->queue->limits); - disk_add_events(disk); - set_bit(GD_ADDED, &disk->state); return 0; out_unregister_bdi: @@ -564,12 +564,46 @@ out_device_del: out_free_ext_minor: if (disk->major == BLOCK_EXT_MAJOR) blk_free_ext_minor(disk->first_minor); -out_exit_elevator: - if (disk->queue->elevator) { - mutex_lock(&disk->queue->elevator_lock); - elevator_exit(disk->queue); - mutex_unlock(&disk->queue->elevator_lock); +out: + return ret; +} + +/** + * add_disk_fwnode - add disk information to kernel list with fwnode + * @parent: parent device for the disk + * @disk: per-device partitioning information + * @groups: Additional per-device sysfs groups + * @fwnode: attached disk fwnode + * + * This function registers the partitioning information in @disk + * with the kernel. Also attach a fwnode to the disk device. + */ +int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups, + struct fwnode_handle *fwnode) +{ + struct blk_mq_tag_set *set; + unsigned int memflags; + int ret; + + if (queue_is_mq(disk->queue)) { + set = disk->queue->tag_set; + memflags = memalloc_noio_save(); + down_read(&set->update_nr_hwq_lock); + ret = __add_disk(parent, disk, groups, fwnode); + up_read(&set->update_nr_hwq_lock); + memalloc_noio_restore(memflags); + } else { + ret = __add_disk(parent, disk, groups, fwnode); } + + /* + * add_disk_final() needn't to read `nr_hw_queues`, so move it out + * of read lock `set->update_nr_hwq_lock` for avoiding unnecessary + * lock dependency on `disk->open_mutex` from scanning partition. + */ + if (!ret) + add_disk_final(disk); return ret; } EXPORT_SYMBOL_GPL(add_disk_fwnode); @@ -652,26 +686,7 @@ void blk_mark_disk_dead(struct gendisk *disk) } EXPORT_SYMBOL_GPL(blk_mark_disk_dead); -/** - * del_gendisk - remove the gendisk - * @disk: the struct gendisk to remove - * - * Removes the gendisk and all its associated resources. This deletes the - * partitions associated with the gendisk, and unregisters the associated - * request_queue. - * - * This is the counter to the respective __device_add_disk() call. - * - * The final removal of the struct gendisk happens when its refcount reaches 0 - * with put_disk(), which should be called after del_gendisk(), if - * __device_add_disk() was used. - * - * Drivers exist which depend on the release of the gendisk to be synchronous, - * it should not be deferred. 
- * - * Context: can sleep - */ -void del_gendisk(struct gendisk *disk) +static void __del_gendisk(struct gendisk *disk) { struct request_queue *q = disk->queue; struct block_device *part; @@ -743,14 +758,7 @@ void del_gendisk(struct gendisk *disk) if (queue_is_mq(q)) blk_mq_cancel_work_sync(q); - blk_mq_quiesce_queue(q); - if (q->elevator) { - mutex_lock(&q->elevator_lock); - elevator_exit(q); - mutex_unlock(&q->elevator_lock); - } rq_qos_exit(q); - blk_mq_unquiesce_queue(q); /* * If the disk does not own the queue, allow using passthrough requests @@ -764,6 +772,55 @@ void del_gendisk(struct gendisk *disk) if (start_drain) blk_unfreeze_release_lock(q); } + +static void disable_elv_switch(struct request_queue *q) +{ + struct blk_mq_tag_set *set = q->tag_set; + WARN_ON_ONCE(!queue_is_mq(q)); + + down_write(&set->update_nr_hwq_lock); + blk_queue_flag_set(QUEUE_FLAG_NO_ELV_SWITCH, q); + up_write(&set->update_nr_hwq_lock); +} + +/** + * del_gendisk - remove the gendisk + * @disk: the struct gendisk to remove + * + * Removes the gendisk and all its associated resources. This deletes the + * partitions associated with the gendisk, and unregisters the associated + * request_queue. + * + * This is the counter to the respective __device_add_disk() call. + * + * The final removal of the struct gendisk happens when its refcount reaches 0 + * with put_disk(), which should be called after del_gendisk(), if + * __device_add_disk() was used. + * + * Drivers exist which depend on the release of the gendisk to be synchronous, + * it should not be deferred. + * + * Context: can sleep + */ +void del_gendisk(struct gendisk *disk) +{ + struct blk_mq_tag_set *set; + unsigned int memflags; + + if (!queue_is_mq(disk->queue)) { + __del_gendisk(disk); + } else { + set = disk->queue->tag_set; + + disable_elv_switch(disk->queue); + + memflags = memalloc_noio_save(); + down_read(&set->update_nr_hwq_lock); + __del_gendisk(disk); + up_read(&set->update_nr_hwq_lock); + memalloc_noio_restore(memflags); + } +} EXPORT_SYMBOL(del_gendisk); /** @@ -1005,7 +1062,7 @@ ssize_t part_stat_show(struct device *dev, struct disk_stats stat; unsigned int inflight; - inflight = part_in_flight(bdev); + inflight = bdev_count_inflight(bdev); if (inflight) { part_stat_lock(); update_io_ticks(bdev, jiffies, true); @@ -1042,19 +1099,21 @@ ssize_t part_stat_show(struct device *dev, (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC)); } +/* + * Show the number of IOs issued to driver. 
+ * For bio-based device, started from bdev_start_io_acct(); + * For rq-based device, started from blk_mq_start_request(); + */ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf) { struct block_device *bdev = dev_to_bdev(dev); struct request_queue *q = bdev_get_queue(bdev); - unsigned int inflight[2]; + unsigned int inflight[2] = {0}; - if (queue_is_mq(q)) - blk_mq_in_flight_rw(q, bdev, inflight); - else - part_in_flight_rw(bdev, inflight); + bdev_count_inflight_rw(bdev, inflight, queue_is_mq(q)); - return sysfs_emit(buf, "%8u %8u\n", inflight[0], inflight[1]); + return sysfs_emit(buf, "%8u %8u\n", inflight[READ], inflight[WRITE]); } static ssize_t disk_capability_show(struct device *dev, @@ -1307,7 +1366,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) if (bdev_is_partition(hd) && !bdev_nr_sectors(hd)) continue; - inflight = part_in_flight(hd); + inflight = bdev_count_inflight(hd); if (inflight) { part_stat_lock(); update_io_ticks(hd, jiffies, true); @@ -1422,6 +1481,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED INIT_LIST_HEAD(&disk->slave_bdevs); #endif + mutex_init(&disk->rqos_state_mutex); return disk; out_erase_part0: diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 754f6b7415cd..2edf1cac06d5 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -715,7 +715,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, } /* - * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list(). + * Called from blk_mq_insert_request() or blk_mq_dispatch_list(). */ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *list, diff --git a/crypto/842.c b/crypto/842.c index 5fb37a925989..8c257c40e2b9 100644 --- a/crypto/842.c +++ b/crypto/842.c @@ -23,10 +23,6 @@ #include <linux/module.h> #include <linux/sw842.h> -struct crypto842_ctx { - void *wmem; /* working memory for compress */ -}; - static void *crypto842_alloc_ctx(void) { void *ctx; @@ -74,7 +70,7 @@ static int __init crypto842_mod_init(void) { return crypto_register_scomp(&scomp); } -subsys_initcall(crypto842_mod_init); +module_init(crypto842_mod_init); static void __exit crypto842_mod_exit(void) { diff --git a/crypto/Kconfig b/crypto/Kconfig index dbf97c4e7c59..e9fee7818e27 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -25,7 +25,7 @@ menu "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" - depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS + depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && CRYPTO_SELFTESTS depends on (MODULE_SIG || !MODULES) help This option enables the fips boot option which is @@ -143,16 +143,17 @@ config CRYPTO_ACOMP config CRYPTO_HKDF tristate - select CRYPTO_SHA256 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS - select CRYPTO_SHA512 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS + select CRYPTO_SHA256 if CRYPTO_SELFTESTS + select CRYPTO_SHA512 if CRYPTO_SELFTESTS select CRYPTO_HASH2 config CRYPTO_MANAGER - tristate "Cryptographic algorithm manager" + tristate + default CRYPTO_ALGAPI if CRYPTO_SELFTESTS select CRYPTO_MANAGER2 help - Create default cryptographic template instantiations such as - cbc(aes). + This provides the support for instantiating templates such as + cbc(aes), and the support for the crypto self-tests. 
config CRYPTO_MANAGER2 def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y) @@ -173,35 +174,27 @@ config CRYPTO_USER Userspace configuration for cryptographic instantiations such as cbc(aes). -config CRYPTO_MANAGER_DISABLE_TESTS - bool "Disable run-time self tests" - default y +config CRYPTO_SELFTESTS + bool "Enable cryptographic self-tests" + depends on DEBUG_KERNEL help - Disable run-time self tests that normally take place at - algorithm registration. + Enable the cryptographic self-tests. -config CRYPTO_MANAGER_EXTRA_TESTS - bool "Enable extra run-time crypto self tests" - depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER - help - Enable extra run-time self tests of registered crypto algorithms, - including randomized fuzz tests. + The cryptographic self-tests run at boot time, or at algorithm + registration time if algorithms are dynamically loaded later. - This is intended for developer use only, as these tests take much - longer to run than the normal self tests. + This is primarily intended for developer use. It should not be + enabled in production kernels, unless you are trying to use these + tests to fulfill a FIPS testing requirement. config CRYPTO_NULL tristate "Null algorithms" - select CRYPTO_NULL2 + select CRYPTO_ALGAPI + select CRYPTO_SKCIPHER + select CRYPTO_HASH help These are 'Null' algorithms, used by IPsec, which do nothing. -config CRYPTO_NULL2 - tristate - select CRYPTO_ALGAPI2 - select CRYPTO_SKCIPHER2 - select CRYPTO_HASH2 - config CRYPTO_PCRYPT tristate "Parallel crypto engine" depends on SMP @@ -228,7 +221,6 @@ config CRYPTO_AUTHENC select CRYPTO_SKCIPHER select CRYPTO_MANAGER select CRYPTO_HASH - select CRYPTO_NULL help Authenc: Combined mode wrapper for IPsec. @@ -240,18 +232,21 @@ config CRYPTO_KRB5ENC select CRYPTO_SKCIPHER select CRYPTO_MANAGER select CRYPTO_HASH - select CRYPTO_NULL help Combined hash and cipher support for Kerberos 5 RFC3961 simplified profile. This is required for Kerberos 5-style encryption, used by sunrpc/NFS and rxrpc/AFS. -config CRYPTO_TEST - tristate "Testing module" +config CRYPTO_BENCHMARK + tristate "Crypto benchmarking module" depends on m || EXPERT select CRYPTO_MANAGER help - Quick & dirty crypto test module. + Quick & dirty crypto benchmarking module. + + This is mainly intended for use by people developing cryptographic + algorithms in the kernel. It should not be enabled in production + kernels. config CRYPTO_SIMD tristate @@ -634,8 +629,8 @@ config CRYPTO_ARC4 config CRYPTO_CHACHA20 tristate "ChaCha" + select CRYPTO_LIB_CHACHA select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_LIB_CHACHA_INTERNAL select CRYPTO_SKCIPHER help The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms @@ -784,8 +779,8 @@ config CRYPTO_AEGIS128_SIMD config CRYPTO_CHACHA20POLY1305 tristate "ChaCha20-Poly1305" select CRYPTO_CHACHA20 - select CRYPTO_POLY1305 select CRYPTO_AEAD + select CRYPTO_LIB_POLY1305 select CRYPTO_MANAGER help ChaCha20 stream cipher and Poly1305 authenticator combined @@ -806,7 +801,6 @@ config CRYPTO_GCM select CRYPTO_CTR select CRYPTO_AEAD select CRYPTO_GHASH - select CRYPTO_NULL select CRYPTO_MANAGER help GCM (Galois/Counter Mode) authenticated encryption mode and GMAC @@ -817,7 +811,6 @@ config CRYPTO_GCM config CRYPTO_GENIV tristate select CRYPTO_AEAD - select CRYPTO_NULL select CRYPTO_MANAGER select CRYPTO_RNG_DEFAULT @@ -953,18 +946,6 @@ config CRYPTO_POLYVAL This is used in HCTR2. It is not a general-purpose cryptographic hash function. 
-config CRYPTO_POLY1305 - tristate "Poly1305" - select CRYPTO_HASH - select CRYPTO_LIB_POLY1305_GENERIC - select CRYPTO_LIB_POLY1305_INTERNAL - help - Poly1305 authenticator algorithm (RFC7539) - - Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. - It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use - in IETF protocols. This is the portable C implementation of Poly1305. - config CRYPTO_RMD160 tristate "RIPEMD-160" select CRYPTO_HASH @@ -994,6 +975,7 @@ config CRYPTO_SHA256 tristate "SHA-224 and SHA-256" select CRYPTO_HASH select CRYPTO_LIB_SHA256 + select CRYPTO_LIB_SHA256_GENERIC help SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3) @@ -1012,13 +994,10 @@ config CRYPTO_SHA3 help SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3) -config CRYPTO_SM3 - tristate - config CRYPTO_SM3_GENERIC tristate "SM3 (ShangMi 3)" select CRYPTO_HASH - select CRYPTO_SM3 + select CRYPTO_LIB_SM3 help SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3) @@ -1406,7 +1385,6 @@ config CRYPTO_USER_API_AEAD depends on NET select CRYPTO_AEAD select CRYPTO_SKCIPHER - select CRYPTO_NULL select CRYPTO_USER_API help Enable the userspace interface for AEAD cipher algorithms. diff --git a/crypto/Makefile b/crypto/Makefile index 0e6ab5ffd3f7..017df3a2e4bb 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -71,15 +71,15 @@ obj-$(CONFIG_CRYPTO_USER) += crypto_user.o obj-$(CONFIG_CRYPTO_CMAC) += cmac.o obj-$(CONFIG_CRYPTO_HMAC) += hmac.o obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o -obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o +obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o -obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o +obj-$(CONFIG_CRYPTO_SHA256) += sha256.o +CFLAGS_sha256.o += -DARCH=$(ARCH) obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o -obj-$(CONFIG_CRYPTO_SM3) += sm3.o obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o @@ -148,14 +148,16 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o -obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o -obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o +obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o +CFLAGS_chacha.o += -DARCH=$(ARCH) obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o -obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o -obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o -CFLAGS_crc32c_generic.o += -DARCH=$(ARCH) -CFLAGS_crc32_generic.o += -DARCH=$(ARCH) +obj-$(CONFIG_CRYPTO_CRC32C) += crc32c-cryptoapi.o +crc32c-cryptoapi-y := crc32c.o +CFLAGS_crc32c.o += -DARCH=$(ARCH) +obj-$(CONFIG_CRYPTO_CRC32) += crc32-cryptoapi.o +crc32-cryptoapi-y := crc32.o +CFLAGS_crc32.o += -DARCH=$(ARCH) obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o @@ -172,7 +174,7 @@ KASAN_SANITIZE_jitterentropy.o = n UBSAN_SANITIZE_jitterentropy.o = n jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o -obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o +obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += 
ghash-generic.o obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o diff --git a/crypto/acompress.c b/crypto/acompress.c index f7a3fbe5447e..be28cbfd22e3 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -8,20 +8,32 @@ */ #include <crypto/internal/acompress.h> +#include <crypto/scatterwalk.h> #include <linux/cryptouser.h> -#include <linux/errno.h> +#include <linux/cpumask.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/page-flags.h> +#include <linux/percpu.h> +#include <linux/scatterlist.h> +#include <linux/sched.h> #include <linux/seq_file.h> -#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/spinlock.h> #include <linux/string.h> +#include <linux/workqueue.h> #include <net/netlink.h> #include "compress.h" struct crypto_scomp; +enum { + ACOMP_WALK_SLEEP = 1 << 0, + ACOMP_WALK_SRC_LINEAR = 1 << 1, + ACOMP_WALK_DST_LINEAR = 1 << 2, +}; + static const struct crypto_type crypto_acomp_type; static void acomp_reqchain_done(void *data, int err); @@ -65,7 +77,7 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm) alg->exit(acomp); if (acomp_is_async(acomp)) - crypto_free_acomp(acomp->fb); + crypto_free_acomp(crypto_acomp_fb(acomp)); } static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) @@ -75,8 +87,6 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) struct crypto_acomp *fb = NULL; int err; - acomp->fb = acomp; - if (tfm->__crt_alg->cra_type != &crypto_acomp_type) return crypto_init_scomp_ops_async(tfm); @@ -90,12 +100,12 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE) goto out_free_fb; - acomp->fb = fb; + tfm->fb = crypto_acomp_tfm(fb); } acomp->compress = alg->compress; acomp->decompress = alg->decompress; - acomp->reqsize = alg->reqsize; + acomp->reqsize = alg->base.cra_reqsize; acomp->base.exit = crypto_acomp_exit_tfm; @@ -136,6 +146,7 @@ static const struct crypto_type crypto_acomp_type = { .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, .type = CRYPTO_ALG_TYPE_ACOMPRESS, .tfmsize = offsetof(struct crypto_acomp, base), + .algsize = offsetof(struct acomp_alg, base), }; struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, @@ -161,7 +172,6 @@ static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt) state->data = req->base.data; req->base.complete = cplt; req->base.data = state; - state->req0 = req; } static void acomp_restore_req(struct acomp_req *req) @@ -172,23 +182,16 @@ static void acomp_restore_req(struct acomp_req *req) req->base.data = state->data; } -static void acomp_reqchain_virt(struct acomp_req_chain *state, int err) +static void acomp_reqchain_virt(struct acomp_req *req) { - struct acomp_req *req = state->cur; + struct acomp_req_chain *state = &req->chain; unsigned int slen = req->slen; unsigned int dlen = req->dlen; - req->base.err = err; - state = &req->chain; - if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT) acomp_request_set_src_dma(req, state->src, slen); - else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO) - acomp_request_set_src_folio(req, state->sfolio, state->soff, slen); if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT) acomp_request_set_dst_dma(req, state->dst, dlen); - else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO) - acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen); } static void acomp_virt_to_sg(struct acomp_req *req) @@ -196,9 +199,7 @@ static void acomp_virt_to_sg(struct acomp_req *req) struct acomp_req_chain *state = 
&req->chain; state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | - CRYPTO_ACOMP_REQ_DST_VIRT | - CRYPTO_ACOMP_REQ_SRC_FOLIO | - CRYPTO_ACOMP_REQ_DST_FOLIO); + CRYPTO_ACOMP_REQ_DST_VIRT); if (acomp_request_src_isvirt(req)) { unsigned int slen = req->slen; @@ -207,17 +208,6 @@ static void acomp_virt_to_sg(struct acomp_req *req) state->src = svirt; sg_init_one(&state->ssg, svirt, slen); acomp_request_set_src_sg(req, &state->ssg, slen); - } else if (acomp_request_src_isfolio(req)) { - struct folio *folio = req->sfolio; - unsigned int slen = req->slen; - size_t off = req->soff; - - state->sfolio = folio; - state->soff = off; - sg_init_table(&state->ssg, 1); - sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE), - slen, off % PAGE_SIZE); - acomp_request_set_src_sg(req, &state->ssg, slen); } if (acomp_request_dst_isvirt(req)) { @@ -227,39 +217,15 @@ static void acomp_virt_to_sg(struct acomp_req *req) state->dst = dvirt; sg_init_one(&state->dsg, dvirt, dlen); acomp_request_set_dst_sg(req, &state->dsg, dlen); - } else if (acomp_request_dst_isfolio(req)) { - struct folio *folio = req->dfolio; - unsigned int dlen = req->dlen; - size_t off = req->doff; - - state->dfolio = folio; - state->doff = off; - sg_init_table(&state->dsg, 1); - sg_set_page(&state->dsg, folio_page(folio, off / PAGE_SIZE), - dlen, off % PAGE_SIZE); - acomp_request_set_src_sg(req, &state->dsg, dlen); } } -static int acomp_do_nondma(struct acomp_req_chain *state, - struct acomp_req *req) +static int acomp_do_nondma(struct acomp_req *req, bool comp) { - u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | - CRYPTO_ACOMP_REQ_SRC_NONDMA | - CRYPTO_ACOMP_REQ_DST_VIRT | - CRYPTO_ACOMP_REQ_DST_NONDMA; - ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); + ACOMP_FBREQ_ON_STACK(fbreq, req); int err; - acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL); - fbreq->base.flags &= ~keep; - fbreq->base.flags |= req->base.flags & keep; - fbreq->src = req->src; - fbreq->dst = req->dst; - fbreq->slen = req->slen; - fbreq->dlen = req->dlen; - - if (state->op == crypto_acomp_reqtfm(req)->compress) + if (comp) err = crypto_acomp_compress(fbreq); else err = crypto_acomp_decompress(fbreq); @@ -268,114 +234,74 @@ static int acomp_do_nondma(struct acomp_req_chain *state, return err; } -static int acomp_do_one_req(struct acomp_req_chain *state, - struct acomp_req *req) +static int acomp_do_one_req(struct acomp_req *req, bool comp) { - state->cur = req; - if (acomp_request_isnondma(req)) - return acomp_do_nondma(state, req); + return acomp_do_nondma(req, comp); acomp_virt_to_sg(req); - return state->op(req); + return comp ? 
crypto_acomp_reqtfm(req)->compress(req) : + crypto_acomp_reqtfm(req)->decompress(req); } -static int acomp_reqchain_finish(struct acomp_req *req0, int err, u32 mask) +static int acomp_reqchain_finish(struct acomp_req *req, int err) { - struct acomp_req_chain *state = req0->base.data; - struct acomp_req *req = state->cur; - struct acomp_req *n; - - acomp_reqchain_virt(state, err); - - if (req != req0) - list_add_tail(&req->base.list, &req0->base.list); - - list_for_each_entry_safe(req, n, &state->head, base.list) { - list_del_init(&req->base.list); - - req->base.flags &= mask; - req->base.complete = acomp_reqchain_done; - req->base.data = state; - - err = acomp_do_one_req(state, req); - - if (err == -EINPROGRESS) { - if (!list_empty(&state->head)) - err = -EBUSY; - goto out; - } - - if (err == -EBUSY) - goto out; - - acomp_reqchain_virt(state, err); - list_add_tail(&req->base.list, &req0->base.list); - } - - acomp_restore_req(req0); - -out: + acomp_reqchain_virt(req); + acomp_restore_req(req); return err; } static void acomp_reqchain_done(void *data, int err) { - struct acomp_req_chain *state = data; - crypto_completion_t compl = state->compl; + struct acomp_req *req = data; + crypto_completion_t compl; - data = state->data; + compl = req->chain.compl; + data = req->chain.data; - if (err == -EINPROGRESS) { - if (!list_empty(&state->head)) - return; + if (err == -EINPROGRESS) goto notify; - } - err = acomp_reqchain_finish(state->req0, err, - CRYPTO_TFM_REQ_MAY_BACKLOG); - if (err == -EBUSY) - return; + err = acomp_reqchain_finish(req, err); notify: compl(data, err); } -static int acomp_do_req_chain(struct acomp_req *req, - int (*op)(struct acomp_req *req)) +static int acomp_do_req_chain(struct acomp_req *req, bool comp) { - struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - struct acomp_req_chain *state; int err; - if (crypto_acomp_req_chain(tfm) || - (!acomp_request_chained(req) && acomp_request_issg(req))) - return op(req); - acomp_save_req(req, acomp_reqchain_done); - state = req->base.data; - - state->op = op; - state->src = NULL; - INIT_LIST_HEAD(&state->head); - list_splice_init(&req->base.list, &state->head); - err = acomp_do_one_req(state, req); + err = acomp_do_one_req(req, comp); if (err == -EBUSY || err == -EINPROGRESS) - return -EBUSY; + return err; - return acomp_reqchain_finish(req, err, ~0); + return acomp_reqchain_finish(req, err); } int crypto_acomp_compress(struct acomp_req *req) { - return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress); + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + if (acomp_req_on_stack(req) && acomp_is_async(tfm)) + return -EAGAIN; + if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req)) + return crypto_acomp_reqtfm(req)->compress(req); + return acomp_do_req_chain(req, true); } EXPORT_SYMBOL_GPL(crypto_acomp_compress); int crypto_acomp_decompress(struct acomp_req *req) { - return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress); + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + if (acomp_req_on_stack(req) && acomp_is_async(tfm)) + return -EAGAIN; + if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req)) + return crypto_acomp_reqtfm(req)->decompress(req); + return acomp_do_req_chain(req, false); } EXPORT_SYMBOL_GPL(crypto_acomp_decompress); @@ -434,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count) } EXPORT_SYMBOL_GPL(crypto_unregister_acomps); +static void acomp_stream_workfn(struct work_struct *work) +{ + struct crypto_acomp_streams *s = + container_of(work, struct 
crypto_acomp_streams, stream_work); + struct crypto_acomp_stream __percpu *streams = s->streams; + int cpu; + + for_each_cpu(cpu, &s->stream_want) { + struct crypto_acomp_stream *ps; + void *ctx; + + ps = per_cpu_ptr(streams, cpu); + if (ps->ctx) + continue; + + ctx = s->alloc_ctx(); + if (IS_ERR(ctx)) + break; + + spin_lock_bh(&ps->lock); + ps->ctx = ctx; + spin_unlock_bh(&ps->lock); + + cpumask_clear_cpu(cpu, &s->stream_want); + } +} + +void crypto_acomp_free_streams(struct crypto_acomp_streams *s) +{ + struct crypto_acomp_stream __percpu *streams = s->streams; + void (*free_ctx)(void *); + int i; + + s->streams = NULL; + if (!streams) + return; + + cancel_work_sync(&s->stream_work); + free_ctx = s->free_ctx; + + for_each_possible_cpu(i) { + struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i); + + if (!ps->ctx) + continue; + + free_ctx(ps->ctx); + } + + free_percpu(streams); +} +EXPORT_SYMBOL_GPL(crypto_acomp_free_streams); + +int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s) +{ + struct crypto_acomp_stream __percpu *streams; + struct crypto_acomp_stream *ps; + unsigned int i; + void *ctx; + + if (s->streams) + return 0; + + streams = alloc_percpu(struct crypto_acomp_stream); + if (!streams) + return -ENOMEM; + + ctx = s->alloc_ctx(); + if (IS_ERR(ctx)) { + free_percpu(streams); + return PTR_ERR(ctx); + } + + i = cpumask_first(cpu_possible_mask); + ps = per_cpu_ptr(streams, i); + ps->ctx = ctx; + + for_each_possible_cpu(i) { + ps = per_cpu_ptr(streams, i); + spin_lock_init(&ps->lock); + } + + s->streams = streams; + + INIT_WORK(&s->stream_work, acomp_stream_workfn); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams); + +struct crypto_acomp_stream *crypto_acomp_lock_stream_bh( + struct crypto_acomp_streams *s) __acquires(stream) +{ + struct crypto_acomp_stream __percpu *streams = s->streams; + int cpu = raw_smp_processor_id(); + struct crypto_acomp_stream *ps; + + ps = per_cpu_ptr(streams, cpu); + spin_lock_bh(&ps->lock); + if (likely(ps->ctx)) + return ps; + spin_unlock(&ps->lock); + + cpumask_set_cpu(cpu, &s->stream_want); + schedule_work(&s->stream_work); + + ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask)); + spin_lock(&ps->lock); + return ps; +} +EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh); + +void acomp_walk_done_src(struct acomp_walk *walk, int used) +{ + walk->slen -= used; + if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) + scatterwalk_advance(&walk->in, used); + else + scatterwalk_done_src(&walk->in, used); + + if ((walk->flags & ACOMP_WALK_SLEEP)) + cond_resched(); +} +EXPORT_SYMBOL_GPL(acomp_walk_done_src); + +void acomp_walk_done_dst(struct acomp_walk *walk, int used) +{ + walk->dlen -= used; + if ((walk->flags & ACOMP_WALK_DST_LINEAR)) + scatterwalk_advance(&walk->out, used); + else + scatterwalk_done_dst(&walk->out, used); + + if ((walk->flags & ACOMP_WALK_SLEEP)) + cond_resched(); +} +EXPORT_SYMBOL_GPL(acomp_walk_done_dst); + +int acomp_walk_next_src(struct acomp_walk *walk) +{ + unsigned int slen = walk->slen; + unsigned int max = UINT_MAX; + + if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP)) + max = PAGE_SIZE; + if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) { + walk->in.__addr = (void *)(((u8 *)walk->in.sg) + + walk->in.offset); + return min(slen, max); + } + + return slen ? 
scatterwalk_next(&walk->in, slen) : 0; +} +EXPORT_SYMBOL_GPL(acomp_walk_next_src); + +int acomp_walk_next_dst(struct acomp_walk *walk) +{ + unsigned int dlen = walk->dlen; + unsigned int max = UINT_MAX; + + if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP)) + max = PAGE_SIZE; + if ((walk->flags & ACOMP_WALK_DST_LINEAR)) { + walk->out.__addr = (void *)(((u8 *)walk->out.sg) + + walk->out.offset); + return min(dlen, max); + } + + return dlen ? scatterwalk_next(&walk->out, dlen) : 0; +} +EXPORT_SYMBOL_GPL(acomp_walk_next_dst); + +int acomp_walk_virt(struct acomp_walk *__restrict walk, + struct acomp_req *__restrict req, bool atomic) +{ + struct scatterlist *src = req->src; + struct scatterlist *dst = req->dst; + + walk->slen = req->slen; + walk->dlen = req->dlen; + + if (!walk->slen || !walk->dlen) + return -EINVAL; + + walk->flags = 0; + if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic) + walk->flags |= ACOMP_WALK_SLEEP; + if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT)) + walk->flags |= ACOMP_WALK_SRC_LINEAR; + if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT)) + walk->flags |= ACOMP_WALK_DST_LINEAR; + + if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) { + walk->in.sg = (void *)req->svirt; + walk->in.offset = 0; + } else + scatterwalk_start(&walk->in, src); + if ((walk->flags & ACOMP_WALK_DST_LINEAR)) { + walk->out.sg = (void *)req->dvirt; + walk->out.offset = 0; + } else + scatterwalk_start(&walk->out, dst); + + return 0; +} +EXPORT_SYMBOL_GPL(acomp_walk_virt); + +struct acomp_req *acomp_request_clone(struct acomp_req *req, + size_t total, gfp_t gfp) +{ + struct acomp_req *nreq; + + nreq = container_of(crypto_request_clone(&req->base, total, gfp), + struct acomp_req, base); + if (nreq == req) + return req; + + if (req->src == &req->chain.ssg) + nreq->src = &nreq->chain.ssg; + if (req->dst == &req->chain.dsg) + nreq->dst = &nreq->chain.dsg; + return nreq; +} +EXPORT_SYMBOL_GPL(acomp_request_clone); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous compression type"); diff --git a/crypto/adiantum.c b/crypto/adiantum.c index c3ef583598b4..a6bca877c3c7 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -639,7 +639,7 @@ static void __exit adiantum_module_exit(void) crypto_unregister_template(&adiantum_tmpl); } -subsys_initcall(adiantum_module_init); +module_init(adiantum_module_init); module_exit(adiantum_module_exit); MODULE_DESCRIPTION("Adiantum length-preserving encryption mode"); diff --git a/crypto/aead.c b/crypto/aead.c index 12f5b42171af..5d14b775036e 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -186,6 +186,7 @@ static const struct crypto_type crypto_aead_type = { .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_AEAD, .tfmsize = offsetof(struct crypto_aead, base), + .algsize = offsetof(struct aead_alg, base), }; int crypto_grab_aead(struct crypto_aead_spawn *spawn, diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 72f6ee1345ef..ca80d861345d 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -566,7 +566,7 @@ static void __exit crypto_aegis128_module_exit(void) crypto_unregister_aead(&crypto_aegis128_alg_generic); } -subsys_initcall(crypto_aegis128_module_init); +module_init(crypto_aegis128_module_init); module_exit(crypto_aegis128_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 3c66d425c97b..85d2e78c8ef2 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -1311,7 +1311,7 @@ static void __exit aes_fini(void) crypto_unregister_alg(&aes_alg); 
} -subsys_initcall(aes_init); +module_init(aes_init); module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); diff --git a/crypto/ahash.c b/crypto/ahash.c index 2d9eec2b2b1c..e10bc2659ae4 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -18,7 +18,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> -#include <linux/sched.h> +#include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/string.h> @@ -42,26 +42,46 @@ struct crypto_hash_walk { struct scatterlist *sg; }; -struct ahash_save_req_state { - struct list_head head; - struct ahash_request *req0; - struct ahash_request *cur; - int (*op)(struct ahash_request *req); +static int ahash_def_finup(struct ahash_request *req); + +static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_AHASH_ALG_BLOCK_ONLY; +} + +static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_AHASH_ALG_FINAL_NONZERO; +} + +static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm) +{ + return crypto_ahash_alg(tfm)->halg.base.cra_flags & + CRYPTO_ALG_NEED_FALLBACK; +} + +static inline void ahash_op_done(void *data, int err, + int (*finish)(struct ahash_request *, int)) +{ + struct ahash_request *areq = data; crypto_completion_t compl; - void *data; - struct scatterlist sg; - const u8 *src; - u8 *page; - unsigned int offset; - unsigned int nbytes; -}; -static void ahash_reqchain_done(void *data, int err); -static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt); -static void ahash_restore_req(struct ahash_request *req); -static void ahash_def_finup_done1(void *data, int err); -static int ahash_def_finup_finish1(struct ahash_request *req, int err); -static int ahash_def_finup(struct ahash_request *req); + compl = areq->saved_complete; + data = areq->saved_data; + if (err == -EINPROGRESS) + goto out; + + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + err = finish(areq, err); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + compl(data, err); +} static int hash_walk_next(struct crypto_hash_walk *walk) { @@ -266,7 +286,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm) crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); - crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); return 0; } @@ -303,6 +322,9 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, int err; err = alg->setkey(tfm, key, keylen); + if (!err && crypto_ahash_need_fallback(tfm)) + err = crypto_ahash_setkey(crypto_ahash_fb(tfm), + key, keylen); if (unlikely(err)) { ahash_set_needkey(tfm, alg); return err; @@ -313,421 +335,261 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); -static bool ahash_request_hasvirt(struct ahash_request *req) -{ - return ahash_request_isvirt(req); -} - -static int ahash_reqchain_virt(struct ahash_save_req_state *state, - int err, u32 mask) -{ - struct ahash_request *req = state->cur; - - for (;;) { - unsigned len = state->nbytes; - - req->base.err = err; - - if (!state->offset) - break; - - if (state->offset == len || err) { - u8 *result = req->result; - - ahash_request_set_virt(req, state->src, result, len); - state->offset = 0; - break; - } - - len -= state->offset; - - len = min(PAGE_SIZE, len); - memcpy(state->page, state->src + 
state->offset, len); - state->offset += len; - req->nbytes = len; - - err = state->op(req); - if (err == -EINPROGRESS) { - if (!list_empty(&state->head) || - state->offset < state->nbytes) - err = -EBUSY; - break; - } - - if (err == -EBUSY) - break; - } - - return err; -} - -static int ahash_reqchain_finish(struct ahash_request *req0, - struct ahash_save_req_state *state, - int err, u32 mask) -{ - struct ahash_request *req = state->cur; - struct crypto_ahash *tfm; - struct ahash_request *n; - bool update; - u8 *page; - - err = ahash_reqchain_virt(state, err, mask); - if (err == -EINPROGRESS || err == -EBUSY) - goto out; - - if (req != req0) - list_add_tail(&req->base.list, &req0->base.list); - - tfm = crypto_ahash_reqtfm(req); - update = state->op == crypto_ahash_alg(tfm)->update; - - list_for_each_entry_safe(req, n, &state->head, base.list) { - list_del_init(&req->base.list); - - req->base.flags &= mask; - req->base.complete = ahash_reqchain_done; - req->base.data = state; - state->cur = req; - - if (update && ahash_request_isvirt(req) && req->nbytes) { - unsigned len = req->nbytes; - u8 *result = req->result; - - state->src = req->svirt; - state->nbytes = len; - - len = min(PAGE_SIZE, len); - - memcpy(state->page, req->svirt, len); - state->offset = len; - - ahash_request_set_crypt(req, &state->sg, result, len); - } - - err = state->op(req); - - if (err == -EINPROGRESS) { - if (!list_empty(&state->head) || - state->offset < state->nbytes) - err = -EBUSY; - goto out; - } - - if (err == -EBUSY) - goto out; - - err = ahash_reqchain_virt(state, err, mask); - if (err == -EINPROGRESS || err == -EBUSY) - goto out; - - list_add_tail(&req->base.list, &req0->base.list); - } - - page = state->page; - if (page) { - memset(page, 0, PAGE_SIZE); - free_page((unsigned long)page); - } - ahash_restore_req(req0); - -out: - return err; -} - -static void ahash_reqchain_done(void *data, int err) -{ - struct ahash_save_req_state *state = data; - crypto_completion_t compl = state->compl; - - data = state->data; - - if (err == -EINPROGRESS) { - if (!list_empty(&state->head) || state->offset < state->nbytes) - return; - goto notify; - } - - err = ahash_reqchain_finish(state->req0, state, err, - CRYPTO_TFM_REQ_MAY_BACKLOG); - if (err == -EBUSY) - return; - -notify: - compl(data, err); -} - static int ahash_do_req_chain(struct ahash_request *req, - int (*op)(struct ahash_request *req)) + int (*const *op)(struct ahash_request *req)) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - bool update = op == crypto_ahash_alg(tfm)->update; - struct ahash_save_req_state *state; - struct ahash_save_req_state state0; - u8 *page = NULL; int err; - if (crypto_ahash_req_chain(tfm) || - (!ahash_request_chained(req) && - (!update || !ahash_request_isvirt(req)))) - return op(req); - - if (update && ahash_request_hasvirt(req)) { - gfp_t gfp; - u32 flags; - - flags = ahash_request_flags(req); - gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
- GFP_KERNEL : GFP_ATOMIC; - page = (void *)__get_free_page(gfp); - err = -ENOMEM; - if (!page) - goto out_set_chain; - } - - state = &state0; - if (ahash_is_async(tfm)) { - err = ahash_save_req(req, ahash_reqchain_done); - if (err) - goto out_free_page; + if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req)) + return (*op)(req); - state = req->base.data; - } + if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE) + return -ENOSYS; - state->op = op; - state->cur = req; - state->page = page; - state->offset = 0; - state->nbytes = 0; - INIT_LIST_HEAD(&state->head); + { + u8 state[HASH_MAX_STATESIZE]; - if (page) - sg_init_one(&state->sg, page, PAGE_SIZE); + if (op == &crypto_ahash_alg(tfm)->digest) { + ahash_request_set_tfm(req, crypto_ahash_fb(tfm)); + err = crypto_ahash_digest(req); + goto out_no_state; + } - if (update && ahash_request_isvirt(req) && req->nbytes) { - unsigned len = req->nbytes; - u8 *result = req->result; + err = crypto_ahash_export(req, state); + ahash_request_set_tfm(req, crypto_ahash_fb(tfm)); + err = err ?: crypto_ahash_import(req, state); - state->src = req->svirt; - state->nbytes = len; + if (op == &crypto_ahash_alg(tfm)->finup) { + err = err ?: crypto_ahash_finup(req); + goto out_no_state; + } - len = min(PAGE_SIZE, len); + err = err ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, state); - memcpy(page, req->svirt, len); - state->offset = len; + ahash_request_set_tfm(req, tfm); + return err ?: crypto_ahash_import(req, state); - ahash_request_set_crypt(req, &state->sg, result, len); +out_no_state: + ahash_request_set_tfm(req, tfm); + return err; } - - err = op(req); - if (err == -EBUSY || err == -EINPROGRESS) - return -EBUSY; - - return ahash_reqchain_finish(req, state, err, ~0); - -out_free_page: - free_page((unsigned long)page); - -out_set_chain: - req->base.err = err; - return err; } int crypto_ahash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) { - int err; - - err = crypto_shash_init(prepare_shash_desc(req, tfm)); - req->base.err = err; - return err; - } - + if (likely(tfm->using_shash)) + return crypto_shash_init(prepare_shash_desc(req, tfm)); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; + if (crypto_ahash_block_only(tfm)) { + u8 *buf = ahash_request_ctx(req); - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init); + buf += crypto_ahash_reqsize(tfm) - 1; + *buf = 0; + } + return crypto_ahash_alg(tfm)->init(req); } EXPORT_SYMBOL_GPL(crypto_ahash_init); -static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) +static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ahash_save_req_state *state; - gfp_t gfp; - u32 flags; - - if (!ahash_is_async(tfm)) - return 0; - - flags = ahash_request_flags(req); - gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; - state = kmalloc(sizeof(*state), gfp); - if (!state) - return -ENOMEM; - - state->compl = req->base.complete; - state->data = req->base.data; + req->saved_complete = req->base.complete; + req->saved_data = req->base.data; req->base.complete = cplt; - req->base.data = state; - state->req0 = req; - - return 0; + req->base.data = req; } static void ahash_restore_req(struct ahash_request *req) { - struct ahash_save_req_state *state; - struct crypto_ahash *tfm; - - tfm = crypto_ahash_reqtfm(req); - if (!ahash_is_async(tfm)) - return; - - state = req->base.data; - - req->base.complete = state->compl; - req->base.data = state->data; - kfree(state); + req->base.complete = req->saved_complete; + req->base.data = req->saved_data; } -int crypto_ahash_update(struct ahash_request *req) +static int ahash_update_finish(struct ahash_request *req, int err) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (likely(tfm->using_shash)) { - int err; - - err = shash_ahash_update(req, ahash_request_ctx(req)); - req->base.err = err; - return err; + bool nonzero = crypto_ahash_final_nonzero(tfm); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen; + u8 *buf; + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; + + if (blen) { + req->src = req->sg_head + 1; + if (sg_is_chain(req->src)) + req->src = sg_chain_ptr(req->src); } - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update); -} -EXPORT_SYMBOL_GPL(crypto_ahash_update); + req->nbytes += nonzero - blen; -int crypto_ahash_final(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + blen = err < 0 ? 0 : err + nonzero; + if (ahash_request_isvirt(req)) + memcpy(buf, req->svirt + req->nbytes - blen, blen); + else + memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen); + *blenp = blen; - if (likely(tfm->using_shash)) { - int err; + ahash_restore_req(req); - err = crypto_shash_final(ahash_request_ctx(req), req->result); - req->base.err = err; - return err; - } + return err; +} - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final); +static void ahash_update_done(void *data, int err) +{ + ahash_op_done(data, err, ahash_update_finish); } -EXPORT_SYMBOL_GPL(crypto_ahash_final); -int crypto_ahash_finup(struct ahash_request *req) +int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + bool nonzero = crypto_ahash_final_nonzero(tfm); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen, err; + u8 *buf; - if (likely(tfm->using_shash)) { - int err; - - err = shash_ahash_finup(req, ahash_request_ctx(req)); - req->base.err = err; - return err; - } + if (likely(tfm->using_shash)) + return shash_ahash_update(req, ahash_request_ctx(req)); + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; + if (!crypto_ahash_block_only(tfm)) + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update); - if (!crypto_ahash_alg(tfm)->finup || - (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))) - return ahash_def_finup(req); + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup); -} -EXPORT_SYMBOL_GPL(crypto_ahash_finup); + if (blen + req->nbytes < bs + nonzero) { + if (ahash_request_isvirt(req)) + memcpy(buf + blen, req->svirt, req->nbytes); + else + memcpy_from_sglist(buf + blen, req->src, 0, + req->nbytes); -static int 
ahash_def_digest_finish(struct ahash_request *req, int err) -{ - struct crypto_ahash *tfm; + *blenp += req->nbytes; + return 0; + } - if (err) - goto out; + if (blen) { + memset(req->sg_head, 0, sizeof(req->sg_head[0])); + sg_set_buf(req->sg_head, buf, blen); + if (req->src != req->sg_head + 1) + sg_chain(req->sg_head, 2, req->src); + req->src = req->sg_head; + req->nbytes += blen; + } + req->nbytes -= nonzero; - tfm = crypto_ahash_reqtfm(req); - if (ahash_is_async(tfm)) - req->base.complete = ahash_def_finup_done1; + ahash_save_req(req, ahash_update_done); - err = crypto_ahash_update(req); + err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update); if (err == -EINPROGRESS || err == -EBUSY) return err; - return ahash_def_finup_finish1(req, err); - -out: - ahash_restore_req(req); - return err; + return ahash_update_finish(req, err); } +EXPORT_SYMBOL_GPL(crypto_ahash_update); -static void ahash_def_digest_done(void *data, int err) +static int ahash_finup_finish(struct ahash_request *req, int err) { - struct ahash_save_req_state *state0 = data; - struct ahash_save_req_state state; - struct ahash_request *areq; - - state = *state0; - areq = state.req0; - if (err == -EINPROGRESS) - goto out; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + u8 *blenp = ahash_request_ctx(req); + int blen; + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + + if (blen) { + if (sg_is_last(req->src)) + req->src = NULL; + else { + req->src = req->sg_head + 1; + if (sg_is_chain(req->src)) + req->src = sg_chain_ptr(req->src); + } + req->nbytes -= blen; + } - areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_restore_req(req); - err = ahash_def_digest_finish(areq, err); - if (err == -EINPROGRESS || err == -EBUSY) - return; + return err; +} -out: - state.compl(state.data, err); +static void ahash_finup_done(void *data, int err) +{ + ahash_op_done(data, err, ahash_finup_finish); } -static int ahash_def_digest(struct ahash_request *req) +int crypto_ahash_finup(struct ahash_request *req) { - int err; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + int bs = crypto_ahash_blocksize(tfm); + u8 *blenp = ahash_request_ctx(req); + int blen, err; + u8 *buf; - err = ahash_save_req(req, ahash_def_digest_done); - if (err) - return err; + if (likely(tfm->using_shash)) + return shash_ahash_finup(req, ahash_request_ctx(req)); + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; + if (!crypto_ahash_alg(tfm)->finup) + return ahash_def_finup(req); + if (!crypto_ahash_block_only(tfm)) + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup); + + blenp += crypto_ahash_reqsize(tfm) - 1; + blen = *blenp; + buf = blenp - bs; + + if (blen) { + memset(req->sg_head, 0, sizeof(req->sg_head[0])); + sg_set_buf(req->sg_head, buf, blen); + if (!req->src) + sg_mark_end(req->sg_head); + else if (req->src != req->sg_head + 1) + sg_chain(req->sg_head, 2, req->src); + req->src = req->sg_head; + req->nbytes += blen; + } - err = crypto_ahash_init(req); + ahash_save_req(req, ahash_finup_done); + + err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup); if (err == -EINPROGRESS || err == -EBUSY) return err; - return ahash_def_digest_finish(req, err); + return ahash_finup_finish(req, err); } +EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) { - int err; - - err = shash_ahash_digest(req, prepare_shash_desc(req, tfm)); - req->base.err = err; - return err; - } - - if 
(!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)) - return ahash_def_digest(req); - + if (likely(tfm->using_shash)) + return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + if (ahash_req_on_stack(req) && ahash_is_async(tfm)) + return -EAGAIN; if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - - return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest); + return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); static void ahash_def_finup_done2(void *data, int err) { - struct ahash_save_req_state *state = data; - struct ahash_request *areq = state->req0; + struct ahash_request *areq = data; if (err == -EINPROGRESS) return; @@ -738,14 +600,10 @@ static void ahash_def_finup_done2(void *data, int err) static int ahash_def_finup_finish1(struct ahash_request *req, int err) { - struct crypto_ahash *tfm; - if (err) goto out; - tfm = crypto_ahash_reqtfm(req); - if (ahash_is_async(tfm)) - req->base.complete = ahash_def_finup_done2; + req->base.complete = ahash_def_finup_done2; err = crypto_ahash_final(req); if (err == -EINPROGRESS || err == -EBUSY) @@ -758,32 +616,14 @@ out: static void ahash_def_finup_done1(void *data, int err) { - struct ahash_save_req_state *state0 = data; - struct ahash_save_req_state state; - struct ahash_request *areq; - - state = *state0; - areq = state.req0; - if (err == -EINPROGRESS) - goto out; - - areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - err = ahash_def_finup_finish1(areq, err); - if (err == -EINPROGRESS || err == -EBUSY) - return; - -out: - state.compl(state.data, err); + ahash_op_done(data, err, ahash_def_finup_finish1); } static int ahash_def_finup(struct ahash_request *req) { int err; - err = ahash_save_req(req, ahash_def_finup_done1); - if (err) - return err; + ahash_save_req(req, ahash_def_finup_done1); err = crypto_ahash_update(req); if (err == -EINPROGRESS || err == -EBUSY) @@ -792,16 +632,47 @@ static int ahash_def_finup(struct ahash_request *req) return ahash_def_finup_finish1(req, err); } +int crypto_ahash_export_core(struct ahash_request *req, void *out) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_export_core(ahash_request_ctx(req), out); + return crypto_ahash_alg(tfm)->export_core(req, out); +} +EXPORT_SYMBOL_GPL(crypto_ahash_export_core); + int crypto_ahash_export(struct ahash_request *req, void *out) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (likely(tfm->using_shash)) return crypto_shash_export(ahash_request_ctx(req), out); + if (crypto_ahash_block_only(tfm)) { + unsigned int plen = crypto_ahash_blocksize(tfm) + 1; + unsigned int reqsize = crypto_ahash_reqsize(tfm); + unsigned int ss = crypto_ahash_statesize(tfm); + u8 *buf = ahash_request_ctx(req); + + memcpy(out + ss - plen, buf + reqsize - plen, plen); + } return crypto_ahash_alg(tfm)->export(req, out); } EXPORT_SYMBOL_GPL(crypto_ahash_export); +int crypto_ahash_import_core(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_import_core(prepare_shash_desc(req, tfm), + in); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return crypto_ahash_alg(tfm)->import_core(req, in); +} +EXPORT_SYMBOL_GPL(crypto_ahash_import_core); + int crypto_ahash_import(struct ahash_request *req, const void *in) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -810,6 +681,12 @@ int 
crypto_ahash_import(struct ahash_request *req, const void *in) return crypto_shash_import(prepare_shash_desc(req, tfm), in); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; + if (crypto_ahash_block_only(tfm)) { + unsigned int reqsize = crypto_ahash_reqsize(tfm); + u8 *buf = ahash_request_ctx(req); + + buf[reqsize - 1] = 0; + } return crypto_ahash_alg(tfm)->import(req, in); } EXPORT_SYMBOL_GPL(crypto_ahash_import); @@ -819,26 +696,73 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm) struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); - alg->exit_tfm(hash); + if (alg->exit_tfm) + alg->exit_tfm(hash); + else if (tfm->__crt_alg->cra_exit) + tfm->__crt_alg->cra_exit(tfm); + + if (crypto_ahash_need_fallback(hash)) + crypto_free_ahash(crypto_ahash_fb(hash)); } static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); + struct crypto_ahash *fb = NULL; + int err; crypto_ahash_set_statesize(hash, alg->halg.statesize); - crypto_ahash_set_reqsize(hash, alg->reqsize); + crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm)); if (tfm->__crt_alg->cra_type == &crypto_shash_type) return crypto_init_ahash_using_shash(tfm); + if (crypto_ahash_need_fallback(hash)) { + fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash), + CRYPTO_ALG_REQ_VIRT, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_REQ_VIRT | + CRYPTO_AHASH_ALG_NO_EXPORT_CORE); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + tfm->fb = crypto_ahash_tfm(fb); + } + ahash_set_needkey(hash, alg); - if (alg->exit_tfm) - tfm->exit = crypto_ahash_exit_tfm; + tfm->exit = crypto_ahash_exit_tfm; + + if (alg->init_tfm) + err = alg->init_tfm(hash); + else if (tfm->__crt_alg->cra_init) + err = tfm->__crt_alg->cra_init(tfm); + else + return 0; + + if (err) + goto out_free_sync_hash; + + if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) > + MAX_SYNC_HASH_REQSIZE) + goto out_exit_tfm; - return alg->init_tfm ? 
alg->init_tfm(hash) : 0; + BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE); + if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE) + crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE); + + return 0; + +out_exit_tfm: + if (alg->exit_tfm) + alg->exit_tfm(hash); + else if (tfm->__crt_alg->cra_exit) + tfm->__crt_alg->cra_exit(tfm); + err = -EINVAL; +out_free_sync_hash: + crypto_free_ahash(fb); + return err; } static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) @@ -897,6 +821,7 @@ static const struct crypto_type crypto_ahash_type = { .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AHASH, .tfmsize = offsetof(struct crypto_ahash, base), + .algsize = offsetof(struct ahash_alg, halg.base), }; int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, @@ -921,7 +846,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_ahash); -static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; @@ -930,11 +855,13 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; } +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) { struct hash_alg_common *halg = crypto_hash_alg_common(hash); struct crypto_tfm *tfm = crypto_ahash_tfm(hash); + struct crypto_ahash *fb = NULL; struct crypto_ahash *nhash; struct ahash_alg *alg; int err; @@ -964,28 +891,52 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) err = PTR_ERR(shash); goto out_free_nhash; } + crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash; nhash->using_shash = true; *nctx = shash; return nhash; } + if (crypto_ahash_need_fallback(hash)) { + fb = crypto_clone_ahash(crypto_ahash_fb(hash)); + err = PTR_ERR(fb); + if (IS_ERR(fb)) + goto out_free_nhash; + + crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb); + } + err = -ENOSYS; alg = crypto_ahash_alg(hash); if (!alg->clone_tfm) - goto out_free_nhash; + goto out_free_fb; err = alg->clone_tfm(nhash, hash); if (err) - goto out_free_nhash; + goto out_free_fb; + + crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm; return nhash; +out_free_fb: + crypto_free_ahash(fb); out_free_nhash: crypto_free_ahash(nhash); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_clone_ahash); +static int ahash_default_export_core(struct ahash_request *req, void *out) +{ + return -ENOSYS; +} + +static int ahash_default_import_core(struct ahash_request *req, const void *in) +{ + return -ENOSYS; +} + static int ahash_prepare_alg(struct ahash_alg *alg) { struct crypto_alg *base = &alg->halg.base; @@ -994,7 +945,11 @@ static int ahash_prepare_alg(struct ahash_alg *alg) if (alg->halg.statesize == 0) return -EINVAL; - if (alg->reqsize && alg->reqsize < alg->halg.statesize) + if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize) + return -EINVAL; + + if (!(base->cra_flags & CRYPTO_ALG_ASYNC) && + base->cra_reqsize > MAX_SYNC_HASH_REQSIZE) return -EINVAL; err = hash_prepare_alg(&alg->halg); @@ -1004,9 +959,28 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; + if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) & + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT)) + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + if (!alg->setkey) alg->setkey = ahash_nosetkey; + if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) { + 
BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); + if (!alg->finup) + return -EINVAL; + + base->cra_reqsize += base->cra_blocksize + 1; + alg->halg.statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { + alg->export_core = ahash_default_export_core; + alg->import_core = ahash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + } + return 0; } @@ -1074,5 +1048,42 @@ int ahash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(ahash_register_instance); +void ahash_request_free(struct ahash_request *req) +{ + if (unlikely(!req)) + return; + + if (!ahash_req_on_stack(req)) { + kfree(req); + return; + } + + ahash_request_zero(req); +} +EXPORT_SYMBOL_GPL(ahash_request_free); + +int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data, + unsigned int len, u8 *out) +{ + HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm)); + int err; + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, data, out, len); + err = crypto_ahash_digest(req); + + ahash_request_zero(req); + + return err; +} +EXPORT_SYMBOL_GPL(crypto_hash_digest); + +void ahash_free_singlespawn_instance(struct ahash_instance *inst) +{ + crypto_drop_spawn(ahash_instance_ctx(inst)); + kfree(inst); +} +EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 72c82d9aa077..a36f50c83827 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -97,6 +97,7 @@ static const struct crypto_type crypto_akcipher_type = { .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, .type = CRYPTO_ALG_TYPE_AKCIPHER, .tfmsize = offsetof(struct crypto_akcipher, base), + .algsize = offsetof(struct akcipher_alg, base), }; int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, diff --git a/crypto/algapi.c b/crypto/algapi.c index ea9ed9580aa8..e604d0d8b7b4 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -71,12 +71,23 @@ static void crypto_free_instance(struct crypto_instance *inst) static void crypto_destroy_instance_workfn(struct work_struct *w) { - struct crypto_instance *inst = container_of(w, struct crypto_instance, + struct crypto_template *tmpl = container_of(w, struct crypto_template, free_work); - struct crypto_template *tmpl = inst->tmpl; + struct crypto_instance *inst; + struct hlist_node *n; + HLIST_HEAD(list); + + down_write(&crypto_alg_sem); + hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) { + if (refcount_read(&inst->alg.cra_refcnt) != -1) + continue; + hlist_del(&inst->list); + hlist_add_head(&inst->list, &list); + } + up_write(&crypto_alg_sem); - crypto_free_instance(inst); - crypto_tmpl_put(tmpl); + hlist_for_each_entry_safe(inst, n, &list, list) + crypto_free_instance(inst); } static void crypto_destroy_instance(struct crypto_alg *alg) @@ -84,9 +95,10 @@ static void crypto_destroy_instance(struct crypto_alg *alg) struct crypto_instance *inst = container_of(alg, struct crypto_instance, alg); + struct crypto_template *tmpl = inst->tmpl; - INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn); - schedule_work(&inst->free_work); + refcount_set(&alg->cra_refcnt, -1); + schedule_work(&tmpl->free_work); } /* @@ -132,14 +144,16 @@ static void crypto_remove_instance(struct crypto_instance *inst, inst->alg.cra_flags |= CRYPTO_ALG_DEAD; - if (!tmpl || !crypto_tmpl_get(tmpl)) + if (!tmpl) return; - list_move(&inst->alg.cra_list, list); + 
list_del_init(&inst->alg.cra_list); hlist_del(&inst->list); - inst->alg.cra_destroy = crypto_destroy_instance; + hlist_add_head(&inst->list, &tmpl->dead); BUG_ON(!list_empty(&inst->alg.cra_users)); + + crypto_alg_put(&inst->alg); } /* @@ -260,8 +274,7 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg) { struct crypto_larval *larval; - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER) || - IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) || + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) || (alg->cra_flags & CRYPTO_ALG_INTERNAL)) return NULL; /* No self-test needed */ @@ -404,6 +417,15 @@ void crypto_remove_final(struct list_head *list) } EXPORT_SYMBOL_GPL(crypto_remove_final); +static void crypto_free_alg(struct crypto_alg *alg) +{ + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; + + crypto_destroy_alg(alg); + kfree(p); +} + int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; @@ -416,6 +438,19 @@ int crypto_register_alg(struct crypto_alg *alg) if (err) return err; + if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST && + !WARN_ON_ONCE(alg->cra_destroy)) { + unsigned int algsize = alg->cra_type->algsize; + u8 *p = (u8 *)alg - algsize; + + p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL); + if (!p) + return -ENOMEM; + + alg = (void *)(p + algsize); + alg->cra_destroy = crypto_free_alg; + } + down_write(&crypto_alg_sem); larval = __crypto_register_alg(alg, &algs_to_put); if (!IS_ERR_OR_NULL(larval)) { @@ -424,8 +459,10 @@ int crypto_register_alg(struct crypto_alg *alg) } up_write(&crypto_alg_sem); - if (IS_ERR(larval)) + if (IS_ERR(larval)) { + crypto_alg_put(alg); return PTR_ERR(larval); + } if (test_started) crypto_schedule_test(larval); @@ -461,11 +498,9 @@ void crypto_unregister_alg(struct crypto_alg *alg) if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name)) return; - if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1)) - return; - - crypto_alg_put(alg); + WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1); + list_add(&alg->cra_list, &list); crypto_remove_final(&list); } EXPORT_SYMBOL_GPL(crypto_unregister_alg); @@ -504,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl) struct crypto_template *q; int err = -EEXIST; + INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn); + down_write(&crypto_alg_sem); crypto_check_module_sig(tmpl->module); @@ -565,6 +602,8 @@ void crypto_unregister_template(struct crypto_template *tmpl) crypto_free_instance(inst); } crypto_remove_final(&users); + + flush_work(&tmpl->free_work); } EXPORT_SYMBOL_GPL(crypto_unregister_template); @@ -618,6 +657,7 @@ int crypto_register_instance(struct crypto_template *tmpl, inst->alg.cra_module = tmpl->module; inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE; + inst->alg.cra_destroy = crypto_destroy_instance; down_write(&crypto_alg_sem); @@ -883,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -int crypto_inst_setname(struct crypto_instance *inst, const char *name, - struct crypto_alg *alg) +int __crypto_inst_setname(struct crypto_instance *inst, const char *name, + const char *driver, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, alg->cra_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return 0; } 
-EXPORT_SYMBOL_GPL(crypto_inst_setname); +EXPORT_SYMBOL_GPL(__crypto_inst_setname); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) { @@ -1018,7 +1058,7 @@ static void __init crypto_start_tests(void) if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI)) return; - if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) return; set_crypto_boot_test_finished(); diff --git a/crypto/algboss.c b/crypto/algboss.c index a20926bfd34e..846f586889ee 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -189,7 +189,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg) struct task_struct *thread; struct crypto_test_param *param; - if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) return NOTIFY_DONE; if (!try_module_get(THIS_MODULE)) @@ -247,13 +247,7 @@ static void __exit cryptomgr_exit(void) BUG_ON(err); } -/* - * This is arch_initcall() so that the crypto self-tests are run on algorithms - * registered early by subsys_initcall(). subsys_initcall() is needed for - * generic implementations so that they're available for comparison tests when - * other implementations are registered later by module_init(). - */ -arch_initcall(cryptomgr_init); +module_init(cryptomgr_init); module_exit(cryptomgr_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 7d58cbbce4af..79b016a899a1 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -27,7 +27,6 @@ #include <crypto/scatterwalk.h> #include <crypto/if_alg.h> #include <crypto/skcipher.h> -#include <crypto/null.h> #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> @@ -36,19 +35,13 @@ #include <linux/net.h> #include <net/sock.h> -struct aead_tfm { - struct crypto_aead *aead; - struct crypto_sync_skcipher *null_tfm; -}; - static inline bool aead_sufficient_data(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int as = crypto_aead_authsize(tfm); /* @@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int ivsize = crypto_aead_ivsize(tfm); return af_alg_sendmsg(sock, msg, size, ivsize); } -static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm, - struct scatterlist *src, - struct scatterlist *dst, unsigned int len) -{ - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); - - skcipher_request_set_sync_tfm(skreq, null_tfm); - skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP, - NULL, NULL); - skcipher_request_set_crypt(skreq, src, dst, len, NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; - struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm; + struct crypto_aead *tfm = 
pask->private; unsigned int i, as = crypto_aead_authsize(tfm); struct af_alg_async_req *areq; struct af_alg_tsgl *tsgl, *tmp; @@ -223,11 +199,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * v v * RX SGL: AAD || PT || Tag */ - err = crypto_aead_copy_sgl(null_tfm, tsgl_src, - areq->first_rsgl.sgl.sgt.sgl, - processed); - if (err) - goto free; + memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, + processed); af_alg_pull_tsgl(sk, processed, NULL, 0); } else { /* @@ -241,12 +214,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * RX SGL: AAD || CT ----+ */ - /* Copy AAD || CT to RX SGL buffer for in-place operation. */ - err = crypto_aead_copy_sgl(null_tfm, tsgl_src, - areq->first_rsgl.sgl.sgt.sgl, - outlen); - if (err) - goto free; + /* Copy AAD || CT to RX SGL buffer for in-place operation. */ + memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen); /* Create TX SGL for tag and chain it to RX SGL. */ areq->tsgl_entries = af_alg_count_tsgl(sk, processed, @@ -379,7 +348,7 @@ static int aead_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct aead_tfm *tfm; + struct crypto_aead *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -393,7 +362,7 @@ static int aead_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) + if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; atomic_dec(&pask->nokey_refcnt); @@ -454,54 +423,22 @@ static struct proto_ops algif_aead_ops_nokey = { static void *aead_bind(const char *name, u32 type, u32 mask) { - struct aead_tfm *tfm; - struct crypto_aead *aead; - struct crypto_sync_skcipher *null_tfm; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - aead = crypto_alloc_aead(name, type, mask); - if (IS_ERR(aead)) { - kfree(tfm); - return ERR_CAST(aead); - } - - null_tfm = crypto_get_default_null_skcipher(); - if (IS_ERR(null_tfm)) { - crypto_free_aead(aead); - kfree(tfm); - return ERR_CAST(null_tfm); - } - - tfm->aead = aead; - tfm->null_tfm = null_tfm; - - return tfm; + return crypto_alloc_aead(name, type, mask); } static void aead_release(void *private) { - struct aead_tfm *tfm = private; - - crypto_free_aead(tfm->aead); - crypto_put_default_null_skcipher(); - kfree(tfm); + crypto_free_aead(private); } static int aead_setauthsize(void *private, unsigned int authsize) { - struct aead_tfm *tfm = private; - - return crypto_aead_setauthsize(tfm->aead, authsize); + return crypto_aead_setauthsize(private, authsize); } static int aead_setkey(void *private, const u8 *key, unsigned int keylen) { - struct aead_tfm *tfm = private; - - return crypto_aead_setkey(tfm->aead, key, keylen); + return crypto_aead_setkey(private, key, keylen); } static void aead_sock_destruct(struct sock *sk) @@ -510,8 +447,7 @@ static void aead_sock_destruct(struct sock *sk) struct af_alg_ctx *ctx = ask->private; struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct aead_tfm *aeadc = pask->private; - struct crypto_aead *tfm = aeadc->aead; + struct crypto_aead *tfm = pask->private; unsigned int ivlen = crypto_aead_ivsize(tfm); af_alg_pull_tsgl(sk, ctx->used, NULL, 0); @@ -524,10 +460,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) { struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); - struct aead_tfm *tfm = private; - struct crypto_aead *aead = tfm->aead; + struct crypto_aead *tfm = 
private; unsigned int len = sizeof(*ctx); - unsigned int ivlen = crypto_aead_ivsize(aead); + unsigned int ivlen = crypto_aead_ivsize(tfm); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -554,9 +489,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) static int aead_accept_parent(void *private, struct sock *sk) { - struct aead_tfm *tfm = private; + struct crypto_aead *tfm = private; - if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) + if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return aead_accept_parent_nokey(private, sk); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5498a87249d3..e3f1a4852737 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, goto out_free_state; err = crypto_ahash_import(&ctx2->req, state); - if (err) { - sock_orphan(sk2); - sock_put(sk2); - } out_free_state: kfree_sensitive(state); diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 64f57c4c4b06..153523ce6076 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -467,7 +467,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator"); MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); module_param(dbg, int, 0); MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); -subsys_initcall(prng_mod_init); +module_init(prng_mod_init); module_exit(prng_mod_fini); MODULE_ALIAS_CRYPTO("stdrng"); MODULE_ALIAS_CRYPTO("ansi_cprng"); diff --git a/crypto/anubis.c b/crypto/anubis.c index 886e7c913688..4268c3833baa 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -694,7 +694,7 @@ static void __exit anubis_mod_fini(void) crypto_unregister_alg(&anubis_alg); } -subsys_initcall(anubis_mod_init); +module_init(anubis_mod_init); module_exit(anubis_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/api.c b/crypto/api.c index 3416e98128a0..5724d62e9d07 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -31,8 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); -#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && \ - !IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) +#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); #endif @@ -220,10 +219,19 @@ again: if (crypto_is_test_larval(larval)) crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); - } else if (!alg) { + } else if (!alg || PTR_ERR(alg) == -EEXIST) { + int err = alg ? -EEXIST : -EAGAIN; + + /* + * EEXIST is expected because two probes can be scheduled + * at the same time with one using alg_name and the other + * using driver_name. Do a re-lookup but do not retry in + * case we hit a quirk like gcm_base(ctr(aes),...) which + * will never match. 
+ */ alg = &larval->alg; alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: - ERR_PTR(-EAGAIN); + ERR_PTR(err); } else if (IS_ERR(alg)) ; else if (crypto_is_test_larval(larval) && @@ -528,6 +536,7 @@ void *crypto_create_tfm_node(struct crypto_alg *alg, goto out; tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); + tfm->fb = tfm; err = frontend->init_tfm(tfm); if (err) @@ -569,7 +578,7 @@ void *crypto_clone_tfm(const struct crypto_type *frontend, tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); tfm->crt_flags = otfm->crt_flags; - tfm->exit = otfm->exit; + tfm->fb = tfm; out: return mem; @@ -707,11 +716,27 @@ void crypto_destroy_alg(struct crypto_alg *alg) { if (alg->cra_type && alg->cra_type->destroy) alg->cra_type->destroy(alg); - if (alg->cra_destroy) alg->cra_destroy(alg); } EXPORT_SYMBOL_GPL(crypto_destroy_alg); +struct crypto_async_request *crypto_request_clone( + struct crypto_async_request *req, size_t total, gfp_t gfp) +{ + struct crypto_tfm *tfm = req->tfm; + struct crypto_async_request *nreq; + + nreq = kmemdup(req, total, gfp); + if (!nreq) { + req->tfm = tfm->fb; + return req; + } + + nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK; + return nreq; +} +EXPORT_SYMBOL_GPL(crypto_request_clone); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/crypto/arc4.c b/crypto/arc4.c index 1a4825c97c5a..1608018111d0 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -73,7 +73,7 @@ static void __exit arc4_exit(void) crypto_unregister_lskcipher(&arc4_alg); } -subsys_initcall(arc4_init); +module_init(arc4_init); module_exit(arc4_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c index bd359d3313c2..faa7900383f6 100644 --- a/crypto/aria_generic.c +++ b/crypto/aria_generic.c @@ -304,7 +304,7 @@ static void __exit aria_fini(void) crypto_unregister_alg(&aria_alg); } -subsys_initcall(aria_init); +module_init(aria_init); module_exit(aria_fini); MODULE_DESCRIPTION("ARIA Cipher Algorithm"); diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index bf165d321440..e5b177c8e842 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -163,10 +163,8 @@ static u8 *pkey_pack_u32(u8 *dst, u32 val) static int software_key_query(const struct kernel_pkey_params *params, struct kernel_pkey_query *info) { - struct crypto_akcipher *tfm; struct public_key *pkey = params->key->payload.data[asym_crypto]; char alg_name[CRYPTO_MAX_ALG_NAME]; - struct crypto_sig *sig; u8 *key, *ptr; int ret, len; bool issig; @@ -188,7 +186,11 @@ static int software_key_query(const struct kernel_pkey_params *params, ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); + memset(info, 0, sizeof(*info)); + if (issig) { + struct crypto_sig *sig; + sig = crypto_alloc_sig(alg_name, 0, 0); if (IS_ERR(sig)) { ret = PTR_ERR(sig); @@ -200,9 +202,10 @@ static int software_key_query(const struct kernel_pkey_params *params, else ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); if (ret < 0) - goto error_free_tfm; + goto error_free_sig; len = crypto_sig_keysize(sig); + info->key_size = len; info->max_sig_size = crypto_sig_maxsize(sig); info->max_data_size = crypto_sig_digestsize(sig); @@ -211,11 +214,19 @@ static int software_key_query(const struct kernel_pkey_params *params, info->supported_ops |= KEYCTL_SUPPORTS_SIGN; if (strcmp(params->encoding, "pkcs1") == 0) { + info->max_enc_size = len / BITS_PER_BYTE; + info->max_dec_size = len / BITS_PER_BYTE; + info->supported_ops |= 
KEYCTL_SUPPORTS_ENCRYPT; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; } + +error_free_sig: + crypto_free_sig(sig); } else { + struct crypto_akcipher *tfm; + tfm = crypto_alloc_akcipher(alg_name, 0, 0); if (IS_ERR(tfm)) { ret = PTR_ERR(tfm); @@ -227,28 +238,23 @@ static int software_key_query(const struct kernel_pkey_params *params, else ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); if (ret < 0) - goto error_free_tfm; + goto error_free_akcipher; len = crypto_akcipher_maxsize(tfm); + info->key_size = len * BITS_PER_BYTE; info->max_sig_size = len; info->max_data_size = len; + info->max_enc_size = len; + info->max_dec_size = len; info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT; if (pkey->key_is_private) info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT; - } - - info->key_size = len * 8; - info->max_enc_size = len; - info->max_dec_size = len; - - ret = 0; -error_free_tfm: - if (issig) - crypto_free_sig(sig); - else +error_free_akcipher: crypto_free_akcipher(tfm); + } + error_free_key: kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index ee2fdab42334..2ffe4ae90bea 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -372,10 +372,9 @@ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen, /* Empty name string if no material */ if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) { - buffer = kmalloc(1, GFP_KERNEL); + buffer = kzalloc(1, GFP_KERNEL); if (!buffer) return -ENOMEM; - buffer[0] = 0; goto done; } diff --git a/crypto/authenc.c b/crypto/authenc.c index 3aaf3ab4e360..a723769c8777 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -9,7 +9,6 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> @@ -28,7 +27,6 @@ struct authenc_instance_ctx { struct crypto_authenc_ctx { struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; }; struct authenc_request_ctx { @@ -170,21 +168,6 @@ out: authenc_request_complete(areq, err); } -static int crypto_authenc_copy_assoc(struct aead_request *req) -{ - struct crypto_aead *authenc = crypto_aead_reqtfm(req); - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - - skcipher_request_set_sync_tfm(skreq, ctx->null); - skcipher_request_set_callback(skreq, aead_request_flags(req), - NULL, NULL); - skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, - NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int crypto_authenc_encrypt(struct aead_request *req) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); @@ -203,10 +186,7 @@ static int crypto_authenc_encrypt(struct aead_request *req) dst = src; if (req->src != req->dst) { - err = crypto_authenc_copy_assoc(req); - if (err) - return err; - + memcpy_sglist(req->dst, req->src, req->assoclen); dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); } @@ -303,7 +283,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); @@ -315,14 +294,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) 
if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_skcipher; - ctx->auth = auth; ctx->enc = enc; - ctx->null = null; crypto_aead_set_reqsize( tfm, @@ -336,8 +309,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) return 0; -err_free_skcipher: - crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -349,7 +320,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher(); } static void crypto_authenc_free(struct aead_instance *inst) @@ -451,7 +421,7 @@ static void __exit crypto_authenc_module_exit(void) crypto_unregister_template(&crypto_authenc_tmpl); } -subsys_initcall(crypto_authenc_module_init); +module_init(crypto_authenc_module_init); module_exit(crypto_authenc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 2cc933e2f790..d1bf0fda3f2e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -12,7 +12,6 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> @@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx { unsigned int reqoff; struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; }; struct authenc_esn_request_ctx { @@ -158,20 +156,6 @@ static void crypto_authenc_esn_encrypt_done(void *data, int err) authenc_esn_request_complete(areq, err); } -static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len) -{ - struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); - struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); - SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); - - skcipher_request_set_sync_tfm(skreq, ctx->null); - skcipher_request_set_callback(skreq, aead_request_flags(req), - NULL, NULL); - skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL); - - return crypto_skcipher_encrypt(skreq); -} - static int crypto_authenc_esn_encrypt(struct aead_request *req) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); @@ -190,10 +174,7 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req) dst = src; if (req->src != req->dst) { - err = crypto_authenc_esn_copy(req, assoclen); - if (err) - return err; - + memcpy_sglist(req->dst, req->src, assoclen); sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen); } @@ -277,11 +258,8 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req) cryptlen -= authsize; - if (req->src != dst) { - err = crypto_authenc_esn_copy(req, assoclen + cryptlen); - if (err) - return err; - } + if (req->src != dst) + memcpy_sglist(dst, req->src, assoclen + cryptlen); scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen, authsize, 0); @@ -317,7 +295,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_ahash *auth; struct crypto_skcipher *enc; - struct crypto_sync_skcipher *null; int err; auth = crypto_spawn_ahash(&ictx->auth); @@ -329,14 +306,8 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_skcipher; - ctx->auth = auth; ctx->enc = 
enc; - ctx->null = null; ctx->reqoff = 2 * crypto_ahash_digestsize(auth); @@ -352,8 +323,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) return 0; -err_free_skcipher: - crypto_free_skcipher(enc); err_free_ahash: crypto_free_ahash(auth); return err; @@ -365,7 +334,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher(); } static void crypto_authenc_esn_free(struct aead_instance *inst) @@ -465,7 +433,7 @@ static void __exit crypto_authenc_esn_module_exit(void) crypto_unregister_template(&crypto_authenc_esn_tmpl); } -subsys_initcall(crypto_authenc_esn_module_init); +module_init(crypto_authenc_esn_module_init); module_exit(crypto_authenc_esn_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 04a712ddfb43..60f056217510 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -15,12 +15,12 @@ * More information about BLAKE2 can be found at https://blake2.net. */ -#include <linux/unaligned.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/bitops.h> #include <crypto/internal/blake2b.h> #include <crypto/internal/hash.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> static const u8 blake2b_sigma[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, @@ -111,8 +111,8 @@ static void blake2b_compress_one_generic(struct blake2b_state *S, #undef G #undef ROUND -void blake2b_compress_generic(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc) +static void blake2b_compress_generic(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc) { do { blake2b_increment_counter(state, inc); @@ -120,17 +120,19 @@ void blake2b_compress_generic(struct blake2b_state *state, block += BLAKE2B_BLOCK_SIZE; } while (--nblocks); } -EXPORT_SYMBOL(blake2b_compress_generic); static int crypto_blake2b_update_generic(struct shash_desc *desc, const u8 *in, unsigned int inlen) { - return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic); + return crypto_blake2b_update_bo(desc, in, inlen, + blake2b_compress_generic); } -static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) +static int crypto_blake2b_finup_generic(struct shash_desc *desc, const u8 *in, + unsigned int inlen, u8 *out) { - return crypto_blake2b_final(desc, out, blake2b_compress_generic); + return crypto_blake2b_finup(desc, in, inlen, out, + blake2b_compress_generic); } #define BLAKE2B_ALG(name, driver_name, digest_size) \ @@ -138,7 +140,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) .base.cra_name = name, \ .base.cra_driver_name = driver_name, \ .base.cra_priority = 100, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \ + CRYPTO_AHASH_ALG_BLOCK_ONLY | \ + CRYPTO_AHASH_ALG_FINAL_NONZERO, \ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ .base.cra_module = THIS_MODULE, \ @@ -146,8 +150,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) .setkey = crypto_blake2b_setkey, \ .init = crypto_blake2b_init, \ .update = crypto_blake2b_update_generic, \ - .final = crypto_blake2b_final_generic, \ - .descsize = sizeof(struct blake2b_state), \ + .finup = crypto_blake2b_finup_generic, \ + .descsize = BLAKE2B_DESC_SIZE, \ + .statesize = BLAKE2B_STATE_SIZE, \ 
} static struct shash_alg blake2b_algs[] = { @@ -171,7 +176,7 @@ static void __exit blake2b_mod_fini(void) crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } -subsys_initcall(blake2b_mod_init); +module_init(blake2b_mod_init); module_exit(blake2b_mod_fini); MODULE_AUTHOR("David Sterba <kdave@kernel.org>"); diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index 0146bc762c09..f3c5f9b09850 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -124,7 +124,7 @@ static void __exit blowfish_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(blowfish_mod_init); +module_init(blowfish_mod_init); module_exit(blowfish_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 197fcf3abc89..ee4336a04b93 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -1064,7 +1064,7 @@ static void __exit camellia_fini(void) crypto_unregister_alg(&camellia_alg); } -subsys_initcall(camellia_init); +module_init(camellia_init); module_exit(camellia_fini); MODULE_DESCRIPTION("Camellia Cipher Algorithm"); diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index f3e57775fa02..f68330793e0c 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -531,7 +531,7 @@ static void __exit cast5_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(cast5_mod_init); +module_init(cast5_mod_init); module_exit(cast5_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 11b725b12f27..4c08c42646f0 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -271,7 +271,7 @@ static void __exit cast6_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(cast6_mod_init); +module_init(cast6_mod_init); module_exit(cast6_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/cbc.c b/crypto/cbc.c index e81918ca68b7..ed3df6246765 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -179,7 +179,7 @@ static void __exit crypto_cbc_module_exit(void) crypto_unregister_template(&crypto_cbc_tmpl); } -subsys_initcall(crypto_cbc_module_init); +module_init(crypto_cbc_module_init); module_exit(crypto_cbc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/ccm.c b/crypto/ccm.c index 06476b53b491..2ae929ffdef8 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -10,11 +10,12 @@ #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> +#include <crypto/utils.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/string.h> struct ccm_instance_ctx { struct crypto_skcipher_spawn ctr; @@ -54,11 +55,6 @@ struct cbcmac_tfm_ctx { struct crypto_cipher *child; }; -struct cbcmac_desc_ctx { - unsigned int len; - u8 dg[]; -}; - static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( struct aead_request *req) { @@ -783,12 +779,10 @@ static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent, static int crypto_cbcmac_digest_init(struct shash_desc *pdesc) { - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_digestsize(pdesc->tfm); + u8 *dg = shash_desc_ctx(pdesc); - ctx->len = 0; - memset(ctx->dg, 0, bs); - + memset(dg, 0, bs); return 0; } @@ -797,39 +791,34 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, { struct crypto_shash *parent = pdesc->tfm; struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cbcmac_desc_ctx *ctx = 
shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - - while (len > 0) { - unsigned int l = min(len, bs - ctx->len); - - crypto_xor(&ctx->dg[ctx->len], p, l); - ctx->len +=l; - len -= l; - p += l; - - if (ctx->len == bs) { - crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); - ctx->len = 0; - } - } - - return 0; + u8 *dg = shash_desc_ctx(pdesc); + + do { + crypto_xor(dg, p, bs); + crypto_cipher_encrypt_one(tfm, dg, dg); + p += bs; + len -= bs; + } while (len >= bs); + return len; } -static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_cbcmac_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); + u8 *dg = shash_desc_ctx(pdesc); - if (ctx->len) - crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); - - memcpy(out, ctx->dg, bs); + if (len) { + crypto_xor(dg, src, len); + crypto_cipher_encrypt_one(tfm, out, dg); + return 0; + } + memcpy(out, dg, bs); return 0; } @@ -883,19 +872,19 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = 1; + inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) + - alg->cra_blocksize; + inst->alg.descsize = alg->cra_blocksize; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY; inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx); inst->alg.base.cra_init = cbcmac_init_tfm; inst->alg.base.cra_exit = cbcmac_exit_tfm; inst->alg.init = crypto_cbcmac_digest_init; inst->alg.update = crypto_cbcmac_digest_update; - inst->alg.final = crypto_cbcmac_digest_final; + inst->alg.finup = crypto_cbcmac_digest_finup; inst->alg.setkey = crypto_cbcmac_digest_setkey; inst->free = shash_free_singlespawn_instance; @@ -940,7 +929,7 @@ static void __exit crypto_ccm_module_exit(void) ARRAY_SIZE(crypto_ccm_tmpls)); } -subsys_initcall(crypto_ccm_module_init); +module_init(crypto_ccm_module_init); module_exit(crypto_ccm_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/chacha.c b/crypto/chacha.c new file mode 100644 index 000000000000..c3a11f4e2d13 --- /dev/null +++ b/crypto/chacha.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers + * + * Copyright (C) 2015 Martin Willi + * Copyright (C) 2018 Google LLC + */ + +#include <linux/unaligned.h> +#include <crypto/algapi.h> +#include <crypto/chacha.h> +#include <crypto/internal/skcipher.h> +#include <linux/module.h> + +struct chacha_ctx { + u32 key[8]; + int nrounds; +}; + +static int chacha_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keysize, int nrounds) +{ + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + int i; + + if (keysize != CHACHA_KEY_SIZE) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ctx->key); i++) + ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); + + ctx->nrounds = nrounds; + return 0; +} + +static int chacha20_setkey(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 20); +} + +static int chacha12_setkey(struct crypto_skcipher *tfm, + const u8 *key, 
unsigned int keysize) +{ + return chacha_setkey(tfm, key, keysize, 12); +} + +static int chacha_stream_xor(struct skcipher_request *req, + const struct chacha_ctx *ctx, + const u8 iv[CHACHA_IV_SIZE], bool arch) +{ + struct skcipher_walk walk; + struct chacha_state state; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + chacha_init(&state, ctx->key, iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE); + + if (arch) + chacha_crypt(&state, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, ctx->nrounds); + else + chacha_crypt_generic(&state, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, + ctx->nrounds); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } + + return err; +} + +static int crypto_chacha_crypt_generic(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + + return chacha_stream_xor(req, ctx, req->iv, false); +} + +static int crypto_chacha_crypt_arch(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + + return chacha_stream_xor(req, ctx, req->iv, true); +} + +static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + struct chacha_ctx subctx; + struct chacha_state state; + u8 real_iv[16]; + + /* Compute the subkey given the original key and first 128 nonce bits */ + chacha_init(&state, ctx->key, req->iv); + if (arch) + hchacha_block(&state, subctx.key, ctx->nrounds); + else + hchacha_block_generic(&state, subctx.key, ctx->nrounds); + subctx.nrounds = ctx->nrounds; + + /* Build the real IV */ + memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */ + memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */ + + /* Generate the stream and XOR it with the data */ + return chacha_stream_xor(req, &subctx, real_iv, arch); +} + +static int crypto_xchacha_crypt_generic(struct skcipher_request *req) +{ + return crypto_xchacha_crypt(req, false); +} + +static int crypto_xchacha_crypt_arch(struct skcipher_request *req) +{ + return crypto_xchacha_crypt(req, true); +} + +static struct skcipher_alg algs[] = { + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_chacha_crypt_generic, + .decrypt = crypto_chacha_crypt_generic, + }, + { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_xchacha_crypt_generic, + .decrypt = crypto_xchacha_crypt_generic, + }, + { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + 
.base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha12_setkey, + .encrypt = crypto_xchacha_crypt_generic, + .decrypt = crypto_xchacha_crypt_generic, + }, + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_chacha_crypt_arch, + .decrypt = crypto_chacha_crypt_arch, + }, + { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = crypto_xchacha_crypt_arch, + .decrypt = crypto_xchacha_crypt_arch, + }, + { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha12_setkey, + .encrypt = crypto_xchacha_crypt_arch, + .decrypt = crypto_xchacha_crypt_arch, + } +}; + +static unsigned int num_algs; + +static int __init crypto_chacha_mod_init(void) +{ + /* register the arch flavours only if they differ from generic */ + num_algs = ARRAY_SIZE(algs); + BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0); + if (!chacha_is_arch_optimized()) + num_algs /= 2; + + return crypto_register_skciphers(algs, num_algs); +} + +static void __exit crypto_chacha_mod_fini(void) +{ + crypto_unregister_skciphers(algs, num_algs); +} + +module_init(crypto_chacha_mod_init); +module_exit(crypto_chacha_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-generic"); +MODULE_ALIAS_CRYPTO("chacha20-" __stringify(ARCH)); +MODULE_ALIAS_CRYPTO("xchacha20"); +MODULE_ALIAS_CRYPTO("xchacha20-generic"); +MODULE_ALIAS_CRYPTO("xchacha20-" __stringify(ARCH)); +MODULE_ALIAS_CRYPTO("xchacha12"); +MODULE_ALIAS_CRYPTO("xchacha12-generic"); +MODULE_ALIAS_CRYPTO("xchacha12-" __stringify(ARCH)); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index d740849f1c19..b4b5a7198d84 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -12,36 +12,23 @@ #include <crypto/chacha.h> #include <crypto/poly1305.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/module.h> +#include <linux/string.h> struct chachapoly_instance_ctx { struct crypto_skcipher_spawn chacha; - struct crypto_ahash_spawn poly; unsigned int saltlen; }; struct chachapoly_ctx { struct crypto_skcipher *chacha; - struct crypto_ahash *poly; /* key bytes we use for the ChaCha20 IV */ unsigned int saltlen; u8 salt[] __counted_by(saltlen); }; -struct poly_req { - /* 
zero byte padding for AD/ciphertext, as needed */ - u8 pad[POLY1305_BLOCK_SIZE]; - /* tail data with AD/ciphertext lengths */ - struct { - __le64 assoclen; - __le64 cryptlen; - } tail; - struct scatterlist src[1]; - struct ahash_request req; /* must be last member */ -}; - struct chacha_req { u8 iv[CHACHA_IV_SIZE]; struct scatterlist src[1]; @@ -62,7 +49,6 @@ struct chachapoly_req_ctx { /* request flags, with MAY_SLEEP cleared if needed */ u32 flags; union { - struct poly_req poly; struct chacha_req chacha; } u; }; @@ -105,16 +91,6 @@ static int poly_verify_tag(struct aead_request *req) return 0; } -static int poly_copy_tag(struct aead_request *req) -{ - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - - scatterwalk_map_and_copy(rctx->tag, req->dst, - req->assoclen + rctx->cryptlen, - sizeof(rctx->tag), 1); - return 0; -} - static void chacha_decrypt_done(void *data, int err) { async_done_continue(data, err, poly_verify_tag); @@ -151,210 +127,76 @@ skip: return poly_verify_tag(req); } -static int poly_tail_continue(struct aead_request *req) -{ - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - - if (rctx->cryptlen == req->cryptlen) /* encrypting */ - return poly_copy_tag(req); - - return chacha_decrypt(req); -} - -static void poly_tail_done(void *data, int err) -{ - async_done_continue(data, err, poly_tail_continue); -} - -static int poly_tail(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; - - preq->tail.assoclen = cpu_to_le64(rctx->assoclen); - preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen); - sg_init_one(preq->src, &preq->tail, sizeof(preq->tail)); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_tail_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, - rctx->tag, sizeof(preq->tail)); - - err = crypto_ahash_finup(&preq->req); - if (err) - return err; - - return poly_tail_continue(req); -} - -static void poly_cipherpad_done(void *data, int err) -{ - async_done_continue(data, err, poly_tail); -} - -static int poly_cipherpad(struct aead_request *req) +static int poly_hash(struct aead_request *req) { - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; + const void *zp = page_address(ZERO_PAGE(0)); + struct scatterlist *sg = req->src; + struct poly1305_desc_ctx desc; + struct scatter_walk walk; + struct { + union { + struct { + __le64 assoclen; + __le64 cryptlen; + }; + u8 u8[16]; + }; + } tail; unsigned int padlen; - int err; - - padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; - memset(preq->pad, 0, sizeof(preq->pad)); - sg_init_one(preq->src, preq->pad, padlen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_cipherpad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); + unsigned int total; - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_tail(req); -} - -static void poly_cipher_done(void *data, int err) -{ - async_done_continue(data, err, poly_cipherpad); -} - -static int poly_cipher(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq 
= &rctx->u.poly; - struct scatterlist *crypt = req->src; - int err; + if (sg != req->dst) + memcpy_sglist(req->dst, sg, req->assoclen); if (rctx->cryptlen == req->cryptlen) /* encrypting */ - crypt = req->dst; - - crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_cipher_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; + sg = req->dst; - return poly_cipherpad(req); -} + poly1305_init(&desc, rctx->key); + scatterwalk_start(&walk, sg); -static void poly_adpad_done(void *data, int err) -{ - async_done_continue(data, err, poly_cipher); -} + total = rctx->assoclen; + while (total) { + unsigned int n = scatterwalk_next(&walk, total); -static int poly_adpad(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - unsigned int padlen; - int err; + poly1305_update(&desc, walk.addr, n); + scatterwalk_done_src(&walk, n); + total -= n; + } padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE; - memset(preq->pad, 0, sizeof(preq->pad)); - sg_init_one(preq->src, preq->pad, padlen); - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_adpad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_cipher(req); -} - -static void poly_ad_done(void *data, int err) -{ - async_done_continue(data, err, poly_adpad); -} - -static int poly_ad(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; - - ahash_request_set_callback(&preq->req, rctx->flags, - poly_ad_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_adpad(req); -} - -static void poly_setkey_done(void *data, int err) -{ - async_done_continue(data, err, poly_ad); -} + poly1305_update(&desc, zp, padlen); -static int poly_setkey(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq = &rctx->u.poly; - int err; + scatterwalk_skip(&walk, req->assoclen - rctx->assoclen); - sg_init_one(preq->src, rctx->key, sizeof(rctx->key)); + total = rctx->cryptlen; + while (total) { + unsigned int n = scatterwalk_next(&walk, total); - ahash_request_set_callback(&preq->req, rctx->flags, - poly_setkey_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); - ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key)); - - err = crypto_ahash_update(&preq->req); - if (err) - return err; - - return poly_ad(req); -} - -static void poly_init_done(void *data, int err) -{ - async_done_continue(data, err, poly_setkey); -} + poly1305_update(&desc, walk.addr, n); + scatterwalk_done_src(&walk, n); + total -= n; + } -static int poly_init(struct aead_request *req) -{ - struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct chachapoly_req_ctx *rctx = aead_request_ctx(req); - struct poly_req *preq 
= &rctx->u.poly; - int err; + padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; + poly1305_update(&desc, zp, padlen); - ahash_request_set_callback(&preq->req, rctx->flags, - poly_init_done, req); - ahash_request_set_tfm(&preq->req, ctx->poly); + tail.assoclen = cpu_to_le64(rctx->assoclen); + tail.cryptlen = cpu_to_le64(rctx->cryptlen); + poly1305_update(&desc, tail.u8, sizeof(tail)); + memzero_explicit(&tail, sizeof(tail)); + poly1305_final(&desc, rctx->tag); - err = crypto_ahash_init(&preq->req); - if (err) - return err; + if (rctx->cryptlen != req->cryptlen) + return chacha_decrypt(req); - return poly_setkey(req); + memcpy_to_scatterwalk(&walk, rctx->tag, sizeof(rctx->tag)); + return 0; } static void poly_genkey_done(void *data, int err) { - async_done_continue(data, err, poly_init); + async_done_continue(data, err, poly_hash); } static int poly_genkey(struct aead_request *req) @@ -388,7 +230,7 @@ static int poly_genkey(struct aead_request *req) if (err) return err; - return poly_init(req); + return poly_hash(req); } static void chacha_encrypt_done(void *data, int err) @@ -437,14 +279,7 @@ static int chachapoly_encrypt(struct aead_request *req) /* encrypt call chain: * - chacha_encrypt/done() * - poly_genkey/done() - * - poly_init/done() - * - poly_setkey/done() - * - poly_ad/done() - * - poly_adpad/done() - * - poly_cipher/done() - * - poly_cipherpad/done() - * - poly_tail/done/continue() - * - poly_copy_tag() + * - poly_hash() */ return chacha_encrypt(req); } @@ -458,13 +293,7 @@ static int chachapoly_decrypt(struct aead_request *req) /* decrypt call chain: * - poly_genkey/done() - * - poly_init/done() - * - poly_setkey/done() - * - poly_ad/done() - * - poly_adpad/done() - * - poly_cipher/done() - * - poly_cipherpad/done() - * - poly_tail/done/continue() + * - poly_hash() * - chacha_decrypt/done() * - poly_verify_tag() */ @@ -503,21 +332,13 @@ static int chachapoly_init(struct crypto_aead *tfm) struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_skcipher *chacha; - struct crypto_ahash *poly; unsigned long align; - poly = crypto_spawn_ahash(&ictx->poly); - if (IS_ERR(poly)) - return PTR_ERR(poly); - chacha = crypto_spawn_skcipher(&ictx->chacha); - if (IS_ERR(chacha)) { - crypto_free_ahash(poly); + if (IS_ERR(chacha)) return PTR_ERR(chacha); - } ctx->chacha = chacha; - ctx->poly = poly; ctx->saltlen = ictx->saltlen; align = crypto_aead_alignmask(tfm); @@ -525,12 +346,9 @@ static int chachapoly_init(struct crypto_aead *tfm) crypto_aead_set_reqsize( tfm, align + offsetof(struct chachapoly_req_ctx, u) + - max(offsetof(struct chacha_req, req) + - sizeof(struct skcipher_request) + - crypto_skcipher_reqsize(chacha), - offsetof(struct poly_req, req) + - sizeof(struct ahash_request) + - crypto_ahash_reqsize(poly))); + offsetof(struct chacha_req, req) + + sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(chacha)); return 0; } @@ -539,7 +357,6 @@ static void chachapoly_exit(struct crypto_aead *tfm) { struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); - crypto_free_ahash(ctx->poly); crypto_free_skcipher(ctx->chacha); } @@ -548,7 +365,6 @@ static void chachapoly_free(struct aead_instance *inst) struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_skcipher(&ctx->chacha); - crypto_drop_ahash(&ctx->poly); kfree(inst); } @@ -559,7 +375,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; struct 
skcipher_alg_common *chacha; - struct hash_alg_common *poly; int err; if (ivsize > CHACHAPOLY_IV_SIZE) @@ -581,14 +396,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha); - err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[2]), 0, mask); - if (err) - goto err_free_inst; - poly = crypto_spawn_ahash_alg(&ctx->poly); - err = -EINVAL; - if (poly->digestsize != POLY1305_DIGEST_SIZE) + if (strcmp(crypto_attr_alg_name(tb[2]), "poly1305") && + strcmp(crypto_attr_alg_name(tb[2]), "poly1305-generic")) goto err_free_inst; /* Need 16-byte IV size, including Initial Block Counter value */ if (chacha->ivsize != CHACHA_IV_SIZE) @@ -599,16 +409,15 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha->base.cra_name, - poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,poly1305)", name, + chacha->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s,%s)", name, chacha->base.cra_driver_name, - poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + "%s(%s,poly1305-generic)", name, + chacha->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_priority = (chacha->base.cra_priority + - poly->base.cra_priority) / 2; + inst->alg.base.cra_priority = chacha->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = chacha->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + @@ -667,7 +476,7 @@ static void __exit chacha20poly1305_module_exit(void) ARRAY_SIZE(rfc7539_tmpls)); } -subsys_initcall(chacha20poly1305_module_init); +module_init(chacha20poly1305_module_init); module_exit(chacha20poly1305_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c deleted file mode 100644 index 1fb9fbd302c6..000000000000 --- a/crypto/chacha_generic.c +++ /dev/null @@ -1,139 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539) - * - * Copyright (C) 2015 Martin Willi - * Copyright (C) 2018 Google LLC - */ - -#include <linux/unaligned.h> -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/skcipher.h> -#include <linux/module.h> - -static int chacha_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - chacha_init(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE); - - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, ctx->nrounds); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static int crypto_chacha_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_stream_xor(req, ctx, req->iv); -} - -static int crypto_xchacha_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - 
u32 state[16]; - u8 real_iv[16]; - - /* Compute the subkey given the original key and first 128 nonce bits */ - chacha_init(state, ctx->key, req->iv); - hchacha_block_generic(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - /* Build the real IV */ - memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */ - memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */ - - /* Generate the stream and XOR it with the data */ - return chacha_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-generic", - .base.cra_priority = 100, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = crypto_chacha_crypt, - .decrypt = crypto_chacha_crypt, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-generic", - .base.cra_priority = 100, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = crypto_xchacha_crypt, - .decrypt = crypto_xchacha_crypt, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-generic", - .base.cra_priority = 100, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = crypto_xchacha_crypt, - .decrypt = crypto_xchacha_crypt, - } -}; - -static int __init chacha_generic_mod_init(void) -{ - return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -} - -static void __exit chacha_generic_mod_fini(void) -{ - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -subsys_initcall(chacha_generic_mod_init); -module_exit(chacha_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-generic"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-generic"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-generic"); diff --git a/crypto/cmac.c b/crypto/cmac.c index c66a0f4d8808..1b03964abe00 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -13,9 +13,12 @@ #include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> +#include <crypto/utils.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> /* * +------------------------ @@ -31,22 +34,6 @@ struct cmac_tfm_ctx { __be64 consts[]; }; -/* - * +------------------------ - * | <shash desc> - * +------------------------ - * | cmac_desc_ctx - * +------------------------ - * | odds (block size) - * +------------------------ - * | prev (block size) - * +------------------------ - */ -struct cmac_desc_ctx { - unsigned int len; - u8 odds[]; -}; - static int crypto_cmac_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { @@ -102,13 +89,10 @@ static int crypto_cmac_digest_setkey(struct 
crypto_shash *parent, static int crypto_cmac_digest_init(struct shash_desc *pdesc) { - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = &ctx->odds[bs]; + u8 *prev = shash_desc_ctx(pdesc); - ctx->len = 0; memset(prev, 0, bs); - return 0; } @@ -117,77 +101,36 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, { struct crypto_shash *parent = pdesc->tfm; struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; - - /* checking the data can fill the block */ - if ((ctx->len + len) <= bs) { - memcpy(odds + ctx->len, p, len); - ctx->len += len; - return 0; - } - - /* filling odds with new data and encrypting it */ - memcpy(odds + ctx->len, p, bs - ctx->len); - len -= bs - ctx->len; - p += bs - ctx->len; - - crypto_xor(prev, odds, bs); - crypto_cipher_encrypt_one(tfm, prev, prev); + u8 *prev = shash_desc_ctx(pdesc); - /* clearing the length */ - ctx->len = 0; - - /* encrypting the rest of data */ - while (len > bs) { + do { crypto_xor(prev, p, bs); crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len -= bs; - } - - /* keeping the surplus of blocksize */ - if (len) { - memcpy(odds, p, len); - ctx->len = len; - } - - return 0; + } while (len >= bs); + return len; } -static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; + u8 *prev = shash_desc_ctx(pdesc); unsigned int offset = 0; - if (ctx->len != bs) { - unsigned int rlen; - u8 *p = odds + ctx->len; - - *p = 0x80; - p++; - - rlen = bs - ctx->len - 1; - if (rlen) - memset(p, 0, rlen); - + crypto_xor(prev, src, len); + if (len != bs) { + prev[len] ^= 0x80; offset += bs; } - - crypto_xor(prev, odds, bs); crypto_xor(prev, (const u8 *)tctx->consts + offset, bs); - crypto_cipher_encrypt_one(tfm, out, prev); - return 0; } @@ -269,13 +212,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) + alg->cra_blocksize * 2; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = sizeof(struct cmac_desc_ctx) + - alg->cra_blocksize * 2; + inst->alg.descsize = alg->cra_blocksize; inst->alg.init = crypto_cmac_digest_init; inst->alg.update = crypto_cmac_digest_update; - inst->alg.final = crypto_cmac_digest_final; + inst->alg.finup = crypto_cmac_digest_finup; inst->alg.setkey = crypto_cmac_digest_setkey; inst->alg.init_tfm = cmac_init_tfm; inst->alg.clone_tfm = cmac_clone_tfm; @@ -307,7 +251,7 @@ static void __exit crypto_cmac_module_exit(void) crypto_unregister_template(&crypto_cmac_tmpl); } -subsys_initcall(crypto_cmac_module_init); +module_init(crypto_cmac_module_init); module_exit(crypto_cmac_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/crc32_generic.c b/crypto/crc32.c index 783a30b27398..cc371d42601f 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32.c @@ 
-172,7 +172,7 @@ static void __exit crc32_mod_fini(void) crypto_unregister_shashes(algs, num_algs); } -subsys_initcall(crc32_mod_init); +module_init(crc32_mod_init); module_exit(crc32_mod_fini); MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); diff --git a/crypto/crc32c_generic.c b/crypto/crc32c.c index b1a36d32dc50..e5377898414a 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c.c @@ -212,7 +212,7 @@ static void __exit crc32c_mod_fini(void) crypto_unregister_shashes(algs, num_algs); } -subsys_initcall(crc32c_mod_init); +module_init(crc32c_mod_init); module_exit(crc32c_mod_fini); MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 31d022d47f7a..5bb6f8d88cc2 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -1138,7 +1138,7 @@ static void __exit cryptd_exit(void) crypto_unregister_template(&cryptd_tmpl); } -subsys_initcall(cryptd_init); +module_init(cryptd_init); module_exit(cryptd_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index c7c16da5e649..445d3c113ee1 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -23,9 +23,6 @@ #define CRYPTO_ENGINE_MAX_QLEN 10 -/* Temporary algorithm flag used to indicate an updated driver. */ -#define CRYPTO_ALG_ENGINE 0x200 - struct crypto_engine_alg { struct crypto_alg base; struct crypto_engine_op op; @@ -148,16 +145,9 @@ start_request: } } - if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) { - alg = container_of(async_req->tfm->__crt_alg, - struct crypto_engine_alg, base); - op = &alg->op; - } else { - dev_err(engine->dev, "failed to do request\n"); - ret = -EINVAL; - goto req_err_1; - } - + alg = container_of(async_req->tfm->__crt_alg, + struct crypto_engine_alg, base); + op = &alg->op; ret = op->do_one_request(engine, async_req); /* Request unsuccessfully executed by hardware */ @@ -569,9 +559,6 @@ int crypto_engine_register_aead(struct aead_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_aead(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_aead); @@ -614,9 +601,6 @@ int crypto_engine_register_ahash(struct ahash_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_ahash(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_ahash); @@ -660,9 +644,6 @@ int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_akcipher(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher); @@ -677,9 +658,6 @@ int crypto_engine_register_kpp(struct kpp_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_kpp(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_kpp); @@ -694,9 +672,6 @@ int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg) { if (!alg->op.do_one_request) return -EINVAL; - - alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; - return crypto_register_skcipher(&alg->base); } EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index ced90f88ee07..34588f39fdfc 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -15,15 +15,11 @@ #include <crypto/null.h> #include <crypto/internal/hash.h> #include 
<crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/spinlock.h> #include <linux/string.h> -static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock); -static struct crypto_sync_skcipher *crypto_default_null_skcipher; -static int crypto_default_null_skcipher_refcnt; - static int null_init(struct shash_desc *desc) { return 0; @@ -65,19 +61,9 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) static int null_skcipher_crypt(struct skcipher_request *req) { - struct skcipher_walk walk; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes) { - if (walk.src.virt.addr != walk.dst.virt.addr) - memcpy(walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - - return err; + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, req->cryptlen); + return 0; } static struct shash_alg digest_null = { @@ -129,54 +115,6 @@ static struct crypto_alg cipher_null = { MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); -struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void) -{ - struct crypto_sync_skcipher *ntfm = NULL; - struct crypto_sync_skcipher *tfm; - - spin_lock_bh(&crypto_default_null_skcipher_lock); - tfm = crypto_default_null_skcipher; - - if (!tfm) { - spin_unlock_bh(&crypto_default_null_skcipher_lock); - - ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); - if (IS_ERR(ntfm)) - return ntfm; - - spin_lock_bh(&crypto_default_null_skcipher_lock); - tfm = crypto_default_null_skcipher; - if (!tfm) { - tfm = ntfm; - ntfm = NULL; - crypto_default_null_skcipher = tfm; - } - } - - crypto_default_null_skcipher_refcnt++; - spin_unlock_bh(&crypto_default_null_skcipher_lock); - - crypto_free_sync_skcipher(ntfm); - - return tfm; -} -EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); - -void crypto_put_default_null_skcipher(void) -{ - struct crypto_sync_skcipher *tfm = NULL; - - spin_lock_bh(&crypto_default_null_skcipher_lock); - if (!--crypto_default_null_skcipher_refcnt) { - tfm = crypto_default_null_skcipher; - crypto_default_null_skcipher = NULL; - } - spin_unlock_bh(&crypto_default_null_skcipher_lock); - - crypto_free_sync_skcipher(tfm); -} -EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); - static int __init crypto_null_mod_init(void) { int ret = 0; @@ -210,7 +148,7 @@ static void __exit crypto_null_mod_fini(void) crypto_unregister_skcipher(&skcipher_null); } -subsys_initcall(crypto_null_mod_init); +module_init(crypto_null_mod_init); module_exit(crypto_null_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/ctr.c b/crypto/ctr.c index 97a947b0a876..a388f0ceb3a0 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -350,7 +350,7 @@ static void __exit crypto_ctr_module_exit(void) ARRAY_SIZE(crypto_ctr_tmpls)); } -subsys_initcall(crypto_ctr_module_init); +module_init(crypto_ctr_module_init); module_exit(crypto_ctr_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/cts.c b/crypto/cts.c index f5b42156b6c7..48898d5e24ff 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -402,7 +402,7 @@ static void __exit crypto_cts_module_exit(void) crypto_unregister_template(&crypto_cts_tmpl); } -subsys_initcall(crypto_cts_module_init); +module_init(crypto_cts_module_init); module_exit(crypto_cts_module_exit); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c index 68a673262e04..f3e56e73c66c 100644 --- a/crypto/curve25519-generic.c +++ 
b/crypto/curve25519-generic.c @@ -82,7 +82,7 @@ static void __exit curve25519_exit(void) crypto_unregister_kpp(&curve25519_alg); } -subsys_initcall(curve25519_init); +module_init(curve25519_init); module_exit(curve25519_exit); MODULE_ALIAS_CRYPTO("curve25519"); diff --git a/crypto/deflate.c b/crypto/deflate.c index 5c346c544093..fe8e4ad0fee1 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -6,253 +6,250 @@ * by IPCOMP (RFC 3173 & RFC 2394). * * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> - * - * FIXME: deflate transforms will require up to a total of about 436k of kernel - * memory on i386 (390k for compression, the rest for decompression), as the - * current zlib kernel code uses a worst case pre-allocation system by default. - * This needs to be fixed so that the amount of memory required is properly - * related to the winbits and memlevel parameters. - * - * The default winbits of 11 should suit most packets, and it may be something - * to configure on a per-tfm basis in the future. - * - * Currently, compression history is not maintained between tfm calls, as - * it is not needed for IPCOMP and keeps the code simpler. It can be - * implemented if someone wants it. + * Copyright (c) 2023 Google, LLC. <ardb@kernel.org> + * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au> */ +#include <crypto/internal/acompress.h> +#include <crypto/scatterwalk.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/crypto.h> +#include <linux/mutex.h> +#include <linux/percpu.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/zlib.h> -#include <linux/vmalloc.h> -#include <linux/interrupt.h> -#include <linux/mm.h> -#include <linux/net.h> -#include <crypto/internal/scompress.h> #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION #define DEFLATE_DEF_WINBITS 11 #define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL -struct deflate_ctx { - struct z_stream_s comp_stream; - struct z_stream_s decomp_stream; +struct deflate_stream { + struct z_stream_s stream; + u8 workspace[]; }; -static int deflate_comp_init(struct deflate_ctx *ctx) -{ - int ret = 0; - struct z_stream_s *stream = &ctx->comp_stream; - - stream->workspace = vzalloc(zlib_deflate_workspacesize( - -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL)); - if (!stream->workspace) { - ret = -ENOMEM; - goto out; - } - ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, - -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, - Z_DEFAULT_STRATEGY); - if (ret != Z_OK) { - ret = -EINVAL; - goto out_free; - } -out: - return ret; -out_free: - vfree(stream->workspace); - goto out; -} +static DEFINE_MUTEX(deflate_stream_lock); -static int deflate_decomp_init(struct deflate_ctx *ctx) +static void *deflate_alloc_stream(void) { - int ret = 0; - struct z_stream_s *stream = &ctx->decomp_stream; + size_t size = max(zlib_inflate_workspacesize(), + zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, + DEFLATE_DEF_MEMLEVEL)); + struct deflate_stream *ctx; - stream->workspace = vzalloc(zlib_inflate_workspacesize()); - if (!stream->workspace) { - ret = -ENOMEM; - goto out; - } - ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); - if (ret != Z_OK) { - ret = -EINVAL; - goto out_free; - } -out: - return ret; -out_free: - vfree(stream->workspace); - goto out; -} + ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); -static void deflate_comp_exit(struct deflate_ctx *ctx) -{ - zlib_deflateEnd(&ctx->comp_stream); - vfree(ctx->comp_stream.workspace); 
-} + ctx->stream.workspace = ctx->workspace; -static void deflate_decomp_exit(struct deflate_ctx *ctx) -{ - zlib_inflateEnd(&ctx->decomp_stream); - vfree(ctx->decomp_stream.workspace); + return ctx; } -static int __deflate_init(void *ctx) +static struct crypto_acomp_streams deflate_streams = { + .alloc_ctx = deflate_alloc_stream, + .cfree_ctx = kvfree, +}; + +static int deflate_compress_one(struct acomp_req *req, + struct deflate_stream *ds) { + struct z_stream_s *stream = &ds->stream; + struct acomp_walk walk; int ret; - ret = deflate_comp_init(ctx); + ret = acomp_walk_virt(&walk, req, true); if (ret) - goto out; - ret = deflate_decomp_init(ctx); - if (ret) - deflate_comp_exit(ctx); -out: - return ret; -} + return ret; -static void *deflate_alloc_ctx(void) -{ - struct deflate_ctx *ctx; - int ret; + do { + unsigned int dcur; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return ERR_PTR(-ENOMEM); + dcur = acomp_walk_next_dst(&walk); + if (!dcur) + return -ENOSPC; - ret = __deflate_init(ctx); - if (ret) { - kfree(ctx); - return ERR_PTR(ret); - } + stream->avail_out = dcur; + stream->next_out = walk.dst.virt.addr; - return ctx; -} + do { + int flush = Z_FINISH; + unsigned int scur; -static void __deflate_exit(void *ctx) -{ - deflate_comp_exit(ctx); - deflate_decomp_exit(ctx); -} + stream->avail_in = 0; + stream->next_in = NULL; -static void deflate_free_ctx(void *ctx) -{ - __deflate_exit(ctx); - kfree_sensitive(ctx); + scur = acomp_walk_next_src(&walk); + if (scur) { + if (acomp_walk_more_src(&walk, scur)) + flush = Z_NO_FLUSH; + stream->avail_in = scur; + stream->next_in = walk.src.virt.addr; + } + + ret = zlib_deflate(stream, flush); + + if (scur) { + scur -= stream->avail_in; + acomp_walk_done_src(&walk, scur); + } + } while (ret == Z_OK && stream->avail_out); + + acomp_walk_done_dst(&walk, dcur); + } while (ret == Z_OK); + + if (ret != Z_STREAM_END) + return -EINVAL; + + req->dlen = stream->total_out; + return 0; } -static int __deflate_compress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) +static int deflate_compress(struct acomp_req *req) { - int ret = 0; - struct deflate_ctx *dctx = ctx; - struct z_stream_s *stream = &dctx->comp_stream; + struct crypto_acomp_stream *s; + struct deflate_stream *ds; + int err; + + s = crypto_acomp_lock_stream_bh(&deflate_streams); + ds = s->ctx; - ret = zlib_deflateReset(stream); - if (ret != Z_OK) { - ret = -EINVAL; + err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, + -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, + Z_DEFAULT_STRATEGY); + if (err != Z_OK) { + err = -EINVAL; goto out; } - stream->next_in = (u8 *)src; - stream->avail_in = slen; - stream->next_out = (u8 *)dst; - stream->avail_out = *dlen; + err = deflate_compress_one(req, ds); - ret = zlib_deflate(stream, Z_FINISH); - if (ret != Z_STREAM_END) { - ret = -EINVAL; - goto out; - } - ret = 0; - *dlen = stream->total_out; out: - return ret; + crypto_acomp_unlock_stream_bh(s); + + return err; } -static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen, - void *ctx) +static int deflate_decompress_one(struct acomp_req *req, + struct deflate_stream *ds) { - return __deflate_compress(src, slen, dst, dlen, ctx); + struct z_stream_s *stream = &ds->stream; + bool out_of_space = false; + struct acomp_walk walk; + int ret; + + ret = acomp_walk_virt(&walk, req, true); + if (ret) + return ret; + + do { + unsigned int scur; + + stream->avail_in = 0; + stream->next_in = NULL; + + scur = 
acomp_walk_next_src(&walk); + if (scur) { + stream->avail_in = scur; + stream->next_in = walk.src.virt.addr; + } + + do { + unsigned int dcur; + + dcur = acomp_walk_next_dst(&walk); + if (!dcur) { + out_of_space = true; + break; + } + + stream->avail_out = dcur; + stream->next_out = walk.dst.virt.addr; + + ret = zlib_inflate(stream, Z_NO_FLUSH); + + dcur -= stream->avail_out; + acomp_walk_done_dst(&walk, dcur); + } while (ret == Z_OK && stream->avail_in); + + if (scur) + acomp_walk_done_src(&walk, scur); + + if (out_of_space) + return -ENOSPC; + } while (ret == Z_OK); + + if (ret != Z_STREAM_END) + return -EINVAL; + + req->dlen = stream->total_out; + return 0; } -static int __deflate_decompress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) +static int deflate_decompress(struct acomp_req *req) { + struct crypto_acomp_stream *s; + struct deflate_stream *ds; + int err; - int ret = 0; - struct deflate_ctx *dctx = ctx; - struct z_stream_s *stream = &dctx->decomp_stream; + s = crypto_acomp_lock_stream_bh(&deflate_streams); + ds = s->ctx; - ret = zlib_inflateReset(stream); - if (ret != Z_OK) { - ret = -EINVAL; + err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS); + if (err != Z_OK) { + err = -EINVAL; goto out; } - stream->next_in = (u8 *)src; - stream->avail_in = slen; - stream->next_out = (u8 *)dst; - stream->avail_out = *dlen; - - ret = zlib_inflate(stream, Z_SYNC_FLUSH); - /* - * Work around a bug in zlib, which sometimes wants to taste an extra - * byte when being used in the (undocumented) raw deflate mode. - * (From USAGI). - */ - if (ret == Z_OK && !stream->avail_in && stream->avail_out) { - u8 zerostuff = 0; - stream->next_in = &zerostuff; - stream->avail_in = 1; - ret = zlib_inflate(stream, Z_FINISH); - } - if (ret != Z_STREAM_END) { - ret = -EINVAL; - goto out; - } - ret = 0; - *dlen = stream->total_out; + err = deflate_decompress_one(req, ds); + out: - return ret; + crypto_acomp_unlock_stream_bh(s); + + return err; } -static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen, - void *ctx) +static int deflate_init(struct crypto_acomp *tfm) { - return __deflate_decompress(src, slen, dst, dlen, ctx); + int ret; + + mutex_lock(&deflate_stream_lock); + ret = crypto_acomp_alloc_streams(&deflate_streams); + mutex_unlock(&deflate_stream_lock); + + return ret; } -static struct scomp_alg scomp = { - .alloc_ctx = deflate_alloc_ctx, - .free_ctx = deflate_free_ctx, - .compress = deflate_scompress, - .decompress = deflate_sdecompress, - .base = { - .cra_name = "deflate", - .cra_driver_name = "deflate-scomp", - .cra_module = THIS_MODULE, - } +static struct acomp_alg acomp = { + .compress = deflate_compress, + .decompress = deflate_decompress, + .init = deflate_init, + .base.cra_name = "deflate", + .base.cra_driver_name = "deflate-generic", + .base.cra_flags = CRYPTO_ALG_REQ_VIRT, + .base.cra_module = THIS_MODULE, }; static int __init deflate_mod_init(void) { - return crypto_register_scomp(&scomp); + return crypto_register_acomp(&acomp); } static void __exit deflate_mod_fini(void) { - crypto_unregister_scomp(&scomp); + crypto_unregister_acomp(&acomp); + crypto_acomp_free_streams(&deflate_streams); } -subsys_initcall(deflate_mod_init); +module_init(deflate_mod_init); module_exit(deflate_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); +MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); 
+MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>"); MODULE_ALIAS_CRYPTO("deflate"); MODULE_ALIAS_CRYPTO("deflate-generic"); diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 1274e18d3eb9..fce341400914 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -122,7 +122,7 @@ static void __exit des_generic_mod_fini(void) crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs)); } -subsys_initcall(des_generic_mod_init); +module_init(des_generic_mod_init); module_exit(des_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/dh.c b/crypto/dh.c index afc0fd847761..8250eeeebd0f 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -920,7 +920,7 @@ static void __exit dh_exit(void) crypto_unregister_kpp(&dh); } -subsys_initcall(dh_init); +module_init(dh_init); module_exit(dh_exit); MODULE_ALIAS_CRYPTO("dh"); MODULE_LICENSE("GPL"); diff --git a/crypto/drbg.c b/crypto/drbg.c index f28dfc2511a2..dbe4c8bb5ceb 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -2132,7 +2132,7 @@ static void __exit drbg_exit(void) crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); } -subsys_initcall(drbg_init); +module_init(drbg_init); module_exit(drbg_exit); #ifndef CRYPTO_DRBG_HASH_STRING #define CRYPTO_DRBG_HASH_STRING "" diff --git a/crypto/ecb.c b/crypto/ecb.c index 95d7e972865a..cd1b20456dad 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -219,7 +219,7 @@ static void __exit crypto_ecb_module_exit(void) crypto_unregister_template(&crypto_ecb_tmpl); } -subsys_initcall(crypto_ecb_module_init); +module_init(crypto_ecb_module_init); module_exit(crypto_ecb_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/ecdh.c b/crypto/ecdh.c index 72cfd1590156..9f0b93b3166d 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -240,7 +240,7 @@ static void __exit ecdh_exit(void) crypto_unregister_kpp(&ecdh_nist_p384); } -subsys_initcall(ecdh_init); +module_init(ecdh_init); module_exit(ecdh_exit); MODULE_ALIAS_CRYPTO("ecdh"); MODULE_LICENSE("GPL"); diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c index 4454f1f8f33f..e0c55c64711c 100644 --- a/crypto/ecdsa-p1363.c +++ b/crypto/ecdsa-p1363.c @@ -21,7 +21,8 @@ static int ecdsa_p1363_verify(struct crypto_sig *tfm, const void *digest, unsigned int dlen) { struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); - unsigned int keylen = crypto_sig_keysize(ctx->child); + unsigned int keylen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64)); struct ecdsa_raw_sig sig; @@ -45,7 +46,8 @@ static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm) { struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); - return 2 * crypto_sig_keysize(ctx->child); + return 2 * DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); } static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm) diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c index 90a04f4b9a2f..ee71594d10a0 100644 --- a/crypto/ecdsa-x962.c +++ b/crypto/ecdsa-x962.c @@ -82,7 +82,7 @@ static int ecdsa_x962_verify(struct crypto_sig *tfm, int err; sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), - sizeof(u64)); + sizeof(u64) * BITS_PER_BYTE); err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen); if (err < 0) @@ -103,7 +103,8 @@ static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm) { struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm); struct sig_alg *alg = crypto_sig_alg(ctx->child); - int slen = crypto_sig_keysize(ctx->child); + int slen = 
DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + BITS_PER_BYTE); /* * Verify takes ECDSA-Sig-Value (described in RFC 5480) as input, diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c index 117526d15dde..ce8e4364842f 100644 --- a/crypto/ecdsa.c +++ b/crypto/ecdsa.c @@ -167,7 +167,7 @@ static unsigned int ecdsa_key_size(struct crypto_sig *tfm) { struct ecc_ctx *ctx = crypto_sig_ctx(tfm); - return DIV_ROUND_UP(ctx->curve->nbits, 8); + return ctx->curve->nbits; } static unsigned int ecdsa_digest_size(struct crypto_sig *tfm) @@ -334,7 +334,7 @@ static void __exit ecdsa_exit(void) crypto_unregister_sig(&ecdsa_nist_p521); } -subsys_initcall(ecdsa_init); +module_init(ecdsa_init); module_exit(ecdsa_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 69686668625e..e0a2d3209938 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -32,7 +32,6 @@ static int echainiv_encrypt(struct aead_request *req) u64 seqno; u8 *info; unsigned int ivsize = crypto_aead_ivsize(geniv); - int err; if (req->cryptlen < ivsize) return -EINVAL; @@ -41,20 +40,9 @@ static int echainiv_encrypt(struct aead_request *req) info = req->iv; - if (req->src != req->dst) { - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - - skcipher_request_set_sync_tfm(nreq, ctx->sknull); - skcipher_request_set_callback(nreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, - req->assoclen + req->cryptlen, - NULL); - - err = crypto_skcipher_encrypt(nreq); - if (err) - return err; - } + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, + req->assoclen + req->cryptlen); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); @@ -157,7 +145,7 @@ static void __exit echainiv_module_exit(void) crypto_unregister_template(&echainiv_tmpl); } -subsys_initcall(echainiv_module_init); +module_init(echainiv_module_init); module_exit(echainiv_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c index b3dd8a3ddeb7..2c0602f0cd40 100644 --- a/crypto/ecrdsa.c +++ b/crypto/ecrdsa.c @@ -249,7 +249,7 @@ static unsigned int ecrdsa_key_size(struct crypto_sig *tfm) * Verify doesn't need any output, so it's just informational * for keyctl to determine the key bit size. 
*/ - return ctx->pub_key.ndigits * sizeof(u64); + return ctx->pub_key.ndigits * sizeof(u64) * BITS_PER_BYTE; } static unsigned int ecrdsa_max_size(struct crypto_sig *tfm) diff --git a/crypto/essiv.c b/crypto/essiv.c index ec0ec8992c2d..d003b78fcd85 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -548,8 +548,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) } /* record the driver name so we can instantiate this exact algo later */ - strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, - CRYPTO_MAX_ALG_NAME); + strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name); /* Instance fields */ @@ -642,7 +641,7 @@ static void __exit essiv_module_exit(void) crypto_unregister_template(&essiv_tmpl); } -subsys_initcall(essiv_module_init); +module_init(essiv_module_init); module_exit(essiv_module_exit); MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption"); diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 95a16e88899b..80036835cec5 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -411,7 +411,7 @@ static void __exit fcrypt_mod_fini(void) crypto_unregister_alg(&fcrypt_alg); } -subsys_initcall(fcrypt_mod_init); +module_init(fcrypt_mod_init); module_exit(fcrypt_mod_fini); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/crypto/fips.c b/crypto/fips.c index 2fa3a9ee61a1..e88a604cb42b 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -95,5 +95,5 @@ static void __exit fips_exit(void) crypto_proc_fips_exit(); } -subsys_initcall(fips_init); +module_init(fips_init); module_exit(fips_exit); diff --git a/crypto/gcm.c b/crypto/gcm.c index 84f7c23d14e4..97716482bed0 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -9,7 +9,6 @@ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> -#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <crypto/gcm.h> #include <crypto/hash.h> @@ -46,7 +45,6 @@ struct crypto_rfc4543_instance_ctx { struct crypto_rfc4543_ctx { struct crypto_aead *child; - struct crypto_sync_skcipher *null; u8 nonce[4]; }; @@ -79,8 +77,6 @@ static struct { struct scatterlist sg; } *gcm_zeroes; -static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc); - static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( struct aead_request *req) { @@ -930,12 +926,12 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) unsigned int authsize = crypto_aead_authsize(aead); u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), crypto_aead_alignmask(ctx->child) + 1); - int err; if (req->src != req->dst) { - err = crypto_rfc4543_copy_src_to_dst(req, enc); - if (err) - return err; + unsigned int nbytes = req->assoclen + req->cryptlen - + (enc ? 0 : authsize); + + memcpy_sglist(req->dst, req->src, nbytes); } memcpy(iv, ctx->nonce, 4); @@ -952,22 +948,6 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); } -static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); - unsigned int authsize = crypto_aead_authsize(aead); - unsigned int nbytes = req->assoclen + req->cryptlen - - (enc ? 
0 : authsize); - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); - - skcipher_request_set_sync_tfm(nreq, ctx->null); - skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); - - return crypto_skcipher_encrypt(nreq); -} - static int crypto_rfc4543_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ?: @@ -987,21 +967,13 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) struct crypto_aead_spawn *spawn = &ictx->aead; struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; - struct crypto_sync_skcipher *null; unsigned long align; - int err = 0; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); - null = crypto_get_default_null_skcipher(); - err = PTR_ERR(null); - if (IS_ERR(null)) - goto err_free_aead; - ctx->child = aead; - ctx->null = null; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); @@ -1012,10 +984,6 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) align + GCM_AES_IV_SIZE); return 0; - -err_free_aead: - crypto_free_aead(aead); - return err; } static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) @@ -1023,7 +991,6 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); } static void crypto_rfc4543_free(struct aead_instance *inst) @@ -1152,7 +1119,7 @@ static void __exit crypto_gcm_module_exit(void) ARRAY_SIZE(crypto_gcm_tmpls)); } -subsys_initcall(crypto_gcm_module_init); +module_init(crypto_gcm_module_init); module_exit(crypto_gcm_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/geniv.c b/crypto/geniv.c index bee4621b4f12..42eff6a7387c 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -9,7 +9,6 @@ #include <crypto/internal/geniv.h> #include <crypto/internal/rng.h> -#include <crypto/null.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> @@ -125,15 +124,10 @@ int aead_init_geniv(struct crypto_aead *aead) if (err) goto out; - ctx->sknull = crypto_get_default_null_skcipher(); - err = PTR_ERR(ctx->sknull); - if (IS_ERR(ctx->sknull)) - goto out; - child = crypto_spawn_aead(aead_instance_ctx(inst)); err = PTR_ERR(child); if (IS_ERR(child)) - goto drop_null; + goto out; ctx->child = child; crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) + @@ -143,10 +137,6 @@ int aead_init_geniv(struct crypto_aead *aead) out: return err; - -drop_null: - crypto_put_default_null_skcipher(); - goto out; } EXPORT_SYMBOL_GPL(aead_init_geniv); @@ -155,7 +145,6 @@ void aead_exit_geniv(struct crypto_aead *tfm) struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher(); } EXPORT_SYMBOL_GPL(aead_exit_geniv); diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index c70d163c1ac9..e5803c249c12 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -34,14 +34,14 @@ * (https://csrc.nist.gov/publications/detail/sp/800-38d/final) */ -#include <crypto/algapi.h> #include <crypto/gf128mul.h> #include <crypto/ghash.h> #include <crypto/internal/hash.h> -#include <linux/crypto.h> -#include <linux/init.h> +#include <crypto/utils.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> static int ghash_init(struct shash_desc *desc) { @@ -82,59 +82,36 @@ static int ghash_update(struct shash_desc *desc, 
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; - if (dctx->bytes) { - int n = min(srclen, dctx->bytes); - u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - dctx->bytes -= n; - srclen -= n; - - while (n--) - *pos++ ^= *src++; - - if (!dctx->bytes) - gf128mul_4k_lle((be128 *)dst, ctx->gf128); - } - - while (srclen >= GHASH_BLOCK_SIZE) { + do { crypto_xor(dst, src, GHASH_BLOCK_SIZE); gf128mul_4k_lle((be128 *)dst, ctx->gf128); src += GHASH_BLOCK_SIZE; srclen -= GHASH_BLOCK_SIZE; - } - - if (srclen) { - dctx->bytes = GHASH_BLOCK_SIZE - srclen; - while (srclen--) - *dst++ ^= *src++; - } + } while (srclen >= GHASH_BLOCK_SIZE); - return 0; + return srclen; } -static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) +static void ghash_flush(struct shash_desc *desc, const u8 *src, + unsigned int len) { + struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); u8 *dst = dctx->buffer; - if (dctx->bytes) { - u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); - - while (dctx->bytes--) - *tmp++ ^= 0; - + if (len) { + crypto_xor(dst, src, len); gf128mul_4k_lle((be128 *)dst, ctx->gf128); } - - dctx->bytes = 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; - ghash_flush(ctx, dctx); + ghash_flush(desc, src, len); memcpy(dst, buf, GHASH_BLOCK_SIZE); return 0; @@ -151,13 +128,14 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, - .final = ghash_final, + .finup = ghash_finup, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_ctx), .cra_module = THIS_MODULE, @@ -175,7 +153,7 @@ static void __exit ghash_mod_exit(void) crypto_unregister_shash(&ghash_alg); } -subsys_initcall(ghash_mod_init); +module_init(ghash_mod_init); module_exit(ghash_mod_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/hctr2.c b/crypto/hctr2.c index cbcd673be481..c8932777bba8 100644 --- a/crypto/hctr2.c +++ b/crypto/hctr2.c @@ -570,7 +570,7 @@ static void __exit hctr2_module_exit(void) ARRAY_SIZE(hctr2_tmpls)); } -subsys_initcall(hctr2_module_init); +module_init(hctr2_module_init); module_exit(hctr2_module_exit); MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode"); diff --git a/crypto/hkdf.c b/crypto/hkdf.c index 2434c5c42545..f24c2a8d4df9 100644 --- a/crypto/hkdf.c +++ b/crypto/hkdf.c @@ -543,7 +543,7 @@ static int __init crypto_hkdf_module_init(void) { int ret = 0, i; - if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) return 0; for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) { diff --git a/crypto/hmac.c b/crypto/hmac.c index 7cec25ff9889..148af460ae97 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -13,13 +13,11 @@ #include <crypto/hmac.h> #include <crypto/internal/hash.h> -#include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/fips.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/scatterlist.h> +#include <linux/slab.h> #include <linux/string.h> struct hmac_ctx { @@ -28,6 +26,12 @@ struct hmac_ctx { 
u8 pads[]; }; +struct ahash_hmac_ctx { + struct crypto_ahash *hash; + /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ + u8 pads[]; +}; + static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { @@ -39,7 +43,7 @@ static int hmac_setkey(struct crypto_shash *parent, u8 *ipad = &tctx->pads[0]; u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); - unsigned int i; + int err, i; if (fips_enabled && (keylen < 112 / 8)) return -EINVAL; @@ -65,12 +69,14 @@ static int hmac_setkey(struct crypto_shash *parent, opad[i] ^= HMAC_OPAD_VALUE; } - return crypto_shash_init(shash) ?: - crypto_shash_update(shash, ipad, bs) ?: - crypto_shash_export(shash, ipad) ?: - crypto_shash_init(shash) ?: - crypto_shash_update(shash, opad, bs) ?: - crypto_shash_export(shash, opad); + err = crypto_shash_init(shash) ?: + crypto_shash_update(shash, ipad, bs) ?: + crypto_shash_export(shash, ipad) ?: + crypto_shash_init(shash) ?: + crypto_shash_update(shash, opad, bs) ?: + crypto_shash_export(shash, opad); + shash_desc_zero(shash); + return err; } static int hmac_export(struct shash_desc *pdesc, void *out) @@ -90,6 +96,22 @@ static int hmac_import(struct shash_desc *pdesc, const void *in) return crypto_shash_import(desc, in); } +static int hmac_export_core(struct shash_desc *pdesc, void *out) +{ + struct shash_desc *desc = shash_desc_ctx(pdesc); + + return crypto_shash_export_core(desc, out); +} + +static int hmac_import_core(struct shash_desc *pdesc, const void *in) +{ + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); + struct shash_desc *desc = shash_desc_ctx(pdesc); + + desc->tfm = tctx->hash; + return crypto_shash_import_core(desc, in); +} + static int hmac_init(struct shash_desc *pdesc) { const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); @@ -105,20 +127,6 @@ static int hmac_update(struct shash_desc *pdesc, return crypto_shash_update(desc, data, nbytes); } -static int hmac_final(struct shash_desc *pdesc, u8 *out) -{ - struct crypto_shash *parent = pdesc->tfm; - int ds = crypto_shash_digestsize(parent); - int ss = crypto_shash_statesize(parent); - const struct hmac_ctx *tctx = crypto_shash_ctx(parent); - const u8 *opad = &tctx->pads[ss]; - struct shash_desc *desc = shash_desc_ctx(pdesc); - - return crypto_shash_final(desc, out) ?: - crypto_shash_import(desc, opad) ?: - crypto_shash_finup(desc, out, ds, out); -} - static int hmac_finup(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes, u8 *out) { @@ -146,9 +154,6 @@ static int hmac_init_tfm(struct crypto_shash *parent) if (IS_ERR(hash)) return PTR_ERR(hash); - parent->descsize = sizeof(struct shash_desc) + - crypto_shash_descsize(hash); - tctx->hash = hash; return 0; } @@ -174,26 +179,23 @@ static void hmac_exit_tfm(struct crypto_shash *parent) crypto_free_shash(tctx->hash); } -static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +static int __hmac_create_shash(struct crypto_template *tmpl, + struct rtattr **tb, u32 mask) { struct shash_instance *inst; struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; - u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); - if (err) - return err; - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = shash_instance_ctx(inst); + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; err = crypto_grab_shash(spawn, shash_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) @@ -212,7 +214,8 @@ static int 
hmac_create(struct crypto_template *tmpl, struct rtattr **tb) ss < alg->cra_blocksize) goto err_free_inst; - err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg); + err = crypto_inst_setname(shash_crypto_instance(inst), "hmac", + "hmac-shash", alg); if (err) goto err_free_inst; @@ -222,12 +225,14 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.digestsize = ds; inst->alg.statesize = ss; + inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize; inst->alg.init = hmac_init; inst->alg.update = hmac_update; - inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; inst->alg.import = hmac_import; + inst->alg.export_core = hmac_export_core; + inst->alg.import_core = hmac_import_core; inst->alg.setkey = hmac_setkey; inst->alg.init_tfm = hmac_init_tfm; inst->alg.clone_tfm = hmac_clone_tfm; @@ -243,23 +248,332 @@ err_free_inst: return err; } -static struct crypto_template hmac_tmpl = { - .name = "hmac", - .create = hmac_create, - .module = THIS_MODULE, +static int hmac_setkey_ahash(struct crypto_ahash *parent, + const u8 *inkey, unsigned int keylen) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash); + int ds = crypto_ahash_digestsize(parent); + int bs = crypto_ahash_blocksize(parent); + int ss = crypto_ahash_statesize(parent); + HASH_REQUEST_ON_STACK(req, fb); + u8 *opad = &tctx->pads[ss]; + u8 *ipad = &tctx->pads[0]; + int err, i; + + if (fips_enabled && (keylen < 112 / 8)) + return -EINVAL; + + ahash_request_set_callback(req, 0, NULL, NULL); + + if (keylen > bs) { + ahash_request_set_virt(req, inkey, ipad, keylen); + err = crypto_ahash_digest(req); + if (err) + goto out_zero_req; + + keylen = ds; + } else + memcpy(ipad, inkey, keylen); + + memset(ipad + keylen, 0, bs - keylen); + memcpy(opad, ipad, bs); + + for (i = 0; i < bs; i++) { + ipad[i] ^= HMAC_IPAD_VALUE; + opad[i] ^= HMAC_OPAD_VALUE; + } + + ahash_request_set_virt(req, ipad, NULL, bs); + err = crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, ipad); + + ahash_request_set_virt(req, opad, NULL, bs); + err = err ?: + crypto_ahash_init(req) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export(req, opad); + +out_zero_req: + HASH_REQUEST_ZERO(req); + return err; +} + +static int hmac_export_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export(ahash_request_ctx(preq), out); +} + +static int hmac_import_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import(req, in); +} + +static int hmac_export_core_ahash(struct ahash_request *preq, void *out) +{ + return crypto_ahash_export_core(ahash_request_ctx(preq), out); +} + +static int hmac_import_core_ahash(struct ahash_request *preq, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_tfm(req, tctx->hash); + return crypto_ahash_import_core(req, in); +} + +static int hmac_init_ahash(struct ahash_request *preq) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + + return hmac_import_ahash(preq, &tctx->pads[0]); +} + +static int 
hmac_update_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + preq->base.complete, preq->base.data); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes); + else + ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes); + return crypto_ahash_update(req); +} + +static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq); + struct ahash_request *req = ahash_request_ctx(preq); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm); + int ds = crypto_ahash_digestsize(tfm); + int ss = crypto_ahash_statesize(tfm); + const u8 *opad = &tctx->pads[ss]; + + ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask, + preq->base.complete, preq->base.data); + ahash_request_set_virt(req, preq->result, preq->result, ds); + return crypto_ahash_import(req, opad) ?: + crypto_ahash_finup(req); + +} + +static void hmac_finup_done(void *data, int err) +{ + struct ahash_request *preq = data; + + if (err) + goto out; + + err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + ahash_request_complete(preq, err); +} + +static int hmac_finup_ahash(struct ahash_request *preq) +{ + struct ahash_request *req = ahash_request_ctx(preq); + + ahash_request_set_callback(req, ahash_request_flags(preq), + hmac_finup_done, preq); + if (ahash_request_isvirt(preq)) + ahash_request_set_virt(req, preq->svirt, preq->result, + preq->nbytes); + else + ahash_request_set_crypt(req, preq->src, preq->result, + preq->nbytes); + return crypto_ahash_finup(req) ?: + hmac_finup_finish(preq, 0); +} + +static int hmac_digest_ahash(struct ahash_request *preq) +{ + return hmac_init_ahash(preq) ?: + hmac_finup_ahash(preq); +} + +static int hmac_init_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_instance *inst = ahash_alg_instance(parent); + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + struct crypto_ahash *hash; + + hash = crypto_spawn_ahash(ahash_instance_ctx(inst)); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) + + crypto_ahash_reqsize(hash)) + return -EINVAL; + + tctx->hash = hash; + return 0; +} + +static int hmac_clone_ahash_tfm(struct crypto_ahash *dst, + struct crypto_ahash *src) +{ + struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src); + struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst); + struct crypto_ahash *hash; + + hash = crypto_clone_ahash(sctx->hash); + if (IS_ERR(hash)) + return PTR_ERR(hash); + + dctx->hash = hash; + return 0; +} + +static void hmac_exit_ahash_tfm(struct crypto_ahash *parent) +{ + struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent); + + crypto_free_ahash(tctx->hash); +} + +static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb, + u32 mask) +{ + struct crypto_ahash_spawn *spawn; + struct ahash_instance *inst; + struct crypto_alg *alg; + struct hash_alg_common *halg; + int ds, ss, err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + spawn = ahash_instance_ctx(inst); + + mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + halg = crypto_spawn_ahash_alg(spawn); + alg = &halg->base; + + /* The underlying hash algorithm must not require 
a key */ + err = -EINVAL; + if (crypto_hash_alg_needs_key(halg)) + goto err_free_inst; + + ds = halg->digestsize; + ss = halg->statesize; + if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) + goto err_free_inst; + + err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg); + if (err) + goto err_free_inst; + + inst->alg.halg.base.cra_flags = alg->cra_flags & + CRYPTO_ALG_INHERITED_FLAGS; + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT; + inst->alg.halg.base.cra_priority = alg->cra_priority + 100; + inst->alg.halg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) + + (ss * 2); + inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) + + alg->cra_reqsize; + + inst->alg.halg.digestsize = ds; + inst->alg.halg.statesize = ss; + inst->alg.init = hmac_init_ahash; + inst->alg.update = hmac_update_ahash; + inst->alg.finup = hmac_finup_ahash; + inst->alg.digest = hmac_digest_ahash; + inst->alg.export = hmac_export_ahash; + inst->alg.import = hmac_import_ahash; + inst->alg.export_core = hmac_export_core_ahash; + inst->alg.import_core = hmac_import_core_ahash; + inst->alg.setkey = hmac_setkey_ahash; + inst->alg.init_tfm = hmac_init_ahash_tfm; + inst->alg.clone_tfm = hmac_clone_ahash_tfm; + inst->alg.exit_tfm = hmac_exit_ahash_tfm; + + inst->free = ahash_free_singlespawn_instance; + + err = ahash_register_instance(tmpl, inst); + if (err) { +err_free_inst: + ahash_free_singlespawn_instance(inst); + } + return err; +} + +static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_attr_type *algt; + u32 mask; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + mask = crypto_algt_inherited_mask(algt); + + if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK)) + return hmac_create_ahash(tmpl, tb, mask); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) & + algt->mask & CRYPTO_ALG_TYPE_MASK) + return -EINVAL; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); + if (err) + return err == -EINVAL ? 
-ENOENT : err; + + return __hmac_create_shash(tmpl, tb, mask); +} + +static struct crypto_template hmac_tmpls[] = { + { + .name = "hmac", + .create = hmac_create, + .module = THIS_MODULE, + }, + { + .name = "hmac-shash", + .create = hmac_create_shash, + .module = THIS_MODULE, + }, }; static int __init hmac_module_init(void) { - return crypto_register_template(&hmac_tmpl); + return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } static void __exit hmac_module_exit(void) { - crypto_unregister_template(&hmac_tmpl); + crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls)); } -subsys_initcall(hmac_module_init); +module_init(hmac_module_init); module_exit(hmac_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/internal.h b/crypto/internal.h index 11567ea24fc3..b9afd68767c1 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -46,6 +46,7 @@ struct crypto_type { unsigned int maskclear; unsigned int maskset; unsigned int tfmsize; + unsigned int algsize; }; enum { @@ -66,8 +67,7 @@ extern struct blocking_notifier_head crypto_chain; int alg_test(const char *driver, const char *alg, u32 type, u32 mask); -#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || \ - IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) +#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) static inline bool crypto_boot_test_finished(void) { return true; @@ -86,7 +86,7 @@ static inline void set_crypto_boot_test_finished(void) static_branch_enable(&__crypto_boot_test_finished); } #endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || - * IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) + * !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) */ #ifdef CONFIG_PROC_FS @@ -128,7 +128,6 @@ void *crypto_create_tfm_node(struct crypto_alg *alg, const struct crypto_type *frontend, int node); void *crypto_clone_tfm(const struct crypto_type *frontend, struct crypto_tfm *otfm); -void crypto_destroy_alg(struct crypto_alg *alg); static inline void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) @@ -163,6 +162,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) return alg; } +void crypto_destroy_alg(struct crypto_alg *alg); + static inline void crypto_alg_put(struct crypto_alg *alg) { if (refcount_dec_and_test(&alg->cra_refcnt)) diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c index c3f9938e1ad2..b7a6bf9da773 100644 --- a/crypto/kdf_sp800108.c +++ b/crypto/kdf_sp800108.c @@ -127,7 +127,7 @@ static int __init crypto_kdf108_init(void) { int ret; - if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) + if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)) return 0; ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)", diff --git a/crypto/khazad.c b/crypto/khazad.c index 7ad338ca2c18..024264ee9cd1 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -871,7 +871,7 @@ static void __exit khazad_mod_fini(void) } -subsys_initcall(khazad_mod_init); +module_init(khazad_mod_init); module_exit(khazad_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/kpp.c b/crypto/kpp.c index ecc63a1a948d..2e0cefe7a25f 100644 --- a/crypto/kpp.c +++ b/crypto/kpp.c @@ -80,6 +80,7 @@ static const struct crypto_type crypto_kpp_type = { .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_KPP, .tfmsize = offsetof(struct crypto_kpp, base), + .algsize = offsetof(struct kpp_alg, base), }; struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask) diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c index d07769bf149e..a1de55994d92 100644 --- a/crypto/krb5enc.c +++ 
b/crypto/krb5enc.c @@ -496,7 +496,7 @@ static void __exit crypto_krb5enc_module_exit(void) crypto_unregister_template(&crypto_krb5enc_tmpl); } -subsys_initcall(crypto_krb5enc_module_init); +module_init(crypto_krb5enc_module_init); module_exit(crypto_krb5enc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/lrw.c b/crypto/lrw.c index 391ae0f7641f..dd403b800513 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -356,7 +356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); @@ -420,7 +420,7 @@ static void __exit lrw_module_exit(void) crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(lrw_module_init); +module_init(lrw_module_init); module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index cdb4897c63e6..c2e2c38b5aa8 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -294,6 +294,7 @@ static const struct crypto_type crypto_lskcipher_type = { .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_LSKCIPHER, .tfmsize = offsetof(struct crypto_lskcipher, base), + .algsize = offsetof(struct lskcipher_alg, co.base), }; static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm) diff --git a/crypto/lz4.c b/crypto/lz4.c index 82588607fb2e..7a984ae5ae52 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c @@ -12,10 +12,6 @@ #include <linux/lz4.h> #include <crypto/internal/scompress.h> -struct lz4_ctx { - void *lz4_comp_mem; -}; - static void *lz4_alloc_ctx(void) { void *ctx; @@ -93,7 +89,7 @@ static void __exit lz4_mod_fini(void) crypto_unregister_scomp(&scomp); } -subsys_initcall(lz4_mod_init); +module_init(lz4_mod_init); module_exit(lz4_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index 997e76c0183a..9c61d05b6214 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c @@ -10,10 +10,6 @@ #include <linux/vmalloc.h> #include <linux/lz4.h> -struct lz4hc_ctx { - void *lz4hc_comp_mem; -}; - static void *lz4hc_alloc_ctx(void) { void *ctx; @@ -91,7 +87,7 @@ static void __exit lz4hc_mod_fini(void) crypto_unregister_scomp(&scomp); } -subsys_initcall(lz4hc_mod_init); +module_init(lz4hc_mod_init); module_exit(lz4hc_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index b1350ae278b8..ba013f2d5090 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -9,10 +9,6 @@ #include <linux/module.h> #include <linux/slab.h> -struct lzorle_ctx { - void *lzorle_comp_mem; -}; - static void *lzorle_alloc_ctx(void) { void *ctx; @@ -95,7 +91,7 @@ static void __exit lzorle_mod_fini(void) crypto_unregister_scomp(&scomp); } -subsys_initcall(lzorle_mod_init); +module_init(lzorle_mod_init); module_exit(lzorle_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/lzo.c b/crypto/lzo.c index dfe5a07ca35f..7867e2c67c4e 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -9,10 +9,6 @@ #include <linux/module.h> #include <linux/slab.h> -struct lzo_ctx { - void *lzo_comp_mem; -}; - static void 
*lzo_alloc_ctx(void) { void *ctx; @@ -95,7 +91,7 @@ static void __exit lzo_mod_fini(void) crypto_unregister_scomp(&scomp); } -subsys_initcall(lzo_mod_init); +module_init(lzo_mod_init); module_exit(lzo_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/md4.c b/crypto/md4.c index 2e7f2f319f95..55bf47e23c13 100644 --- a/crypto/md4.c +++ b/crypto/md4.c @@ -233,7 +233,7 @@ static void __exit md4_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(md4_mod_init); +module_init(md4_mod_init); module_exit(md4_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/md5.c b/crypto/md5.c index 72c0c46fb5ee..32c0819f5118 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -17,11 +17,9 @@ */ #include <crypto/internal/hash.h> #include <crypto/md5.h> -#include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> -#include <linux/types.h> -#include <asm/byteorder.h> const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, @@ -120,10 +118,11 @@ static void md5_transform(__u32 *hash, __u32 const *in) hash[3] += d; } -static inline void md5_transform_helper(struct md5_state *ctx) +static inline void md5_transform_helper(struct md5_state *ctx, + u32 block[MD5_BLOCK_WORDS]) { - le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); - md5_transform(ctx->hash, ctx->block); + le32_to_cpu_array(block, MD5_BLOCK_WORDS); + md5_transform(ctx->hash, block); } static int md5_init(struct shash_desc *desc) @@ -142,76 +141,53 @@ static int md5_init(struct shash_desc *desc) static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *mctx = shash_desc_ctx(desc); - const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + u32 block[MD5_BLOCK_WORDS]; mctx->byte_count += len; - - if (avail > len) { - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, len); - return 0; - } - - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, avail); - - md5_transform_helper(mctx); - data += avail; - len -= avail; - - while (len >= sizeof(mctx->block)) { - memcpy(mctx->block, data, sizeof(mctx->block)); - md5_transform_helper(mctx); - data += sizeof(mctx->block); - len -= sizeof(mctx->block); - } - - memcpy(mctx->block, data, len); - - return 0; + do { + memcpy(block, data, sizeof(block)); + md5_transform_helper(mctx, block); + data += sizeof(block); + len -= sizeof(block); + } while (len >= sizeof(block)); + memzero_explicit(block, sizeof(block)); + mctx->byte_count -= len; + return len; } -static int md5_final(struct shash_desc *desc, u8 *out) +static int md5_finup(struct shash_desc *desc, const u8 *data, unsigned int len, + u8 *out) { struct md5_state *mctx = shash_desc_ctx(desc); - const unsigned int offset = mctx->byte_count & 0x3f; - char *p = (char *)mctx->block + offset; - int padding = 56 - (offset + 1); + u32 block[MD5_BLOCK_WORDS]; + unsigned int offset; + int padding; + char *p; + + memcpy(block, data, len); + + offset = len; + p = (char *)block + offset; + padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof (u64)); - md5_transform_helper(mctx); - p = (char *)mctx->block; + md5_transform_helper(mctx, block); + p = (char *)block; padding = 56; } memset(p, 0, padding); - mctx->block[14] = mctx->byte_count << 3; - mctx->block[15] = mctx->byte_count >> 29; - le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - - sizeof(u64)) / sizeof(u32)); - md5_transform(mctx->hash, mctx->block); + mctx->byte_count += len; 
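/*
 * Editorial aside, not part of the patch: the md5_finup() hunk around this
 * point encodes the final block exactly as below -- a 0x80 marker byte, zero
 * padding up to byte 56, then the message bit count stored little-endian in
 * the last two 32-bit words (block[14]/block[15] in the kernel code).  This
 * standalone sketch uses an invented name (md5_pad_final_block) and assumes
 * the unhashed tail fits in a single 64-byte block (tail_len <= 55); the
 * kernel code additionally handles the case that needs an extra block.
 */
#include <stdint.h>
#include <string.h>

void md5_pad_final_block(uint8_t block[64], const uint8_t *tail,
			 size_t tail_len, uint64_t total_bytes)
{
	uint64_t bit_count = total_bytes * 8;
	int i;

	memset(block, 0, 64);
	memcpy(block, tail, tail_len);
	block[tail_len] = 0x80;			/* mandatory padding marker */

	for (i = 0; i < 8; i++)			/* 64-bit length, little-endian */
		block[56 + i] = (uint8_t)(bit_count >> (8 * i));
}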
+ block[14] = mctx->byte_count << 3; + block[15] = mctx->byte_count >> 29; + le32_to_cpu_array(block, (sizeof(block) - sizeof(u64)) / sizeof(u32)); + md5_transform(mctx->hash, block); + memzero_explicit(block, sizeof(block)); cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); memcpy(out, mctx->hash, sizeof(mctx->hash)); - memset(mctx, 0, sizeof(*mctx)); - - return 0; -} - -static int md5_export(struct shash_desc *desc, void *out) -{ - struct md5_state *ctx = shash_desc_ctx(desc); - - memcpy(out, ctx, sizeof(*ctx)); - return 0; -} - -static int md5_import(struct shash_desc *desc, const void *in) -{ - struct md5_state *ctx = shash_desc_ctx(desc); - memcpy(ctx, in, sizeof(*ctx)); return 0; } @@ -219,14 +195,12 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_init, .update = md5_update, - .final = md5_final, - .export = md5_export, - .import = md5_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .finup = md5_finup, + .descsize = MD5_STATE_SIZE, .base = { .cra_name = "md5", .cra_driver_name = "md5-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -242,7 +216,7 @@ static void __exit md5_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(md5_mod_init); +module_init(md5_mod_init); module_exit(md5_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index 0d14e980d4d6..69ad35f524d7 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -167,7 +167,7 @@ static void __exit michael_mic_exit(void) } -subsys_initcall(michael_mic_init); +module_init(michael_mic_init); module_exit(michael_mic_exit); MODULE_LICENSE("GPL v2"); diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c index a661d4f667cd..2b648615b5ec 100644 --- a/crypto/nhpoly1305.c +++ b/crypto/nhpoly1305.c @@ -245,7 +245,7 @@ static void __exit nhpoly1305_mod_exit(void) crypto_unregister_shash(&nhpoly1305_alg); } -subsys_initcall(nhpoly1305_mod_init); +module_init(nhpoly1305_mod_init); module_exit(nhpoly1305_mod_exit); MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function"); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 9d2e56d6744a..d092717ea4fc 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -186,7 +186,7 @@ static void __exit crypto_pcbc_module_exit(void) crypto_unregister_template(&crypto_pcbc_tmpl); } -subsys_initcall(crypto_pcbc_module_init); +module_init(crypto_pcbc_module_init); module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 7fc79e7dce44..c33d29a523e0 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -381,7 +381,7 @@ static void __exit pcrypt_exit(void) kset_unregister(pcrypt_kset); } -subsys_initcall(pcrypt_init); +module_init(pcrypt_init); module_exit(pcrypt_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c deleted file mode 100644 index e6f29a98725a..000000000000 --- a/crypto/poly1305_generic.c +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Poly1305 authenticator algorithm, RFC7539 - * - * Copyright (C) 2015 Martin Willi - * - * Based on public domain code by Andrew Moon and Daniel J. Bernstein. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/unaligned.h> - -static int crypto_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - poly1305_core_init(&dctx->h); - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, - const u8 *src, unsigned int srclen) -{ - if (!dctx->sset) { - if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { - poly1305_core_setkey(&dctx->core_r, src); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->rset = 2; - } - if (srclen >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } - return srclen; -} - -static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) -{ - unsigned int datalen; - - if (unlikely(!dctx->sset)) { - datalen = crypto_poly1305_setdesckey(dctx, src, srclen); - src += srclen - datalen; - srclen = datalen; - } - - poly1305_core_blocks(&dctx->h, &dctx->core_r, src, - srclen / POLY1305_BLOCK_SIZE, 1); -} - -static int crypto_poly1305_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes; - - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - srclen -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks(dctx, dctx->buf, - POLY1305_BLOCK_SIZE); - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { - poly1305_blocks(dctx, src, srclen); - src += srclen - (srclen % POLY1305_BLOCK_SIZE); - srclen %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(srclen)) { - dctx->buflen = srclen; - memcpy(dctx->buf, src, srclen); - } - - return 0; -} - -static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_generic(dctx, dst); - return 0; -} - -static struct shash_alg poly1305_alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_init, - .update = crypto_poly1305_update, - .final = crypto_poly1305_final, - .descsize = sizeof(struct poly1305_desc_ctx), - .base = { - .cra_name = "poly1305", - .cra_driver_name = "poly1305-generic", - .cra_priority = 100, - .cra_blocksize = POLY1305_BLOCK_SIZE, - .cra_module = THIS_MODULE, - }, -}; - -static int __init poly1305_mod_init(void) -{ - return crypto_register_shash(&poly1305_alg); -} - -static void __exit poly1305_mod_exit(void) -{ - crypto_unregister_shash(&poly1305_alg); -} - -subsys_initcall(poly1305_mod_init); -module_exit(poly1305_mod_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); -MODULE_DESCRIPTION("Poly1305 authenticator"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-generic"); diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c index 4f98910bcdb5..db8adb56e4ca 100644 --- a/crypto/polyval-generic.c +++ b/crypto/polyval-generic.c @@ 
-44,15 +44,15 @@ * */ -#include <linux/unaligned.h> -#include <crypto/algapi.h> #include <crypto/gf128mul.h> -#include <crypto/polyval.h> #include <crypto/internal/hash.h> -#include <linux/crypto.h> -#include <linux/init.h> +#include <crypto/polyval.h> +#include <crypto/utils.h> +#include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> struct polyval_tfm_ctx { struct gf128mul_4k *gf128; @@ -63,7 +63,6 @@ struct polyval_desc_ctx { u8 buffer[POLYVAL_BLOCK_SIZE]; be128 buffer128; }; - u32 bytes; }; static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE], @@ -76,46 +75,6 @@ static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE], put_unaligned(swab64(b), (u64 *)&dst[0]); } -/* - * Performs multiplication in the POLYVAL field using the GHASH field as a - * subroutine. This function is used as a fallback for hardware accelerated - * implementations when simd registers are unavailable. - * - * Note: This function is not used for polyval-generic, instead we use the 4k - * lookup table implementation for finite field multiplication. - */ -void polyval_mul_non4k(u8 *op1, const u8 *op2) -{ - be128 a, b; - - // Assume one argument is in Montgomery form and one is not. - copy_and_reverse((u8 *)&a, op1); - copy_and_reverse((u8 *)&b, op2); - gf128mul_x_lle(&a, &a); - gf128mul_lle(&a, &b); - copy_and_reverse(op1, (u8 *)&a); -} -EXPORT_SYMBOL_GPL(polyval_mul_non4k); - -/* - * Perform a POLYVAL update using non4k multiplication. This function is used - * as a fallback for hardware accelerated implementations when simd registers - * are unavailable. - * - * Note: This function is not used for polyval-generic, instead we use the 4k - * lookup table implementation of finite field multiplication. 
- */ -void polyval_update_non4k(const u8 *key, const u8 *in, - size_t nblocks, u8 *accumulator) -{ - while (nblocks--) { - crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE); - polyval_mul_non4k(accumulator, key); - in += POLYVAL_BLOCK_SIZE; - } -} -EXPORT_SYMBOL_GPL(polyval_update_non4k); - static int polyval_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -154,56 +113,53 @@ static int polyval_update(struct shash_desc *desc, { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm); - u8 *pos; u8 tmp[POLYVAL_BLOCK_SIZE]; - int n; - - if (dctx->bytes) { - n = min(srclen, dctx->bytes); - pos = dctx->buffer + dctx->bytes - 1; - - dctx->bytes -= n; - srclen -= n; - - while (n--) - *pos-- ^= *src++; - if (!dctx->bytes) - gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); - } - - while (srclen >= POLYVAL_BLOCK_SIZE) { + do { copy_and_reverse(tmp, src); crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE); gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); src += POLYVAL_BLOCK_SIZE; srclen -= POLYVAL_BLOCK_SIZE; - } + } while (srclen >= POLYVAL_BLOCK_SIZE); + + return srclen; +} - if (srclen) { - dctx->bytes = POLYVAL_BLOCK_SIZE - srclen; - pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1; - while (srclen--) - *pos-- ^= *src++; +static int polyval_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *dst) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + + if (len) { + u8 tmp[POLYVAL_BLOCK_SIZE] = {}; + + memcpy(tmp, src, len); + polyval_update(desc, tmp, POLYVAL_BLOCK_SIZE); } + copy_and_reverse(dst, dctx->buffer); + return 0; +} + +static int polyval_export(struct shash_desc *desc, void *out) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + copy_and_reverse(out, dctx->buffer); return 0; } -static int polyval_final(struct shash_desc *desc, u8 *dst) +static int polyval_import(struct shash_desc *desc, const void *in) { struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); - const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm); - if (dctx->bytes) - gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); - copy_and_reverse(dst, dctx->buffer); + copy_and_reverse(dctx->buffer, in); return 0; } -static void polyval_exit_tfm(struct crypto_tfm *tfm) +static void polyval_exit_tfm(struct crypto_shash *tfm) { - struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm); gf128mul_free_4k(ctx->gf128); } @@ -212,17 +168,21 @@ static struct shash_alg polyval_alg = { .digestsize = POLYVAL_DIGEST_SIZE, .init = polyval_init, .update = polyval_update, - .final = polyval_final, + .finup = polyval_finup, .setkey = polyval_setkey, + .export = polyval_export, + .import = polyval_import, + .exit_tfm = polyval_exit_tfm, + .statesize = sizeof(struct polyval_desc_ctx), .descsize = sizeof(struct polyval_desc_ctx), .base = { .cra_name = "polyval", .cra_driver_name = "polyval-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = POLYVAL_BLOCK_SIZE, .cra_ctxsize = sizeof(struct polyval_tfm_ctx), .cra_module = THIS_MODULE, - .cra_exit = polyval_exit_tfm, }, }; @@ -236,7 +196,7 @@ static void __exit polyval_mod_exit(void) crypto_unregister_shash(&polyval_alg); } -subsys_initcall(polyval_mod_init); +module_init(polyval_mod_init); module_exit(polyval_mod_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/rmd160.c b/crypto/rmd160.c index c5fe4034b153..9860b60c9be4 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -9,18 +9,14 @@ * 
Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> */ #include <crypto/internal/hash.h> -#include <linux/init.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <asm/byteorder.h> - +#include <linux/string.h> #include "ripemd.h" struct rmd160_ctx { u64 byte_count; u32 state[5]; - __le32 buffer[16]; }; #define K1 RMD_K1 @@ -265,72 +261,59 @@ static int rmd160_init(struct shash_desc *desc) rctx->state[3] = RMD_H3; rctx->state[4] = RMD_H4; - memset(rctx->buffer, 0, sizeof(rctx->buffer)); - return 0; } static int rmd160_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + int remain = len - round_down(len, RMD160_BLOCK_SIZE); struct rmd160_ctx *rctx = shash_desc_ctx(desc); - const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); - - rctx->byte_count += len; + __le32 buffer[RMD160_BLOCK_SIZE / 4]; - /* Enough space in buffer? If so copy and we're done */ - if (avail > len) { - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, len); - goto out; - } - - memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), - data, avail); + rctx->byte_count += len - remain; - rmd160_transform(rctx->state, rctx->buffer); - data += avail; - len -= avail; - - while (len >= sizeof(rctx->buffer)) { - memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd160_transform(rctx->state, rctx->buffer); - data += sizeof(rctx->buffer); - len -= sizeof(rctx->buffer); - } + do { + memcpy(buffer, data, sizeof(buffer)); + rmd160_transform(rctx->state, buffer); + data += sizeof(buffer); + len -= sizeof(buffer); + } while (len >= sizeof(buffer)); - memcpy(rctx->buffer, data, len); - -out: - return 0; + memzero_explicit(buffer, sizeof(buffer)); + return remain; } /* Add padding and return the message digest. */ -static int rmd160_final(struct shash_desc *desc, u8 *out) +static int rmd160_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { + unsigned int bit_offset = RMD160_BLOCK_SIZE / 8 - 1; struct rmd160_ctx *rctx = shash_desc_ctx(desc); - u32 i, index, padlen; - __le64 bits; + union { + __le64 l64[RMD160_BLOCK_SIZE / 4]; + __le32 l32[RMD160_BLOCK_SIZE / 2]; + u8 u8[RMD160_BLOCK_SIZE * 2]; + } block = {}; __le32 *dst = (__le32 *)out; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_le64(rctx->byte_count << 3); - - /* Pad out to 56 mod 64 */ - index = rctx->byte_count & 0x3f; - padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); - rmd160_update(desc, padding, padlen); + u32 i; - /* Append length */ - rmd160_update(desc, (const u8 *)&bits, sizeof(bits)); + rctx->byte_count += len; + if (len >= bit_offset * 8) + bit_offset += RMD160_BLOCK_SIZE / 8; + memcpy(&block, src, len); + block.u8[len] = 0x80; + block.l64[bit_offset] = cpu_to_le64(rctx->byte_count << 3); + + rmd160_transform(rctx->state, block.l32); + if (bit_offset > RMD160_BLOCK_SIZE / 8) + rmd160_transform(rctx->state, + block.l32 + RMD160_BLOCK_SIZE / 4); + memzero_explicit(&block, sizeof(block)); /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_le32p(&rctx->state[i]); - - /* Wipe context */ - memset(rctx, 0, sizeof(*rctx)); - return 0; } @@ -338,11 +321,12 @@ static struct shash_alg alg = { .digestsize = RMD160_DIGEST_SIZE, .init = rmd160_init, .update = rmd160_update, - .final = rmd160_final, + .finup = rmd160_finup, .descsize = sizeof(struct rmd160_ctx), .base = { .cra_name = "rmd160", .cra_driver_name = "rmd160-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = RMD160_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -358,7 +342,7 @@ static void __exit rmd160_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(rmd160_mod_init); +module_init(rmd160_mod_init); module_exit(rmd160_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/rng.c b/crypto/rng.c index 9d8804e46422..b8ae6ebc091d 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -98,6 +98,7 @@ static const struct crypto_type crypto_rng_type = { .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_RNG, .tfmsize = offsetof(struct crypto_rng, base), + .algsize = offsetof(struct rng_alg, base), }; struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) diff --git a/crypto/rsa.c b/crypto/rsa.c index b7d21529c552..6c7734083c98 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -430,7 +430,7 @@ static void __exit rsa_exit(void) crypto_unregister_akcipher(&rsa); } -subsys_initcall(rsa_init); +module_init(rsa_init); module_exit(rsa_exit); MODULE_ALIAS_CRYPTO("rsa"); MODULE_LICENSE("GPL"); diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c index d01ac75635e0..94fa5e9600e7 100644 --- a/crypto/rsassa-pkcs1.c +++ b/crypto/rsassa-pkcs1.c @@ -301,7 +301,7 @@ static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm) { struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); - return ctx->key_size; + return ctx->key_size * BITS_PER_BYTE; } static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm, diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 8225801488d5..1d010e2a1b1a 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -10,10 +10,25 @@ */ #include <crypto/scatterwalk.h> +#include <linux/crypto.h> +#include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/scatterlist.h> +#include <linux/slab.h> + +enum { + SKCIPHER_WALK_SLOW = 1 << 0, + SKCIPHER_WALK_COPY = 1 << 1, + SKCIPHER_WALK_DIFF = 1 << 2, + SKCIPHER_WALK_SLEEP = 1 << 3, +}; + +static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) +{ + return walk->flags & SKCIPHER_WALK_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; +} void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes) { @@ -89,27 +104,23 @@ EXPORT_SYMBOL_GPL(memcpy_to_sglist); void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct scatter_walk swalk; - struct scatter_walk dwalk; + struct skcipher_walk walk = {}; if (unlikely(nbytes == 0)) /* in case sg == NULL */ return; - scatterwalk_start(&swalk, src); - scatterwalk_start(&dwalk, dst); + walk.total = nbytes; + + scatterwalk_start(&walk.in, src); + scatterwalk_start(&walk.out, dst); + skcipher_walk_first(&walk, true); do { - unsigned int slen, dlen; - unsigned int len; - - slen = scatterwalk_next(&swalk, nbytes); - dlen = scatterwalk_next(&dwalk, nbytes); - len = min(slen, dlen); - memcpy(dwalk.addr, swalk.addr, len); - scatterwalk_done_dst(&dwalk, len); - scatterwalk_done_src(&swalk, len); - nbytes -= len; - } while (nbytes); + if (walk.src.virt.addr != walk.dst.virt.addr) + memcpy(walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); + skcipher_walk_done(&walk, 0); + } while (walk.nbytes); } EXPORT_SYMBOL_GPL(memcpy_sglist); @@ -135,3 +146,236 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], return dst; } EXPORT_SYMBOL_GPL(scatterwalk_ffwd); + +static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) +{ + unsigned alignmask = walk->alignmask; + unsigned n; + void *buffer; + + if (!walk->buffer) + walk->buffer = walk->page; + buffer = walk->buffer; + if (!buffer) { + /* Min size for a buffer of bsize bytes aligned to alignmask */ + n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + + buffer = kzalloc(n, skcipher_walk_gfp(walk)); + if (!buffer) + return skcipher_walk_done(walk, -ENOMEM); + walk->buffer = buffer; + } + + buffer = PTR_ALIGN(buffer, alignmask + 1); + memcpy_from_scatterwalk(buffer, &walk->in, bsize); + walk->out.__addr = buffer; + walk->in.__addr = walk->out.addr; + + walk->nbytes = bsize; + walk->flags |= SKCIPHER_WALK_SLOW; + + return 0; +} + +static int skcipher_next_copy(struct skcipher_walk *walk) +{ + void *tmp = walk->page; + + scatterwalk_map(&walk->in); + memcpy(tmp, walk->in.addr, walk->nbytes); + scatterwalk_unmap(&walk->in); + /* + * walk->in is advanced later when the number of bytes actually + * processed (which might be less than walk->nbytes) is known. 
+ */ + + walk->in.__addr = tmp; + walk->out.__addr = tmp; + return 0; +} + +static int skcipher_next_fast(struct skcipher_walk *walk) +{ + unsigned long diff; + + diff = offset_in_page(walk->in.offset) - + offset_in_page(walk->out.offset); + diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) - + (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT)); + + scatterwalk_map(&walk->out); + walk->in.__addr = walk->out.__addr; + + if (diff) { + walk->flags |= SKCIPHER_WALK_DIFF; + scatterwalk_map(&walk->in); + } + + return 0; +} + +static int skcipher_walk_next(struct skcipher_walk *walk) +{ + unsigned int bsize; + unsigned int n; + + n = walk->total; + bsize = min(walk->stride, max(n, walk->blocksize)); + n = scatterwalk_clamp(&walk->in, n); + n = scatterwalk_clamp(&walk->out, n); + + if (unlikely(n < bsize)) { + if (unlikely(walk->total < walk->blocksize)) + return skcipher_walk_done(walk, -EINVAL); + +slow_path: + return skcipher_next_slow(walk, bsize); + } + walk->nbytes = n; + + if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { + if (!walk->page) { + gfp_t gfp = skcipher_walk_gfp(walk); + + walk->page = (void *)__get_free_page(gfp); + if (!walk->page) + goto slow_path; + } + walk->flags |= SKCIPHER_WALK_COPY; + return skcipher_next_copy(walk); + } + + return skcipher_next_fast(walk); +} + +static int skcipher_copy_iv(struct skcipher_walk *walk) +{ + unsigned alignmask = walk->alignmask; + unsigned ivsize = walk->ivsize; + unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1); + unsigned size; + u8 *iv; + + /* Min size for a buffer of stride + ivsize, aligned to alignmask */ + size = aligned_stride + ivsize + + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + + walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); + if (!walk->buffer) + return -ENOMEM; + + iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride; + + walk->iv = memcpy(iv, walk->iv, walk->ivsize); + return 0; +} + +int skcipher_walk_first(struct skcipher_walk *walk, bool atomic) +{ + if (WARN_ON_ONCE(in_hardirq())) + return -EDEADLK; + + walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP; + + walk->buffer = NULL; + if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { + int err = skcipher_copy_iv(walk); + if (err) + return err; + } + + walk->page = NULL; + + return skcipher_walk_next(walk); +} +EXPORT_SYMBOL_GPL(skcipher_walk_first); + +/** + * skcipher_walk_done() - finish one step of a skcipher_walk + * @walk: the skcipher_walk + * @res: number of bytes *not* processed (>= 0) from walk->nbytes, + * or a -errno value to terminate the walk due to an error + * + * This function cleans up after one step of walking through the source and + * destination scatterlists, and advances to the next step if applicable. + * walk->nbytes is set to the number of bytes available in the next step, + * walk->total is set to the new total number of bytes remaining, and + * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there + * is no more data, or if an error occurred (i.e. -errno return), then + * walk->nbytes and walk->total are set to 0 and all resources owned by the + * skcipher_walk are freed. + * + * Return: 0 or a -errno value. If @res was a -errno value then it will be + * returned, but other errors may occur too. 
+ */ +int skcipher_walk_done(struct skcipher_walk *walk, int res) +{ + unsigned int n = walk->nbytes; /* num bytes processed this step */ + unsigned int total = 0; /* new total remaining */ + + if (!n) + goto finish; + + if (likely(res >= 0)) { + n -= res; /* subtract num bytes *not* processed */ + total = walk->total - n; + } + + if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW | + SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF)))) { + scatterwalk_advance(&walk->in, n); + } else if (walk->flags & SKCIPHER_WALK_DIFF) { + scatterwalk_done_src(&walk->in, n); + } else if (walk->flags & SKCIPHER_WALK_COPY) { + scatterwalk_advance(&walk->in, n); + scatterwalk_map(&walk->out); + memcpy(walk->out.addr, walk->page, n); + } else { /* SKCIPHER_WALK_SLOW */ + if (res > 0) { + /* + * Didn't process all bytes. Either the algorithm is + * broken, or this was the last step and it turned out + * the message wasn't evenly divisible into blocks but + * the algorithm requires it. + */ + res = -EINVAL; + total = 0; + } else + memcpy_to_scatterwalk(&walk->out, walk->out.addr, n); + goto dst_done; + } + + scatterwalk_done_dst(&walk->out, n); +dst_done: + + if (res > 0) + res = 0; + + walk->total = total; + walk->nbytes = 0; + + if (total) { + if (walk->flags & SKCIPHER_WALK_SLEEP) + cond_resched(); + walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF); + return skcipher_walk_next(walk); + } + +finish: + /* Short-circuit for the common/fast path. */ + if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) + goto out; + + if (walk->iv != walk->oiv) + memcpy(walk->oiv, walk->iv, walk->ivsize); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); + +out: + return res; +} +EXPORT_SYMBOL_GPL(skcipher_walk_done); diff --git a/crypto/scompress.c b/crypto/scompress.c index ffeedcf20b0f..c651e7f2197a 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -7,9 +7,9 @@ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com> */ -#include <crypto/internal/acompress.h> #include <crypto/internal/scompress.h> #include <crypto/scatterwalk.h> +#include <linux/cpumask.h> #include <linux/cryptouser.h> #include <linux/err.h> #include <linux/highmem.h> @@ -20,20 +20,17 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/vmalloc.h> +#include <linux/workqueue.h> #include <net/netlink.h> #include "compress.h" -#define SCOMP_SCRATCH_SIZE 65400 - struct scomp_scratch { spinlock_t lock; union { void *src; unsigned long saddr; }; - void *dst; }; static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = { @@ -44,6 +41,10 @@ static const struct crypto_type crypto_scomp_type; static int scomp_scratch_users; static DEFINE_MUTEX(scomp_lock); +static cpumask_t scomp_scratch_want; +static void scomp_scratch_workfn(struct work_struct *work); +static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn); + static int __maybe_unused crypto_scomp_report( struct sk_buff *skb, struct crypto_alg *alg) { @@ -74,82 +75,48 @@ static void crypto_scomp_free_scratches(void) scratch = per_cpu_ptr(&scomp_scratch, i); free_page(scratch->saddr); - vfree(scratch->dst); scratch->src = NULL; - scratch->dst = NULL; } } -static int crypto_scomp_alloc_scratches(void) +static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu) { - struct scomp_scratch *scratch; - int i; - - for_each_possible_cpu(i) { - struct page *page; - void *mem; - - scratch = per_cpu_ptr(&scomp_scratch, i); + int node = cpu_to_node(cpu); + 
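Editor's note: the skcipher_walk_first()/skcipher_walk_done() pair moved above keeps the long-standing walk calling convention described in the kernel-doc. A minimal sketch of the typical consumer loop, assuming a hypothetical per-block helper do_crypt_blocks(); real users normally enter through skcipher_walk_virt() on an initialised request rather than calling skcipher_walk_first() directly.

#include <crypto/internal/skcipher.h>

/* Sketch only: do_crypt_blocks() is a made-up stand-in for a real cipher. */
static void do_crypt_blocks(u8 *dst, const u8 *src, unsigned int nbytes);

static int example_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		unsigned int n = walk.nbytes;

		/* Only the final step may carry a partial block. */
		if (n < walk.total)
			n = round_down(n, crypto_skcipher_blocksize(tfm));

		do_crypt_blocks(walk.dst.virt.addr, walk.src.virt.addr, n);

		/* Pass back the bytes *not* processed in this step. */
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}
	return err;
}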
struct page *page; - page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0); - if (!page) - goto error; - scratch->src = page_address(page); - mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); - if (!mem) - goto error; - scratch->dst = mem; - } + page = alloc_pages_node(node, GFP_KERNEL, 0); + if (!page) + return -ENOMEM; + spin_lock_bh(&scratch->lock); + scratch->src = page_address(page); + spin_unlock_bh(&scratch->lock); return 0; -error: - crypto_scomp_free_scratches(); - return -ENOMEM; } -static void scomp_free_streams(struct scomp_alg *alg) +static void scomp_scratch_workfn(struct work_struct *work) { - struct crypto_acomp_stream __percpu *stream = alg->stream; - int i; + int cpu; - alg->stream = NULL; - if (!stream) - return; + for_each_cpu(cpu, &scomp_scratch_want) { + struct scomp_scratch *scratch; - for_each_possible_cpu(i) { - struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i); - - if (IS_ERR_OR_NULL(ps->ctx)) + scratch = per_cpu_ptr(&scomp_scratch, cpu); + if (scratch->src) + continue; + if (scomp_alloc_scratch(scratch, cpu)) break; - alg->free_ctx(ps->ctx); + cpumask_clear_cpu(cpu, &scomp_scratch_want); } - - free_percpu(stream); } -static int scomp_alloc_streams(struct scomp_alg *alg) +static int crypto_scomp_alloc_scratches(void) { - struct crypto_acomp_stream __percpu *stream; - int i; - - stream = alloc_percpu(struct crypto_acomp_stream); - if (!stream) - return -ENOMEM; - - alg->stream = stream; - - for_each_possible_cpu(i) { - struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i); - - ps->ctx = alg->alloc_ctx(); - if (IS_ERR(ps->ctx)) { - scomp_free_streams(alg); - return PTR_ERR(ps->ctx); - } + unsigned int i = cpumask_first(cpu_possible_mask); + struct scomp_scratch *scratch; - spin_lock_init(&ps->lock); - } - return 0; + scratch = per_cpu_ptr(&scomp_scratch, i); + return scomp_alloc_scratch(scratch, i); } static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) @@ -158,11 +125,9 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) int ret = 0; mutex_lock(&scomp_lock); - if (!alg->stream) { - ret = scomp_alloc_streams(alg); - if (ret) - goto unlock; - } + ret = crypto_acomp_alloc_streams(&alg->streams); + if (ret) + goto unlock; if (!scomp_scratch_users++) { ret = crypto_scomp_alloc_scratches(); if (ret) @@ -174,13 +139,40 @@ unlock: return ret; } +static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch) +{ + int cpu = raw_smp_processor_id(); + struct scomp_scratch *scratch; + + scratch = per_cpu_ptr(&scomp_scratch, cpu); + spin_lock(&scratch->lock); + if (likely(scratch->src)) + return scratch; + spin_unlock(&scratch->lock); + + cpumask_set_cpu(cpu, &scomp_scratch_want); + schedule_work(&scomp_scratch_work); + + scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask)); + spin_lock(&scratch->lock); + return scratch; +} + +static inline void scomp_unlock_scratch(struct scomp_scratch *scratch) + __releases(scratch) +{ + spin_unlock(&scratch->lock); +} + static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) { - struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch); struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm); + bool src_isvirt = acomp_request_src_isvirt(req); + bool dst_isvirt = acomp_request_dst_isvirt(req); struct crypto_scomp *scomp = *tfm_ctx; struct crypto_acomp_stream *stream; + struct scomp_scratch *scratch; unsigned int slen = req->slen; unsigned int dlen = req->dlen; struct page *spage, *dpage; @@ -197,15 +189,32 @@ static int 
scomp_acomp_comp_decomp(struct acomp_req *req, int dir) if (!req->dst || !dlen) return -EINVAL; - if (acomp_request_src_isvirt(req)) + if (dst_isvirt) + dst = req->dvirt; + else { + if (dlen <= req->dst->length) { + dpage = sg_page(req->dst); + doff = req->dst->offset; + } else + return -ENOSYS; + + dpage = nth_page(dpage, doff / PAGE_SIZE); + doff = offset_in_page(doff); + + n = (dlen - 1) / PAGE_SIZE; + n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE; + if (PageHighMem(dpage + n) && + size_add(doff, dlen) > PAGE_SIZE) + return -ENOSYS; + dst = kmap_local_page(dpage) + doff; + } + + if (src_isvirt) src = req->svirt; else { - src = scratch->src; + src = NULL; do { - if (acomp_request_src_isfolio(req)) { - spage = folio_page(req->sfolio, 0); - soff = req->soff; - } else if (slen <= req->src->length) { + if (slen <= req->src->length) { spage = sg_page(req->src); soff = req->src->offset; } else @@ -223,59 +232,37 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) } while (0); } - if (acomp_request_dst_isvirt(req)) - dst = req->dvirt; - else { - unsigned int max = SCOMP_SCRATCH_SIZE; - - dst = scratch->dst; - do { - if (acomp_request_dst_isfolio(req)) { - dpage = folio_page(req->dfolio, 0); - doff = req->doff; - } else if (dlen <= req->dst->length) { - dpage = sg_page(req->dst); - doff = req->dst->offset; - } else - break; - - dpage = nth_page(dpage, doff / PAGE_SIZE); - doff = offset_in_page(doff); + stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams); - n = (dlen - 1) / PAGE_SIZE; - n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE; - if (PageHighMem(nth_page(dpage, n)) && - size_add(doff, dlen) > PAGE_SIZE) - break; - dst = kmap_local_page(dpage) + doff; - max = dlen; - } while (0); - dlen = min(dlen, max); - } - - spin_lock_bh(&scratch->lock); + if (!src_isvirt && !src) { + const u8 *src; - if (src == scratch->src) + scratch = scomp_lock_scratch(); + src = scratch->src; memcpy_from_sglist(scratch->src, req->src, 0, slen); - stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream); - spin_lock(&stream->lock); - if (dir) + if (dir) + ret = crypto_scomp_compress(scomp, src, slen, + dst, &dlen, stream->ctx); + else + ret = crypto_scomp_decompress(scomp, src, slen, + dst, &dlen, stream->ctx); + + scomp_unlock_scratch(scratch); + } else if (dir) ret = crypto_scomp_compress(scomp, src, slen, dst, &dlen, stream->ctx); else ret = crypto_scomp_decompress(scomp, src, slen, dst, &dlen, stream->ctx); - if (dst == scratch->dst) - memcpy_to_sglist(req->dst, 0, dst, dlen); - - spin_unlock(&stream->lock); - spin_unlock_bh(&scratch->lock); + crypto_acomp_unlock_stream_bh(stream); req->dlen = dlen; - if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) { + if (!src_isvirt && src) + kunmap_local(src); + if (!dst_isvirt) { kunmap_local(dst); dlen += doff; for (;;) { @@ -286,34 +273,18 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) dpage = nth_page(dpage, 1); } } - if (!acomp_request_src_isvirt(req) && src != scratch->src) - kunmap_local(src); return ret; } -static int scomp_acomp_chain(struct acomp_req *req, int dir) -{ - struct acomp_req *r2; - int err; - - err = scomp_acomp_comp_decomp(req, dir); - req->base.err = err; - - list_for_each_entry(r2, &req->base.list, base.list) - r2->base.err = scomp_acomp_comp_decomp(r2, dir); - - return err; -} - static int scomp_acomp_compress(struct acomp_req *req) { - return scomp_acomp_chain(req, 1); + return scomp_acomp_comp_decomp(req, 1); } static int scomp_acomp_decompress(struct acomp_req *req) { - 
return scomp_acomp_chain(req, 0); + return scomp_acomp_comp_decomp(req, 0); } static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) @@ -322,6 +293,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) crypto_free_scomp(*ctx); + flush_work(&scomp_scratch_work); mutex_lock(&scomp_lock); if (!--scomp_scratch_users) crypto_scomp_free_scratches(); @@ -355,7 +327,9 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) static void crypto_scomp_destroy(struct crypto_alg *alg) { - scomp_free_streams(__crypto_scomp_alg(alg)); + struct scomp_alg *scomp = __crypto_scomp_alg(alg); + + crypto_acomp_free_streams(&scomp->streams); } static const struct crypto_type crypto_scomp_type = { @@ -372,6 +346,7 @@ static const struct crypto_type crypto_scomp_type = { .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SCOMPRESS, .tfmsize = offsetof(struct crypto_scomp, base), + .algsize = offsetof(struct scomp_alg, base), }; static void scomp_prepare_alg(struct scomp_alg *alg) @@ -380,7 +355,7 @@ static void scomp_prepare_alg(struct scomp_alg *alg) comp_prepare_alg(&alg->calg); - base->cra_flags |= CRYPTO_ALG_REQ_CHAIN; + base->cra_flags |= CRYPTO_ALG_REQ_VIRT; } int crypto_register_scomp(struct scomp_alg *alg) diff --git a/crypto/seed.c b/crypto/seed.c index d05d8ed909fa..815391f213de 100644 --- a/crypto/seed.c +++ b/crypto/seed.c @@ -460,7 +460,7 @@ static void __exit seed_fini(void) crypto_unregister_alg(&seed_alg); } -subsys_initcall(seed_init); +module_init(seed_init); module_exit(seed_fini); MODULE_DESCRIPTION("SEED Cipher Algorithm"); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 17e11d51ddc3..2bae99e33526 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -64,20 +64,9 @@ static int seqiv_aead_encrypt(struct aead_request *req) data = req->base.data; info = req->iv; - if (req->src != req->dst) { - SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); - - skcipher_request_set_sync_tfm(nreq, ctx->sknull); - skcipher_request_set_callback(nreq, req->base.flags, - NULL, NULL); - skcipher_request_set_crypt(nreq, req->src, req->dst, - req->assoclen + req->cryptlen, - NULL); - - err = crypto_skcipher_encrypt(nreq); - if (err) - return err; - } + if (req->src != req->dst) + memcpy_sglist(req->dst, req->src, + req->assoclen + req->cryptlen); if (unlikely(!IS_ALIGNED((unsigned long)info, crypto_aead_alignmask(geniv) + 1))) { @@ -179,7 +168,7 @@ static void __exit seqiv_module_exit(void) crypto_unregister_template(&seqiv_tmpl); } -subsys_initcall(seqiv_module_init); +module_init(seqiv_module_init); module_exit(seqiv_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index f6ef187be6fe..b21e7606c652 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -599,7 +599,7 @@ static void __exit serpent_mod_fini(void) crypto_unregister_alg(&srp_alg); } -subsys_initcall(serpent_mod_init); +module_init(serpent_mod_init); module_exit(serpent_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 325b57fe28dc..024e8043bab0 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -12,13 +12,11 @@ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <asm/byteorder.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> const u8 
sha1_zero_message_hash[SHA1_DIGEST_SIZE] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, @@ -39,38 +37,31 @@ static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, memzero_explicit(temp, sizeof(temp)); } -int crypto_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - return sha1_base_do_update(desc, data, len, sha1_generic_block_fn); + return sha1_base_do_update_blocks(desc, data, len, + sha1_generic_block_fn); } -EXPORT_SYMBOL(crypto_sha1_update); -static int sha1_final(struct shash_desc *desc, u8 *out) +static int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - sha1_base_do_finalize(desc, sha1_generic_block_fn); + sha1_base_do_finup(desc, data, len, sha1_generic_block_fn); return sha1_base_finish(desc, out); } -int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - sha1_base_do_update(desc, data, len, sha1_generic_block_fn); - return sha1_final(desc, out); -} -EXPORT_SYMBOL(crypto_sha1_finup); - static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = crypto_sha1_update, - .final = sha1_final, .finup = crypto_sha1_finup, - .descsize = sizeof(struct sha1_state), + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -86,7 +77,7 @@ static void __exit sha1_generic_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(sha1_generic_mod_init); +module_init(sha1_generic_mod_init); module_exit(sha1_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/sha256.c b/crypto/sha256.c new file mode 100644 index 000000000000..4aeb213bab11 --- /dev/null +++ b/crypto/sha256.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Crypto API wrapper for the SHA-256 and SHA-224 library functions + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> + */ +#include <crypto/internal/hash.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, + 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, + 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, + 0x2f +}; +EXPORT_SYMBOL_GPL(sha224_zero_message_hash); + +const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; +EXPORT_SYMBOL_GPL(sha256_zero_message_hash); + +static int crypto_sha256_init(struct shash_desc *desc) +{ + sha256_block_init(shash_desc_ctx(desc)); + return 0; +} + +static inline int crypto_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len, bool force_generic) +{ + struct crypto_sha256_state *sctx = shash_desc_ctx(desc); + int remain = len % SHA256_BLOCK_SIZE; + + sctx->count += len - remain; + sha256_choose_blocks(sctx->state, data, len / SHA256_BLOCK_SIZE, + force_generic, !force_generic); + return 
remain; +} + +static int crypto_sha256_update_generic(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + return crypto_sha256_update(desc, data, len, true); +} + +static int crypto_sha256_update_lib(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + sha256_update(shash_desc_ctx(desc), data, len); + return 0; +} + +static int crypto_sha256_update_arch(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + return crypto_sha256_update(desc, data, len, false); +} + +static int crypto_sha256_final_lib(struct shash_desc *desc, u8 *out) +{ + sha256_final(shash_desc_ctx(desc), out); + return 0; +} + +static __always_inline int crypto_sha256_finup(struct shash_desc *desc, + const u8 *data, + unsigned int len, u8 *out, + bool force_generic) +{ + struct crypto_sha256_state *sctx = shash_desc_ctx(desc); + unsigned int remain = len; + u8 *buf; + + if (len >= SHA256_BLOCK_SIZE) + remain = crypto_sha256_update(desc, data, len, force_generic); + sctx->count += remain; + buf = memcpy(sctx + 1, data + len - remain, remain); + sha256_finup(sctx, buf, remain, out, + crypto_shash_digestsize(desc->tfm), force_generic, + !force_generic); + return 0; +} + +static int crypto_sha256_finup_generic(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return crypto_sha256_finup(desc, data, len, out, true); +} + +static int crypto_sha256_finup_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return crypto_sha256_finup(desc, data, len, out, false); +} + +static int crypto_sha256_digest_generic(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + crypto_sha256_init(desc); + return crypto_sha256_finup_generic(desc, data, len, out); +} + +static int crypto_sha256_digest_lib(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + sha256(data, len, out); + return 0; +} + +static int crypto_sha256_digest_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + crypto_sha256_init(desc); + return crypto_sha256_finup_arch(desc, data, len, out); +} + +static int crypto_sha224_init(struct shash_desc *desc) +{ + sha224_block_init(shash_desc_ctx(desc)); + return 0; +} + +static int crypto_sha224_final_lib(struct shash_desc *desc, u8 *out) +{ + sha224_final(shash_desc_ctx(desc), out); + return 0; +} + +static int crypto_sha256_import_lib(struct shash_desc *desc, const void *in) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + const u8 *p = in; + + memcpy(sctx, p, sizeof(*sctx)); + p += sizeof(*sctx); + sctx->count += *p; + return 0; +} + +static int crypto_sha256_export_lib(struct shash_desc *desc, void *out) +{ + struct sha256_state *sctx0 = shash_desc_ctx(desc); + struct sha256_state sctx = *sctx0; + unsigned int partial; + u8 *p = out; + + partial = sctx.count % SHA256_BLOCK_SIZE; + sctx.count -= partial; + memcpy(p, &sctx, sizeof(sctx)); + p += sizeof(sctx); + *p = partial; + return 0; +} + +static struct shash_alg algs[] = { + { + .base.cra_name = "sha256", + .base.cra_driver_name = "sha256-generic", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .init = crypto_sha256_init, + .update = crypto_sha256_update_generic, + .finup = crypto_sha256_finup_generic, + .digest = crypto_sha256_digest_generic, + .descsize = sizeof(struct crypto_sha256_state), + }, + { + .base.cra_name = "sha224", + 
.base.cra_driver_name = "sha224-generic", + .base.cra_priority = 100, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .init = crypto_sha224_init, + .update = crypto_sha256_update_generic, + .finup = crypto_sha256_finup_generic, + .descsize = sizeof(struct crypto_sha256_state), + }, + { + .base.cra_name = "sha256", + .base.cra_driver_name = "sha256-lib", + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .init = crypto_sha256_init, + .update = crypto_sha256_update_lib, + .final = crypto_sha256_final_lib, + .digest = crypto_sha256_digest_lib, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct crypto_sha256_state) + + SHA256_BLOCK_SIZE + 1, + .import = crypto_sha256_import_lib, + .export = crypto_sha256_export_lib, + }, + { + .base.cra_name = "sha224", + .base.cra_driver_name = "sha224-lib", + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .init = crypto_sha224_init, + .update = crypto_sha256_update_lib, + .final = crypto_sha224_final_lib, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct crypto_sha256_state) + + SHA256_BLOCK_SIZE + 1, + .import = crypto_sha256_import_lib, + .export = crypto_sha256_export_lib, + }, + { + .base.cra_name = "sha256", + .base.cra_driver_name = "sha256-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA256_DIGEST_SIZE, + .init = crypto_sha256_init, + .update = crypto_sha256_update_arch, + .finup = crypto_sha256_finup_arch, + .digest = crypto_sha256_digest_arch, + .descsize = sizeof(struct crypto_sha256_state), + }, + { + .base.cra_name = "sha224", + .base.cra_driver_name = "sha224-" __stringify(ARCH), + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, + .base.cra_blocksize = SHA224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .digestsize = SHA224_DIGEST_SIZE, + .init = crypto_sha224_init, + .update = crypto_sha256_update_arch, + .finup = crypto_sha256_finup_arch, + .descsize = sizeof(struct crypto_sha256_state), + }, +}; + +static unsigned int num_algs; + +static int __init crypto_sha256_mod_init(void) +{ + /* register the arch flavours only if they differ from generic */ + num_algs = ARRAY_SIZE(algs); + BUILD_BUG_ON(ARRAY_SIZE(algs) <= 2); + if (!sha256_is_arch_optimized()) + num_algs -= 2; + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); +} +module_init(crypto_sha256_mod_init); + +static void __exit crypto_sha256_mod_exit(void) +{ + crypto_unregister_shashes(algs, num_algs); +} +module_exit(crypto_sha256_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto API wrapper for the SHA-256 and SHA-224 library functions"); + +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha256-generic"); +MODULE_ALIAS_CRYPTO("sha256-" __stringify(ARCH)); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha224-generic"); +MODULE_ALIAS_CRYPTO("sha224-" __stringify(ARCH)); diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c deleted file mode 100644 index b00521f1a6d4..000000000000 --- a/crypto/sha256_generic.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Crypto 
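Editor's note: with crypto/sha256.c now registering generic, library-backed and arch-optimized shash instances under the same algorithm names, in-kernel users keep going through the ordinary shash interface. A minimal sketch, assuming a sleepable context and trimming error handling; the function name is illustrative only.

#include <crypto/hash.h>
#include <crypto/sha2.h>

static int sha256_digest_example(const u8 *data, unsigned int len,
				 u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	/* Resolves to the highest-priority registered "sha256". */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One-shot digest over a linear buffer. */
	err = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);
	return err;
}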
API wrapper for the generic SHA256 code from lib/crypto/sha256.c - * - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> - */ -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <asm/byteorder.h> -#include <linux/unaligned.h> - -const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { - 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, - 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, - 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, - 0x2f -}; -EXPORT_SYMBOL_GPL(sha224_zero_message_hash); - -const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 -}; -EXPORT_SYMBOL_GPL(sha256_zero_message_hash); - -int crypto_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - sha256_update(shash_desc_ctx(desc), data, len); - return 0; -} -EXPORT_SYMBOL(crypto_sha256_update); - -static int crypto_sha256_final(struct shash_desc *desc, u8 *out) -{ - if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) - sha224_final(shash_desc_ctx(desc), out); - else - sha256_final(shash_desc_ctx(desc), out); - return 0; -} - -int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash) -{ - sha256_update(shash_desc_ctx(desc), data, len); - return crypto_sha256_final(desc, hash); -} -EXPORT_SYMBOL(crypto_sha256_finup); - -static struct shash_alg sha256_algs[2] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = crypto_sha256_update, - .final = crypto_sha256_final, - .finup = crypto_sha256_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-generic", - .cra_priority = 100, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = crypto_sha256_update, - .final = crypto_sha256_final, - .finup = crypto_sha256_finup, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-generic", - .cra_priority = 100, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init sha256_generic_mod_init(void) -{ - return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs)); -} - -static void __exit sha256_generic_mod_fini(void) -{ - crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs)); -} - -subsys_initcall(sha256_generic_mod_init); -module_exit(sha256_generic_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); - -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha224-generic"); -MODULE_ALIAS_CRYPTO("sha256"); -MODULE_ALIAS_CRYPTO("sha256-generic"); diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index b103642b56ea..41d1e506e6de 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -9,10 +9,10 @@ * Ard Biesheuvel <ard.biesheuvel@linaro.org> */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include 
<linux/types.h> #include <crypto/sha3.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> #include <linux/unaligned.h> /* @@ -161,68 +161,51 @@ static void keccakf(u64 st[25]) int crypto_sha3_init(struct shash_desc *desc) { struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int digest_size = crypto_shash_digestsize(desc->tfm); - - sctx->rsiz = 200 - 2 * digest_size; - sctx->rsizw = sctx->rsiz / 8; - sctx->partial = 0; memset(sctx->st, 0, sizeof(sctx->st)); return 0; } EXPORT_SYMBOL(crypto_sha3_init); -int crypto_sha3_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { + unsigned int rsiz = crypto_shash_blocksize(desc->tfm); struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int done; - const u8 *src; - - done = 0; - src = data; - - if ((sctx->partial + len) > (sctx->rsiz - 1)) { - if (sctx->partial) { - done = -sctx->partial; - memcpy(sctx->buf + sctx->partial, data, - done + sctx->rsiz); - src = sctx->buf; - } + unsigned int rsizw = rsiz / 8; - do { - unsigned int i; + do { + int i; - for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= get_unaligned_le64(src + 8 * i); - keccakf(sctx->st); + for (i = 0; i < rsizw; i++) + sctx->st[i] ^= get_unaligned_le64(data + 8 * i); + keccakf(sctx->st); - done += sctx->rsiz; - src = data + done; - } while (done + (sctx->rsiz - 1) < len); - - sctx->partial = 0; - } - memcpy(sctx->buf + sctx->partial, src, len - done); - sctx->partial += (len - done); - - return 0; + data += rsiz; + len -= rsiz; + } while (len >= rsiz); + return len; } -EXPORT_SYMBOL(crypto_sha3_update); -int crypto_sha3_final(struct shash_desc *desc, u8 *out) +static int crypto_sha3_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha3_state *sctx = shash_desc_ctx(desc); - unsigned int i, inlen = sctx->partial; unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + unsigned int rsiz = crypto_shash_blocksize(desc->tfm); + struct sha3_state *sctx = shash_desc_ctx(desc); + __le64 block[SHA3_224_BLOCK_SIZE / 8] = {}; __le64 *digest = (__le64 *)out; + unsigned int rsizw = rsiz / 8; + u8 *p; + int i; - sctx->buf[inlen++] = 0x06; - memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); - sctx->buf[sctx->rsiz - 1] |= 0x80; + p = memcpy(block, src, len); + p[len++] = 0x06; + p[rsiz - 1] |= 0x80; - for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); + for (i = 0; i < rsizw; i++) + sctx->st[i] ^= le64_to_cpu(block[i]); + memzero_explicit(block, sizeof(block)); keccakf(sctx->st); @@ -232,49 +215,51 @@ int crypto_sha3_final(struct shash_desc *desc, u8 *out) if (digest_size & 4) put_unaligned_le32(sctx->st[i], (__le32 *)digest); - memset(sctx, 0, sizeof(*sctx)); return 0; } -EXPORT_SYMBOL(crypto_sha3_final); static struct shash_alg algs[] = { { .digestsize = SHA3_224_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-224", .base.cra_driver_name = "sha3-224-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_224_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_256_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = 
SHA3_STATE_SIZE, .base.cra_name = "sha3-256", .base.cra_driver_name = "sha3-256-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_256_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_384_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-384", .base.cra_driver_name = "sha3-384-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_384_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_512_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, - .final = crypto_sha3_final, - .descsize = sizeof(struct sha3_state), + .finup = crypto_sha3_finup, + .descsize = SHA3_STATE_SIZE, .base.cra_name = "sha3-512", .base.cra_driver_name = "sha3-512-generic", + .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .base.cra_blocksize = SHA3_512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; @@ -289,7 +274,7 @@ static void __exit sha3_generic_mod_fini(void) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } -subsys_initcall(sha3_generic_mod_init); +module_init(sha3_generic_mod_init); module_exit(sha3_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index ed81813bd420..7368173f545e 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -6,16 +6,10 @@ * Copyright (c) 2003 Kyle McMartin <kyle@debian.org> */ #include <crypto/internal/hash.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/init.h> -#include <linux/crypto.h> -#include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <linux/percpu.h> -#include <asm/byteorder.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/unaligned.h> const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = { @@ -145,47 +139,42 @@ sha512_transform(u64 *state, const u8 *input) state[4] += e; state[5] += f; state[6] += g; state[7] += h; } -static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src, - int blocks) +void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src, + int blocks) { - while (blocks--) { + do { sha512_transform(sst->state, src); src += SHA512_BLOCK_SIZE; - } + } while (--blocks); } +EXPORT_SYMBOL_GPL(sha512_generic_block_fn); -int crypto_sha512_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int crypto_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - return sha512_base_do_update(desc, data, len, sha512_generic_block_fn); + return sha512_base_do_update_blocks(desc, data, len, + sha512_generic_block_fn); } -EXPORT_SYMBOL(crypto_sha512_update); -static int sha512_final(struct shash_desc *desc, u8 *hash) +static int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash) { - sha512_base_do_finalize(desc, sha512_generic_block_fn); + sha512_base_do_finup(desc, data, len, sha512_generic_block_fn); return sha512_base_finish(desc, hash); } -int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash) -{ - sha512_base_do_update(desc, data, len, sha512_generic_block_fn); - return sha512_final(desc, hash); -} -EXPORT_SYMBOL(crypto_sha512_finup); - static struct shash_alg sha512_algs[2] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = 
crypto_sha512_update, - .final = sha512_final, .finup = crypto_sha512_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name = "sha512-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -193,13 +182,14 @@ static struct shash_alg sha512_algs[2] = { { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = crypto_sha512_update, - .final = sha512_final, .finup = crypto_sha512_finup, - .descsize = sizeof(struct sha512_state), + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name = "sha384-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -215,7 +205,7 @@ static void __exit sha512_generic_mod_fini(void) crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs)); } -subsys_initcall(sha512_generic_mod_init); +module_init(sha512_generic_mod_init); module_exit(sha512_generic_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/shash.c b/crypto/shash.c index 301ab42bf849..37537d7995c7 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -16,6 +16,24 @@ #include "hash.h" +static inline bool crypto_shash_block_only(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_BLOCK_ONLY; +} + +static inline bool crypto_shash_final_nonzero(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_FINAL_NONZERO; +} + +static inline bool crypto_shash_finup_max(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_FINUP_MAX; +} + int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -46,18 +64,27 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_shash_setkey); -int crypto_shash_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int __crypto_shash_init(struct shash_desc *desc) { - return crypto_shash_alg(desc->tfm)->update(desc, data, len); + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_block_only(tfm)) { + u8 *buf = shash_desc_ctx(desc); + + buf += crypto_shash_descsize(tfm) - 1; + *buf = 0; + } + + return crypto_shash_alg(tfm)->init(desc); } -EXPORT_SYMBOL_GPL(crypto_shash_update); -int crypto_shash_final(struct shash_desc *desc, u8 *out) +int crypto_shash_init(struct shash_desc *desc) { - return crypto_shash_alg(desc->tfm)->final(desc, out); + if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return __crypto_shash_init(desc); } -EXPORT_SYMBOL_GPL(crypto_shash_final); +EXPORT_SYMBOL_GPL(crypto_shash_init); static int shash_default_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) @@ -68,20 +95,89 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data, shash->final(desc, out); } -int crypto_shash_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) +static int crypto_shash_op_and_zero( + int (*op)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out), + struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out); + int err; + + err = op(desc, data, len, out); + memset(shash_desc_ctx(desc), 0, crypto_shash_descsize(desc->tfm)); 
+ return err; +} + +int crypto_shash_finup(struct shash_desc *restrict desc, const u8 *data, + unsigned int len, u8 *restrict out) +{ + struct crypto_shash *tfm = desc->tfm; + u8 *blenp = shash_desc_ctx(desc); + bool finup_max, nonzero; + unsigned int bs; + int err; + u8 *buf; + + if (!crypto_shash_block_only(tfm)) { + if (out) + goto finup; + return crypto_shash_alg(tfm)->update(desc, data, len); + } + + finup_max = out && crypto_shash_finup_max(tfm); + + /* Retain extra block for final nonzero algorithms. */ + nonzero = crypto_shash_final_nonzero(tfm); + + /* + * The partial block buffer follows the algorithm desc context. + * The byte following that contains the length. + */ + blenp += crypto_shash_descsize(tfm) - 1; + bs = crypto_shash_blocksize(tfm); + buf = blenp - bs; + + if (likely(!*blenp && finup_max)) + goto finup; + + while ((*blenp + len) >= bs + nonzero) { + unsigned int nbytes = len - nonzero; + const u8 *src = data; + + if (*blenp) { + memcpy(buf + *blenp, data, bs - *blenp); + nbytes = bs; + src = buf; + } + + err = crypto_shash_alg(tfm)->update(desc, src, nbytes); + if (err < 0) + return err; + + data += nbytes - err - *blenp; + len -= nbytes - err - *blenp; + *blenp = 0; + } + + if (*blenp || !out) { + memcpy(buf + *blenp, data, len); + *blenp += len; + if (!out) + return 0; + data = buf; + len = *blenp; + } + +finup: + return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->finup, desc, + data, len, out); } EXPORT_SYMBOL_GPL(crypto_shash_finup); static int shash_default_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - struct shash_alg *shash = crypto_shash_alg(desc->tfm); - - return shash->init(desc) ?: - shash->finup(desc, data, len, out); + return __crypto_shash_init(desc) ?: + crypto_shash_finup(desc, data, len, out); } int crypto_shash_digest(struct shash_desc *desc, const u8 *data, @@ -92,7 +188,8 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return crypto_shash_alg(tfm)->digest(desc, data, len, out); + return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->digest, desc, + data, len, out); } EXPORT_SYMBOL_GPL(crypto_shash_digest); @@ -100,44 +197,104 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, unsigned int len, u8 *out) { SHASH_DESC_ON_STACK(desc, tfm); - int err; desc->tfm = tfm; + return crypto_shash_digest(desc, data, len, out); +} +EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); - err = crypto_shash_digest(desc, data, len, out); +static int __crypto_shash_export(struct shash_desc *desc, void *out, + int (*export)(struct shash_desc *desc, + void *out)) +{ + struct crypto_shash *tfm = desc->tfm; + u8 *buf = shash_desc_ctx(desc); + unsigned int plen, ss; + + plen = crypto_shash_blocksize(tfm) + 1; + ss = crypto_shash_statesize(tfm); + if (crypto_shash_block_only(tfm)) + ss -= plen; + if (!export) { + memcpy(out, buf, ss); + return 0; + } - shash_desc_zero(desc); + return export(desc, out); +} - return err; +int crypto_shash_export_core(struct shash_desc *desc, void *out) +{ + return __crypto_shash_export(desc, out, + crypto_shash_alg(desc->tfm)->export_core); } -EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); +EXPORT_SYMBOL_GPL(crypto_shash_export_core); int crypto_shash_export(struct shash_desc *desc, void *out) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - if (shash->export) - return shash->export(desc, out); + if (crypto_shash_block_only(tfm)) { + unsigned int plen = 
crypto_shash_blocksize(tfm) + 1; + unsigned int descsize = crypto_shash_descsize(tfm); + unsigned int ss = crypto_shash_statesize(tfm); + u8 *buf = shash_desc_ctx(desc); - memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm)); - return 0; + memcpy(out + ss - plen, buf + descsize - plen, plen); + } + return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export); } EXPORT_SYMBOL_GPL(crypto_shash_export); -int crypto_shash_import(struct shash_desc *desc, const void *in) +static int __crypto_shash_import(struct shash_desc *desc, const void *in, + int (*import)(struct shash_desc *desc, + const void *in)) { struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned int descsize, plen, ss; + u8 *buf = shash_desc_ctx(desc); if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - if (shash->import) - return shash->import(desc, in); + plen = crypto_shash_blocksize(tfm) + 1; + descsize = crypto_shash_descsize(tfm); + ss = crypto_shash_statesize(tfm); + buf[descsize - 1] = 0; + if (crypto_shash_block_only(tfm)) + ss -= plen; + if (!import) { + memcpy(buf, in, ss); + return 0; + } - memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); - return 0; + return import(desc, in); +} + +int crypto_shash_import_core(struct shash_desc *desc, const void *in) +{ + return __crypto_shash_import(desc, in, + crypto_shash_alg(desc->tfm)->import_core); +} +EXPORT_SYMBOL_GPL(crypto_shash_import_core); + +int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + struct crypto_shash *tfm = desc->tfm; + int err; + + err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import); + if (crypto_shash_block_only(tfm)) { + unsigned int plen = crypto_shash_blocksize(tfm) + 1; + unsigned int descsize = crypto_shash_descsize(tfm); + unsigned int ss = crypto_shash_statesize(tfm); + u8 *buf = shash_desc_ctx(desc); + + memcpy(buf + descsize - plen, in + ss - plen, plen); + if (buf[descsize - 1] >= plen) + err = -EOVERFLOW; + } + return err; } EXPORT_SYMBOL_GPL(crypto_shash_import); @@ -153,9 +310,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); struct shash_alg *alg = crypto_shash_alg(hash); - int err; - - hash->descsize = alg->descsize; shash_set_needkey(hash, alg); @@ -165,18 +319,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) if (!alg->init_tfm) return 0; - err = alg->init_tfm(hash); - if (err) - return err; - - /* ->init_tfm() may have increased the descsize. 
*/ - if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) { - if (alg->exit_tfm) - alg->exit_tfm(hash); - return -EINVAL; - } - - return 0; + return alg->init_tfm(hash); } static void crypto_shash_free_instance(struct crypto_instance *inst) @@ -227,6 +370,7 @@ const struct crypto_type crypto_shash_type = { .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SHASH, .tfmsize = offsetof(struct crypto_shash, base), + .algsize = offsetof(struct shash_alg, base), }; int crypto_grab_shash(struct crypto_shash_spawn *spawn, @@ -273,8 +417,6 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash) if (IS_ERR(nhash)) return nhash; - nhash->descsize = hash->descsize; - if (alg->clone_tfm) { err = alg->clone_tfm(nhash, hash); if (err) { @@ -283,6 +425,9 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash) } } + if (alg->exit_tfm) + crypto_shash_tfm(nhash)->exit = crypto_shash_exit_tfm; + return nhash; } EXPORT_SYMBOL_GPL(crypto_clone_shash); @@ -303,14 +448,21 @@ int hash_prepare_alg(struct hash_alg_common *alg) return 0; } +static int shash_default_export_core(struct shash_desc *desc, void *out) +{ + return -ENOSYS; +} + +static int shash_default_import_core(struct shash_desc *desc, const void *in) +{ + return -ENOSYS; +} + static int shash_prepare_alg(struct shash_alg *alg) { struct crypto_alg *base = &alg->halg.base; int err; - if (alg->descsize > HASH_MAX_DESCSIZE) - return -EINVAL; - if ((alg->export && !alg->import) || (alg->import && !alg->export)) return -EINVAL; @@ -320,6 +472,7 @@ static int shash_prepare_alg(struct shash_alg *alg) base->cra_type = &crypto_shash_type; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + base->cra_flags |= CRYPTO_ALG_REQ_VIRT; /* * Handle missing optional functions. For each one we can either @@ -336,11 +489,30 @@ static int shash_prepare_alg(struct shash_alg *alg) alg->finup = shash_default_finup; if (!alg->digest) alg->digest = shash_default_digest; - if (!alg->export) + if (!alg->export && !alg->halg.statesize) alg->halg.statesize = alg->descsize; if (!alg->setkey) alg->setkey = shash_no_setkey; + if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) { + BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256); + alg->descsize += base->cra_blocksize + 1; + alg->statesize += base->cra_blocksize + 1; + alg->export_core = alg->export; + alg->import_core = alg->import; + } else if (!alg->export_core || !alg->import_core) { + alg->export_core = shash_default_export_core; + alg->import_core = shash_default_import_core; + base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE; + } + + if (alg->descsize > HASH_MAX_DESCSIZE) + return -EINVAL; + if (alg->statesize > HASH_MAX_STATESIZE) + return -EINVAL; + + base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize; + return 0; } diff --git a/crypto/sig.c b/crypto/sig.c index dfc7cae90802..beba745b6405 100644 --- a/crypto/sig.c +++ b/crypto/sig.c @@ -74,6 +74,7 @@ static const struct crypto_type crypto_sig_type = { .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SIG, .tfmsize = offsetof(struct crypto_sig, base), + .algsize = offsetof(struct sig_alg, base), }; struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask) @@ -102,6 +103,11 @@ static int sig_default_set_key(struct crypto_sig *tfm, return -ENOSYS; } +static unsigned int sig_default_size(struct crypto_sig *tfm) +{ + return DIV_ROUND_UP_POW2(crypto_sig_keysize(tfm), BITS_PER_BYTE); +} + static int sig_prepare_alg(struct sig_alg *alg) { struct crypto_alg *base = &alg->base; @@ -117,9 +123,9 @@ static int 
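Editor's note: for CRYPTO_AHASH_ALG_BLOCK_ONLY algorithms, shash_prepare_alg() grows descsize and statesize by cra_blocksize + 1 so the core can buffer partial blocks itself, keeping the buffered byte count in the last byte of the descriptor context. A commented sketch of that layout, mirroring the pointer arithmetic in crypto_shash_finup() above; locate_partial_block() is a hypothetical helper, not part of the API.

/*
 * Descriptor context layout for a BLOCK_ONLY shash (sketch):
 *
 *   [ algorithm state (original descsize)  ]
 *   [ partial block buffer (cra_blocksize) ]
 *   [ 1 byte: number of buffered bytes     ]
 */
static u8 *locate_partial_block(struct shash_desc *desc, u8 **blenp)
{
	struct crypto_shash *tfm = desc->tfm;
	u8 *p = shash_desc_ctx(desc);

	*blenp = p + crypto_shash_descsize(tfm) - 1;	/* buffered length */
	return *blenp - crypto_shash_blocksize(tfm);	/* buffer start */
}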
sig_prepare_alg(struct sig_alg *alg) if (!alg->key_size) return -EINVAL; if (!alg->max_size) - alg->max_size = alg->key_size; + alg->max_size = sig_default_size; if (!alg->digest_size) - alg->digest_size = alg->key_size; + alg->digest_size = sig_default_size; base->cra_type = &crypto_sig_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 132075a905d9..de5fc91bba26 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -17,7 +17,6 @@ #include <linux/cryptouser.h> #include <linux/err.h> #include <linux/kernel.h> -#include <linux/mm.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> @@ -28,258 +27,14 @@ #define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e -enum { - SKCIPHER_WALK_SLOW = 1 << 0, - SKCIPHER_WALK_COPY = 1 << 1, - SKCIPHER_WALK_DIFF = 1 << 2, - SKCIPHER_WALK_SLEEP = 1 << 3, -}; - static const struct crypto_type crypto_skcipher_type; -static int skcipher_walk_next(struct skcipher_walk *walk); - -static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) -{ - return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; -} - static inline struct skcipher_alg *__crypto_skcipher_alg( struct crypto_alg *alg) { return container_of(alg, struct skcipher_alg, base); } -/** - * skcipher_walk_done() - finish one step of a skcipher_walk - * @walk: the skcipher_walk - * @res: number of bytes *not* processed (>= 0) from walk->nbytes, - * or a -errno value to terminate the walk due to an error - * - * This function cleans up after one step of walking through the source and - * destination scatterlists, and advances to the next step if applicable. - * walk->nbytes is set to the number of bytes available in the next step, - * walk->total is set to the new total number of bytes remaining, and - * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there - * is no more data, or if an error occurred (i.e. -errno return), then - * walk->nbytes and walk->total are set to 0 and all resources owned by the - * skcipher_walk are freed. - * - * Return: 0 or a -errno value. If @res was a -errno value then it will be - * returned, but other errors may occur too. - */ -int skcipher_walk_done(struct skcipher_walk *walk, int res) -{ - unsigned int n = walk->nbytes; /* num bytes processed this step */ - unsigned int total = 0; /* new total remaining */ - - if (!n) - goto finish; - - if (likely(res >= 0)) { - n -= res; /* subtract num bytes *not* processed */ - total = walk->total - n; - } - - if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW | - SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF)))) { - scatterwalk_advance(&walk->in, n); - } else if (walk->flags & SKCIPHER_WALK_DIFF) { - scatterwalk_done_src(&walk->in, n); - } else if (walk->flags & SKCIPHER_WALK_COPY) { - scatterwalk_advance(&walk->in, n); - scatterwalk_map(&walk->out); - memcpy(walk->out.addr, walk->page, n); - } else { /* SKCIPHER_WALK_SLOW */ - if (res > 0) { - /* - * Didn't process all bytes. Either the algorithm is - * broken, or this was the last step and it turned out - * the message wasn't evenly divisible into blocks but - * the algorithm requires it. 
- */ - res = -EINVAL; - total = 0; - } else - memcpy_to_scatterwalk(&walk->out, walk->out.addr, n); - goto dst_done; - } - - scatterwalk_done_dst(&walk->out, n); -dst_done: - - if (res > 0) - res = 0; - - walk->total = total; - walk->nbytes = 0; - - if (total) { - if (walk->flags & SKCIPHER_WALK_SLEEP) - cond_resched(); - walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF); - return skcipher_walk_next(walk); - } - -finish: - /* Short-circuit for the common/fast path. */ - if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) - goto out; - - if (walk->iv != walk->oiv) - memcpy(walk->oiv, walk->iv, walk->ivsize); - if (walk->buffer != walk->page) - kfree(walk->buffer); - if (walk->page) - free_page((unsigned long)walk->page); - -out: - return res; -} -EXPORT_SYMBOL_GPL(skcipher_walk_done); - -static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) -{ - unsigned alignmask = walk->alignmask; - unsigned n; - void *buffer; - - if (!walk->buffer) - walk->buffer = walk->page; - buffer = walk->buffer; - if (!buffer) { - /* Min size for a buffer of bsize bytes aligned to alignmask */ - n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); - - buffer = kzalloc(n, skcipher_walk_gfp(walk)); - if (!buffer) - return skcipher_walk_done(walk, -ENOMEM); - walk->buffer = buffer; - } - - buffer = PTR_ALIGN(buffer, alignmask + 1); - memcpy_from_scatterwalk(buffer, &walk->in, bsize); - walk->out.__addr = buffer; - walk->in.__addr = walk->out.addr; - - walk->nbytes = bsize; - walk->flags |= SKCIPHER_WALK_SLOW; - - return 0; -} - -static int skcipher_next_copy(struct skcipher_walk *walk) -{ - void *tmp = walk->page; - - scatterwalk_map(&walk->in); - memcpy(tmp, walk->in.addr, walk->nbytes); - scatterwalk_unmap(&walk->in); - /* - * walk->in is advanced later when the number of bytes actually - * processed (which might be less than walk->nbytes) is known. 
- */ - - walk->in.__addr = tmp; - walk->out.__addr = tmp; - return 0; -} - -static int skcipher_next_fast(struct skcipher_walk *walk) -{ - unsigned long diff; - - diff = offset_in_page(walk->in.offset) - - offset_in_page(walk->out.offset); - diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) - - (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT)); - - scatterwalk_map(&walk->out); - walk->in.__addr = walk->out.__addr; - - if (diff) { - walk->flags |= SKCIPHER_WALK_DIFF; - scatterwalk_map(&walk->in); - } - - return 0; -} - -static int skcipher_walk_next(struct skcipher_walk *walk) -{ - unsigned int bsize; - unsigned int n; - - n = walk->total; - bsize = min(walk->stride, max(n, walk->blocksize)); - n = scatterwalk_clamp(&walk->in, n); - n = scatterwalk_clamp(&walk->out, n); - - if (unlikely(n < bsize)) { - if (unlikely(walk->total < walk->blocksize)) - return skcipher_walk_done(walk, -EINVAL); - -slow_path: - return skcipher_next_slow(walk, bsize); - } - walk->nbytes = n; - - if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { - if (!walk->page) { - gfp_t gfp = skcipher_walk_gfp(walk); - - walk->page = (void *)__get_free_page(gfp); - if (!walk->page) - goto slow_path; - } - walk->flags |= SKCIPHER_WALK_COPY; - return skcipher_next_copy(walk); - } - - return skcipher_next_fast(walk); -} - -static int skcipher_copy_iv(struct skcipher_walk *walk) -{ - unsigned alignmask = walk->alignmask; - unsigned ivsize = walk->ivsize; - unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1); - unsigned size; - u8 *iv; - - /* Min size for a buffer of stride + ivsize, aligned to alignmask */ - size = aligned_stride + ivsize + - (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); - - walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); - if (!walk->buffer) - return -ENOMEM; - - iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride; - - walk->iv = memcpy(iv, walk->iv, walk->ivsize); - return 0; -} - -static int skcipher_walk_first(struct skcipher_walk *walk) -{ - if (WARN_ON_ONCE(in_hardirq())) - return -EDEADLK; - - walk->buffer = NULL; - if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { - int err = skcipher_copy_iv(walk); - if (err) - return err; - } - - walk->page = NULL; - - return skcipher_walk_next(walk); -} - int skcipher_walk_virt(struct skcipher_walk *__restrict walk, struct skcipher_request *__restrict req, bool atomic) { @@ -294,10 +49,8 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk, walk->nbytes = 0; walk->iv = req->iv; walk->oiv = req->iv; - if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic) - walk->flags = SKCIPHER_WALK_SLEEP; - else - walk->flags = 0; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)) + atomic = true; if (unlikely(!walk->total)) return 0; @@ -314,7 +67,7 @@ int skcipher_walk_virt(struct skcipher_walk *__restrict walk, else walk->stride = alg->walksize; - return skcipher_walk_first(walk); + return skcipher_walk_first(walk, atomic); } EXPORT_SYMBOL_GPL(skcipher_walk_virt); @@ -327,10 +80,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk, walk->nbytes = 0; walk->iv = req->iv; walk->oiv = req->iv; - if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic) - walk->flags = SKCIPHER_WALK_SLEEP; - else - walk->flags = 0; + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)) + atomic = true; if (unlikely(!walk->total)) return 0; @@ -343,7 +94,7 @@ static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk, walk->ivsize = 
crypto_aead_ivsize(tfm); walk->alignmask = crypto_aead_alignmask(tfm); - return skcipher_walk_first(walk); + return skcipher_walk_first(walk, atomic); } int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, @@ -620,6 +371,7 @@ static const struct crypto_type crypto_skcipher_type = { .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), + .algsize = offsetof(struct skcipher_alg, base), }; int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index a2d23a46924e..7529139fcc96 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -9,15 +9,10 @@ */ #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sm3.h> #include <crypto/sm3_base.h> -#include <linux/bitops.h> -#include <asm/byteorder.h> -#include <linux/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, @@ -30,38 +25,28 @@ EXPORT_SYMBOL_GPL(sm3_zero_message_hash); static int crypto_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - sm3_update(shash_desc_ctx(desc), data, len); - return 0; -} - -static int crypto_sm3_final(struct shash_desc *desc, u8 *out) -{ - sm3_final(shash_desc_ctx(desc), out); - return 0; + return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic); } static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) { - struct sm3_state *sctx = shash_desc_ctx(desc); - - if (len) - sm3_update(sctx, data, len); - sm3_final(sctx, hash); - return 0; + sm3_base_do_finup(desc, data, len, sm3_block_generic); + return sm3_base_finish(desc, hash); } static struct shash_alg sm3_alg = { .digestsize = SM3_DIGEST_SIZE, .init = sm3_base_init, .update = crypto_sm3_update, - .final = crypto_sm3_final, .finup = crypto_sm3_finup, - .descsize = sizeof(struct sm3_state), + .descsize = SM3_STATE_SIZE, .base = { .cra_name = "sm3", .cra_driver_name = "sm3-generic", .cra_priority = 100, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SM3_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -77,7 +62,7 @@ static void __exit sm3_generic_mod_fini(void) crypto_unregister_shash(&sm3_alg); } -subsys_initcall(sm3_generic_mod_init); +module_init(sm3_generic_mod_init); module_exit(sm3_generic_mod_fini); MODULE_LICENSE("GPL v2"); diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c index 7df86369ac00..d57444e8428c 100644 --- a/crypto/sm4_generic.c +++ b/crypto/sm4_generic.c @@ -83,7 +83,7 @@ static void __exit sm4_fini(void) crypto_unregister_alg(&sm4_alg); } -subsys_initcall(sm4_init); +module_init(sm4_init); module_exit(sm4_fini); MODULE_DESCRIPTION("SM4 Cipher Algorithm"); diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c index dc625ffc54ad..57bbf70f4c22 100644 --- a/crypto/streebog_generic.c +++ b/crypto/streebog_generic.c @@ -13,9 +13,10 @@ */ #include <crypto/internal/hash.h> -#include <linux/module.h> -#include <linux/crypto.h> #include <crypto/streebog.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> static const struct streebog_uint512 buffer0 = { { 0, 0, 0, 0, 0, 0, 0, 0 @@ -919,17 +920,6 @@ static int streebog_init(struct shash_desc *desc) return 0; } -static void streebog_pad(struct streebog_state *ctx) -{ - if 
(ctx->fillsize >= STREEBOG_BLOCK_SIZE) - return; - - memset(ctx->buffer + ctx->fillsize, 0, - sizeof(ctx->buffer) - ctx->fillsize); - - ctx->buffer[ctx->fillsize] = 1; -} - static void streebog_add512(const struct streebog_uint512 *x, const struct streebog_uint512 *y, struct streebog_uint512 *r) @@ -984,16 +974,23 @@ static void streebog_stage2(struct streebog_state *ctx, const u8 *data) streebog_add512(&ctx->Sigma, &m, &ctx->Sigma); } -static void streebog_stage3(struct streebog_state *ctx) +static void streebog_stage3(struct streebog_state *ctx, const u8 *src, + unsigned int len) { struct streebog_uint512 buf = { { 0 } }; + union { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 m; + } u = {}; - buf.qword[0] = cpu_to_le64(ctx->fillsize << 3); - streebog_pad(ctx); + buf.qword[0] = cpu_to_le64(len << 3); + memcpy(u.buffer, src, len); + u.buffer[len] = 1; - streebog_g(&ctx->h, &ctx->N, &ctx->m); + streebog_g(&ctx->h, &ctx->N, &u.m); streebog_add512(&ctx->N, &buf, &ctx->N); - streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma); + streebog_add512(&ctx->Sigma, &u.m, &ctx->Sigma); + memzero_explicit(&u, sizeof(u)); streebog_g(&ctx->h, &buffer0, &ctx->N); streebog_g(&ctx->h, &buffer0, &ctx->Sigma); memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512)); @@ -1003,42 +1000,22 @@ static int streebog_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct streebog_state *ctx = shash_desc_ctx(desc); - size_t chunksize; - if (ctx->fillsize) { - chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize; - if (chunksize > len) - chunksize = len; - memcpy(&ctx->buffer[ctx->fillsize], data, chunksize); - ctx->fillsize += chunksize; - len -= chunksize; - data += chunksize; - - if (ctx->fillsize == STREEBOG_BLOCK_SIZE) { - streebog_stage2(ctx, ctx->buffer); - ctx->fillsize = 0; - } - } - - while (len >= STREEBOG_BLOCK_SIZE) { + do { streebog_stage2(ctx, data); data += STREEBOG_BLOCK_SIZE; len -= STREEBOG_BLOCK_SIZE; - } + } while (len >= STREEBOG_BLOCK_SIZE); - if (len) { - memcpy(&ctx->buffer, data, len); - ctx->fillsize = len; - } - return 0; + return len; } -static int streebog_final(struct shash_desc *desc, u8 *digest) +static int streebog_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *digest) { struct streebog_state *ctx = shash_desc_ctx(desc); - streebog_stage3(ctx); - ctx->fillsize = 0; + streebog_stage3(ctx, src, len); if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE) memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE); else @@ -1050,11 +1027,12 @@ static struct shash_alg algs[2] = { { .digestsize = STREEBOG256_DIGEST_SIZE, .init = streebog_init, .update = streebog_update, - .final = streebog_final, + .finup = streebog_finup, .descsize = sizeof(struct streebog_state), .base = { .cra_name = "streebog256", .cra_driver_name = "streebog256-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = STREEBOG_BLOCK_SIZE, .cra_module = THIS_MODULE, }, @@ -1062,11 +1040,12 @@ static struct shash_alg algs[2] = { { .digestsize = STREEBOG512_DIGEST_SIZE, .init = streebog_init, .update = streebog_update, - .final = streebog_final, + .finup = streebog_finup, .descsize = sizeof(struct streebog_state), .base = { .cra_name = "streebog512", .cra_driver_name = "streebog512-generic", + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = STREEBOG_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -1082,7 +1061,7 @@ static void __exit streebog_mod_fini(void) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } 
-subsys_initcall(streebog_mod_init); +module_init(streebog_mod_init); module_exit(streebog_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 96f4a66be14c..d1d88debbd71 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Quick & dirty crypto testing module. + * Quick & dirty crypto benchmarking module. * - * This will only exist until we have a better testing mechanism + * This will only exist until we have a better benchmarking mechanism * (e.g. a char device). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> @@ -39,7 +39,7 @@ #include "tcrypt.h" /* - * Need slab memory for testing (size in number of pages). + * Need slab memory for benchmarking (size in number of pages). */ #define TVMEMSIZE 4 @@ -716,207 +716,6 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) return crypto_wait_req(ret, wait); } -struct test_mb_ahash_data { - struct scatterlist sg[XBUFSIZE]; - char result[64]; - struct ahash_request *req; - struct crypto_wait wait; - char *xbuf[XBUFSIZE]; -}; - -static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb, - int *rc) -{ - int i, err; - - /* Fire up a bunch of concurrent requests */ - err = crypto_ahash_digest(data[0].req); - - /* Wait for all requests to finish */ - err = crypto_wait_req(err, &data[0].wait); - if (num_mb < 2) - return err; - - for (i = 0; i < num_mb; i++) { - rc[i] = ahash_request_err(data[i].req); - if (rc[i]) { - pr_info("concurrent request %d error %d\n", i, rc[i]); - err = rc[i]; - } - } - - return err; -} - -static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, - int secs, u32 num_mb) -{ - unsigned long start, end; - int bcount; - int ret = 0; - int *rc; - - rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - for (start = jiffies, end = start + secs * HZ, bcount = 0; - time_before(jiffies, end); bcount++) { - ret = do_mult_ahash_op(data, num_mb, rc); - if (ret) - goto out; - } - - pr_cont("%d operations in %d seconds (%llu bytes)\n", - bcount * num_mb, secs, (u64)bcount * blen * num_mb); - -out: - kfree(rc); - return ret; -} - -static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen, - u32 num_mb) -{ - unsigned long cycles = 0; - int ret = 0; - int i; - int *rc; - - rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - /* Warm-up run. */ - for (i = 0; i < 4; i++) { - ret = do_mult_ahash_op(data, num_mb, rc); - if (ret) - goto out; - } - - /* The real thing. 
*/ - for (i = 0; i < 8; i++) { - cycles_t start, end; - - start = get_cycles(); - ret = do_mult_ahash_op(data, num_mb, rc); - end = get_cycles(); - - if (ret) - goto out; - - cycles += end - start; - } - - pr_cont("1 operation in %lu cycles (%d bytes)\n", - (cycles + 4) / (8 * num_mb), blen); - -out: - kfree(rc); - return ret; -} - -static void test_mb_ahash_speed(const char *algo, unsigned int secs, - struct hash_speed *speed, u32 num_mb) -{ - struct test_mb_ahash_data *data; - struct crypto_ahash *tfm; - unsigned int i, j, k; - int ret; - - data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); - if (!data) - return; - - tfm = crypto_alloc_ahash(algo, 0, 0); - if (IS_ERR(tfm)) { - pr_err("failed to load transform for %s: %ld\n", - algo, PTR_ERR(tfm)); - goto free_data; - } - - for (i = 0; i < num_mb; ++i) { - if (testmgr_alloc_buf(data[i].xbuf)) - goto out; - - crypto_init_wait(&data[i].wait); - - data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!data[i].req) { - pr_err("alg: hash: Failed to allocate request for %s\n", - algo); - goto out; - } - - - if (i) { - ahash_request_set_callback(data[i].req, 0, NULL, NULL); - ahash_request_chain(data[i].req, data[0].req); - } else - ahash_request_set_callback(data[0].req, 0, - crypto_req_done, - &data[0].wait); - - sg_init_table(data[i].sg, XBUFSIZE); - for (j = 0; j < XBUFSIZE; j++) { - sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE); - memset(data[i].xbuf[j], 0xff, PAGE_SIZE); - } - } - - pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, - get_driver_name(crypto_ahash, tfm)); - - for (i = 0; speed[i].blen != 0; i++) { - /* For some reason this only tests digests. */ - if (speed[i].blen != speed[i].plen) - continue; - - if (speed[i].blen > XBUFSIZE * PAGE_SIZE) { - pr_err("template (%u) too big for tvmem (%lu)\n", - speed[i].blen, XBUFSIZE * PAGE_SIZE); - goto out; - } - - if (klen) - crypto_ahash_setkey(tfm, tvmem[0], klen); - - for (k = 0; k < num_mb; k++) - ahash_request_set_crypt(data[k].req, data[k].sg, - data[k].result, speed[i].blen); - - pr_info("test%3u " - "(%5u byte blocks,%5u bytes per update,%4u updates): ", - i, speed[i].blen, speed[i].plen, - speed[i].blen / speed[i].plen); - - if (secs) { - ret = test_mb_ahash_jiffies(data, speed[i].blen, secs, - num_mb); - cond_resched(); - } else { - ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb); - } - - - if (ret) { - pr_err("At least one hashing failed ret=%d\n", ret); - break; - } - } - -out: - ahash_request_free(data[0].req); - - for (k = 0; k < num_mb; ++k) - testmgr_free_buf(data[k].xbuf); - - crypto_free_ahash(tfm); - -free_data: - kfree(data); -} - static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, char *out, int secs) { @@ -2584,36 +2383,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("sm3", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; - case 450: - test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 451: - test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 452: - test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 453: - test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, - num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 454: - test_mb_ahash_speed("streebog256", sec, - 
generic_hash_speed_template, num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; - case 455: - test_mb_ahash_speed("streebog512", sec, - generic_hash_speed_template, num_mb); - if (mode > 400 && mode < 500) break; - fallthrough; case 499: break; @@ -3099,5 +2868,5 @@ module_param(klen, uint, 0); MODULE_PARM_DESC(klen, "Key length (defaults to 0)"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Quick & dirty crypto testing module"); +MODULE_DESCRIPTION("Quick & dirty crypto benchmarking module"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 96c843a24607..7f938ac93e58 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Quick & dirty crypto testing module. + * Quick & dirty crypto benchmarking module. * - * This will only exist until we have a better testing mechanism + * This will only exist until we have a better benchmarking mechanism * (e.g. a char device). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> diff --git a/crypto/tea.c b/crypto/tea.c index b315da8c89eb..cb05140e3470 100644 --- a/crypto/tea.c +++ b/crypto/tea.c @@ -255,7 +255,7 @@ MODULE_ALIAS_CRYPTO("tea"); MODULE_ALIAS_CRYPTO("xtea"); MODULE_ALIAS_CRYPTO("xeta"); -subsys_initcall(tea_mod_init); +module_init(tea_mod_init); module_exit(tea_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 82977ea25db3..72005074a5c2 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -43,22 +43,17 @@ MODULE_IMPORT_NS("CRYPTO_INTERNAL"); static bool notests; module_param(notests, bool, 0644); -MODULE_PARM_DESC(notests, "disable crypto self-tests"); +MODULE_PARM_DESC(notests, "disable all crypto self-tests"); -static bool panic_on_fail; -module_param(panic_on_fail, bool, 0444); - -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS -static bool noextratests; -module_param(noextratests, bool, 0644); -MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests"); +static bool noslowtests; +module_param(noslowtests, bool, 0644); +MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests"); static unsigned int fuzz_iterations = 100; module_param(fuzz_iterations, uint, 0644); MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations"); -#endif -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#ifndef CONFIG_CRYPTO_SELFTESTS /* a perfect nop */ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) @@ -324,10 +319,9 @@ struct testvec_config { /* * The following are the lists of testvec_configs to test for each algorithm - * type when the basic crypto self-tests are enabled, i.e. when - * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test - * coverage, while keeping the test time much shorter than the full fuzz tests - * so that the basic tests can be enabled in a wider range of circumstances. + * type when the fast crypto self-tests are enabled. They aim to provide good + * test coverage, while keeping the test time much shorter than the full tests + * so that the fast tests can be used to fulfill FIPS 140 testing requirements. */ /* Configs for skciphers and aeads */ @@ -876,8 +870,6 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize, err; \ }) -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - /* * The fuzz tests use prandom instead of the normal Linux RNG since they don't * need cryptographically secure random numbers. 
This greatly improves the @@ -1242,15 +1234,6 @@ too_long: algname); return -ENAMETOOLONG; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static void crypto_disable_simd_for_test(void) -{ -} - -static void crypto_reenable_simd_for_test(void) -{ -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int build_hash_sglist(struct test_sglist *tsgl, const struct hash_testvec *vec, @@ -1691,8 +1674,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; @@ -1709,17 +1691,15 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS /* * Generate a hash test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated. */ static void generate_random_hash_testvec(struct rnd_state *rng, - struct shash_desc *desc, + struct ahash_request *req, struct hash_testvec *vec, unsigned int maxkeysize, unsigned int maxdatasize, @@ -1741,16 +1721,17 @@ static void generate_random_hash_testvec(struct rnd_state *rng, vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize); generate_random_bytes(rng, (u8 *)vec->key, vec->ksize); - vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key, - vec->ksize); + vec->setkey_error = crypto_ahash_setkey( + crypto_ahash_reqtfm(req), vec->key, vec->ksize); /* If the key couldn't be set, no need to continue to digest. */ if (vec->setkey_error) goto done; } /* Digest */ - vec->digest_error = crypto_shash_digest(desc, vec->plaintext, - vec->psize, (u8 *)vec->digest); + vec->digest_error = crypto_hash_digest( + crypto_ahash_reqtfm(req), vec->plaintext, + vec->psize, (u8 *)vec->digest); done: snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"", vec->psize, vec->ksize); @@ -1775,8 +1756,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver, const char *driver = crypto_ahash_driver_name(tfm); struct rnd_state rng; char _generic_driver[CRYPTO_MAX_ALG_NAME]; - struct crypto_shash *generic_tfm = NULL; - struct shash_desc *generic_desc = NULL; + struct ahash_request *generic_req = NULL; + struct crypto_ahash *generic_tfm = NULL; unsigned int i; struct hash_testvec vec = { 0 }; char vec_name[64]; @@ -1784,7 +1765,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, char cfgname[TESTVEC_CONFIG_NAMELEN]; int err; - if (noextratests) + if (noslowtests) return 0; init_rnd_state(&rng); @@ -1799,7 +1780,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */ return 0; - generic_tfm = crypto_alloc_shash(generic_driver, 0, 0); + generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0); if (IS_ERR(generic_tfm)) { err = PTR_ERR(generic_tfm); if (err == -ENOENT) { @@ -1818,27 +1799,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver, goto out; } - generic_desc = kzalloc(sizeof(*desc) + - crypto_shash_descsize(generic_tfm), GFP_KERNEL); - if (!generic_desc) { + generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL); + if (!generic_req) { err = -ENOMEM; goto out; } - generic_desc->tfm = generic_tfm; /* Check the algorithm properties for consistency. 
*/ - if (digestsize != crypto_shash_digestsize(generic_tfm)) { + if (digestsize != crypto_ahash_digestsize(generic_tfm)) { pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n", driver, digestsize, - crypto_shash_digestsize(generic_tfm)); + crypto_ahash_digestsize(generic_tfm)); err = -EINVAL; goto out; } - if (blocksize != crypto_shash_blocksize(generic_tfm)) { + if (blocksize != crypto_ahash_blocksize(generic_tfm)) { pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n", - driver, blocksize, crypto_shash_blocksize(generic_tfm)); + driver, blocksize, crypto_ahash_blocksize(generic_tfm)); err = -EINVAL; goto out; } @@ -1857,7 +1836,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, } for (i = 0; i < fuzz_iterations * 8; i++) { - generate_random_hash_testvec(&rng, generic_desc, &vec, + generate_random_hash_testvec(&rng, generic_req, &vec, maxkeysize, maxdatasize, vec_name, sizeof(vec_name)); generate_random_testvec_config(&rng, cfg, cfgname, @@ -1875,21 +1854,10 @@ out: kfree(vec.key); kfree(vec.plaintext); kfree(vec.digest); - crypto_free_shash(generic_tfm); - kfree_sensitive(generic_desc); + ahash_request_free(generic_req); + crypto_free_ahash(generic_tfm); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_hash_vs_generic_impl(const char *generic_driver, - unsigned int maxkeysize, - struct ahash_request *req, - struct shash_desc *desc, - struct test_sglist *tsgl, - u8 *hashstate) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int alloc_shash(const char *driver, u32 type, u32 mask, struct crypto_shash **tfm_ret, @@ -1900,7 +1868,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask, tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { - if (PTR_ERR(tfm) == -ENOENT) { + if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) { /* * This algorithm is only available through the ahash * API, not the shash API, so skip the shash tests. @@ -2263,8 +2231,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; @@ -2281,13 +2248,10 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - -struct aead_extra_tests_ctx { +struct aead_slow_tests_ctx { struct rnd_state rng; struct aead_request *req; struct crypto_aead *tfm; @@ -2462,8 +2426,7 @@ static void generate_random_aead_testvec(struct rnd_state *rng, vec->alen, vec->plen, authsize, vec->klen, vec->novrfy); } -static void try_to_generate_inauthentic_testvec( - struct aead_extra_tests_ctx *ctx) +static void try_to_generate_inauthentic_testvec(struct aead_slow_tests_ctx *ctx) { int i; @@ -2482,7 +2445,7 @@ static void try_to_generate_inauthentic_testvec( * Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the * result of an encryption with the key) and verify that decryption fails. */ -static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) +static int test_aead_inauthentic_inputs(struct aead_slow_tests_ctx *ctx) { unsigned int i; int err; @@ -2517,7 +2480,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx) * Test the AEAD algorithm against the corresponding generic implementation, if * one is available. 
*/ -static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx) +static int test_aead_vs_generic_impl(struct aead_slow_tests_ctx *ctx) { struct crypto_aead *tfm = ctx->tfm; const char *algname = crypto_aead_alg(tfm)->base.cra_name; @@ -2621,15 +2584,15 @@ out: return err; } -static int test_aead_extra(const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) +static int test_aead_slow(const struct alg_test_desc *test_desc, + struct aead_request *req, + struct cipher_test_sglists *tsgls) { - struct aead_extra_tests_ctx *ctx; + struct aead_slow_tests_ctx *ctx; unsigned int i; int err; - if (noextratests) + if (noslowtests) return 0; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -2671,14 +2634,6 @@ out: kfree(ctx); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_aead_extra(const struct alg_test_desc *test_desc, - struct aead_request *req, - struct cipher_test_sglists *tsgls) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int test_aead(int enc, const struct aead_test_suite *suite, struct aead_request *req, @@ -2744,7 +2699,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, if (err) goto out; - err = test_aead_extra(desc, req, tsgls); + err = test_aead_slow(desc, req, tsgls); out: free_cipher_test_sglists(tsgls); aead_request_free(req); @@ -3018,8 +2973,7 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec, return err; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - if (!noextratests) { + if (!noslowtests) { struct rnd_state rng; struct testvec_config cfg; char cfgname[TESTVEC_CONFIG_NAMELEN]; @@ -3036,11 +2990,9 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec, cond_resched(); } } -#endif return 0; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS /* * Generate a symmetric cipher test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated. @@ -3123,7 +3075,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver, char cfgname[TESTVEC_CONFIG_NAMELEN]; int err; - if (noextratests) + if (noslowtests) return 0; init_rnd_state(&rng); @@ -3239,14 +3191,6 @@ out: skcipher_request_free(generic_req); return err; } -#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ -static int test_skcipher_vs_generic_impl(const char *generic_driver, - struct skcipher_request *req, - struct cipher_test_sglists *tsgls) -{ - return 0; -} -#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int test_skcipher(int enc, const struct cipher_test_suite *suite, struct skcipher_request *req, @@ -5407,12 +5351,6 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "poly1305", - .test = alg_test_hash, - .suite = { - .hash = __VECS(poly1305_tv_template) - } - }, { .alg = "polyval", .test = alg_test_hash, .suite = { @@ -5769,9 +5707,8 @@ static void testmgr_onetime_init(void) alg_check_test_descs_order(); alg_check_testvec_configs(); -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n"); -#endif + if (!noslowtests) + pr_warn("alg: full crypto tests enabled. 
This is intended for developer use only.\n"); } static int alg_find_test(const char *alg) @@ -5860,11 +5797,10 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) test_done: if (rc) { - if (fips_enabled || panic_on_fail) { + if (fips_enabled) { fips_fail_notify(); - panic("alg: self-tests for %s (%s) failed in %s mode!\n", - driver, alg, - fips_enabled ? "fips" : "panic_on_fail"); + panic("alg: self-tests for %s (%s) failed in fips mode!\n", + driver, alg); } pr_warn("alg: self-tests for %s using %s failed (rc=%d)", alg, driver, rc); @@ -5909,6 +5845,6 @@ non_fips_alg: return alg_fips_disabled(driver, alg); } -#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */ +#endif /* CONFIG_CRYPTO_SELFTESTS */ EXPORT_SYMBOL_GPL(alg_test); diff --git a/crypto/testmgr.h b/crypto/testmgr.h index afc10af59b0a..32d099ac9e73 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -8836,294 +8836,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = { }, }; -/* - * Poly1305 test vectors from RFC7539 A.3. - */ - -static const struct hash_testvec poly1305_tv_template[] = { - { /* Test Vector #1 */ - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 96, - .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #2 */ - .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e" - "\x41\x6e\x79\x20\x73\x75\x62\x6d" - "\x69\x73\x73\x69\x6f\x6e\x20\x74" - "\x6f\x20\x74\x68\x65\x20\x49\x45" - "\x54\x46\x20\x69\x6e\x74\x65\x6e" - "\x64\x65\x64\x20\x62\x79\x20\x74" - "\x68\x65\x20\x43\x6f\x6e\x74\x72" - "\x69\x62\x75\x74\x6f\x72\x20\x66" - "\x6f\x72\x20\x70\x75\x62\x6c\x69" - "\x63\x61\x74\x69\x6f\x6e\x20\x61" - "\x73\x20\x61\x6c\x6c\x20\x6f\x72" - "\x20\x70\x61\x72\x74\x20\x6f\x66" - "\x20\x61\x6e\x20\x49\x45\x54\x46" - "\x20\x49\x6e\x74\x65\x72\x6e\x65" - "\x74\x2d\x44\x72\x61\x66\x74\x20" - "\x6f\x72\x20\x52\x46\x43\x20\x61" - "\x6e\x64\x20\x61\x6e\x79\x20\x73" - "\x74\x61\x74\x65\x6d\x65\x6e\x74" - "\x20\x6d\x61\x64\x65\x20\x77\x69" - "\x74\x68\x69\x6e\x20\x74\x68\x65" - "\x20\x63\x6f\x6e\x74\x65\x78\x74" - "\x20\x6f\x66\x20\x61\x6e\x20\x49" - "\x45\x54\x46\x20\x61\x63\x74\x69" - "\x76\x69\x74\x79\x20\x69\x73\x20" - "\x63\x6f\x6e\x73\x69\x64\x65\x72" - "\x65\x64\x20\x61\x6e\x20\x22\x49" - "\x45\x54\x46\x20\x43\x6f\x6e\x74" - "\x72\x69\x62\x75\x74\x69\x6f\x6e" - "\x22\x2e\x20\x53\x75\x63\x68\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x63\x6c\x75" - "\x64\x65\x20\x6f\x72\x61\x6c\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x20\x49\x45" - "\x54\x46\x20\x73\x65\x73\x73\x69" - "\x6f\x6e\x73\x2c\x20\x61\x73\x20" - "\x77\x65\x6c\x6c\x20\x61\x73\x20" - "\x77\x72\x69\x74\x74\x65\x6e\x20" - "\x61\x6e\x64\x20\x65\x6c\x65\x63" - "\x74\x72\x6f\x6e\x69\x63\x20\x63" - "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" - "\x74\x69\x6f\x6e\x73\x20\x6d\x61" - "\x64\x65\x20\x61\x74\x20\x61\x6e" - "\x79\x20\x74\x69\x6d\x65\x20\x6f" - "\x72\x20\x70\x6c\x61\x63\x65\x2c" - "\x20\x77\x68\x69\x63\x68\x20\x61" - 
"\x72\x65\x20\x61\x64\x64\x72\x65" - "\x73\x73\x65\x64\x20\x74\x6f", - .psize = 407, - .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e", - }, { /* Test Vector #3 */ - .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70" - "\xf0\xef\xca\x96\x22\x7a\x86\x3e" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x41\x6e\x79\x20\x73\x75\x62\x6d" - "\x69\x73\x73\x69\x6f\x6e\x20\x74" - "\x6f\x20\x74\x68\x65\x20\x49\x45" - "\x54\x46\x20\x69\x6e\x74\x65\x6e" - "\x64\x65\x64\x20\x62\x79\x20\x74" - "\x68\x65\x20\x43\x6f\x6e\x74\x72" - "\x69\x62\x75\x74\x6f\x72\x20\x66" - "\x6f\x72\x20\x70\x75\x62\x6c\x69" - "\x63\x61\x74\x69\x6f\x6e\x20\x61" - "\x73\x20\x61\x6c\x6c\x20\x6f\x72" - "\x20\x70\x61\x72\x74\x20\x6f\x66" - "\x20\x61\x6e\x20\x49\x45\x54\x46" - "\x20\x49\x6e\x74\x65\x72\x6e\x65" - "\x74\x2d\x44\x72\x61\x66\x74\x20" - "\x6f\x72\x20\x52\x46\x43\x20\x61" - "\x6e\x64\x20\x61\x6e\x79\x20\x73" - "\x74\x61\x74\x65\x6d\x65\x6e\x74" - "\x20\x6d\x61\x64\x65\x20\x77\x69" - "\x74\x68\x69\x6e\x20\x74\x68\x65" - "\x20\x63\x6f\x6e\x74\x65\x78\x74" - "\x20\x6f\x66\x20\x61\x6e\x20\x49" - "\x45\x54\x46\x20\x61\x63\x74\x69" - "\x76\x69\x74\x79\x20\x69\x73\x20" - "\x63\x6f\x6e\x73\x69\x64\x65\x72" - "\x65\x64\x20\x61\x6e\x20\x22\x49" - "\x45\x54\x46\x20\x43\x6f\x6e\x74" - "\x72\x69\x62\x75\x74\x69\x6f\x6e" - "\x22\x2e\x20\x53\x75\x63\x68\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x63\x6c\x75" - "\x64\x65\x20\x6f\x72\x61\x6c\x20" - "\x73\x74\x61\x74\x65\x6d\x65\x6e" - "\x74\x73\x20\x69\x6e\x20\x49\x45" - "\x54\x46\x20\x73\x65\x73\x73\x69" - "\x6f\x6e\x73\x2c\x20\x61\x73\x20" - "\x77\x65\x6c\x6c\x20\x61\x73\x20" - "\x77\x72\x69\x74\x74\x65\x6e\x20" - "\x61\x6e\x64\x20\x65\x6c\x65\x63" - "\x74\x72\x6f\x6e\x69\x63\x20\x63" - "\x6f\x6d\x6d\x75\x6e\x69\x63\x61" - "\x74\x69\x6f\x6e\x73\x20\x6d\x61" - "\x64\x65\x20\x61\x74\x20\x61\x6e" - "\x79\x20\x74\x69\x6d\x65\x20\x6f" - "\x72\x20\x70\x6c\x61\x63\x65\x2c" - "\x20\x77\x68\x69\x63\x68\x20\x61" - "\x72\x65\x20\x61\x64\x64\x72\x65" - "\x73\x73\x65\x64\x20\x74\x6f", - .psize = 407, - .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf" - "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0", - }, { /* Test Vector #4 */ - .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" - "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" - "\x47\x39\x17\xc1\x40\x2b\x80\x09" - "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" - "\x27\x54\x77\x61\x73\x20\x62\x72" - "\x69\x6c\x6c\x69\x67\x2c\x20\x61" - "\x6e\x64\x20\x74\x68\x65\x20\x73" - "\x6c\x69\x74\x68\x79\x20\x74\x6f" - "\x76\x65\x73\x0a\x44\x69\x64\x20" - "\x67\x79\x72\x65\x20\x61\x6e\x64" - "\x20\x67\x69\x6d\x62\x6c\x65\x20" - "\x69\x6e\x20\x74\x68\x65\x20\x77" - "\x61\x62\x65\x3a\x0a\x41\x6c\x6c" - "\x20\x6d\x69\x6d\x73\x79\x20\x77" - "\x65\x72\x65\x20\x74\x68\x65\x20" - "\x62\x6f\x72\x6f\x67\x6f\x76\x65" - "\x73\x2c\x0a\x41\x6e\x64\x20\x74" - "\x68\x65\x20\x6d\x6f\x6d\x65\x20" - "\x72\x61\x74\x68\x73\x20\x6f\x75" - "\x74\x67\x72\x61\x62\x65\x2e", - .psize = 159, - .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61" - "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62", - }, { /* Test Vector #5 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .psize = 48, - .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #6 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" 
- "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 48, - .digest = "\x03\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #7 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xf0\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\x11\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 80, - .digest = "\x05\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #8 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe" - "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .psize = 80, - .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #9 */ - .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xfd\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .psize = 48, - .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - }, { /* Test Vector #10 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x04\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x33\x94\xd7\x50\x5e\x43\x79\xcd" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 96, - .digest = "\x14\x00\x00\x00\x00\x00\x00\x00" - "\x55\x00\x00\x00\x00\x00\x00\x00", - }, { /* Test Vector #11 */ - .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x04\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\xe3\x35\x94\xd7\x50\x5e\x43\xb9" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x33\x94\xd7\x50\x5e\x43\x79\xcd" - "\x01\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .psize = 80, - .digest = "\x13\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - }, { /* Regression test for overflow in AVX2 implementation */ - .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" 
- "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff", - .psize = 300, - .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8" - "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1", - } -}; - /* NHPoly1305 test vectors from https://github.com/google/adiantum */ static const struct hash_testvec nhpoly1305_tv_template[] = { { diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index 19f2b365e140..368018cfa9bf 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c @@ -187,7 +187,7 @@ static void __exit twofish_mod_fini(void) crypto_unregister_alg(&alg); } -subsys_initcall(twofish_mod_init); +module_init(twofish_mod_init); module_exit(twofish_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/wp512.c b/crypto/wp512.c index 07994e5ebf4e..41f13d490333 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c @@ -1169,7 +1169,7 @@ MODULE_ALIAS_CRYPTO("wp512"); MODULE_ALIAS_CRYPTO("wp384"); MODULE_ALIAS_CRYPTO("wp256"); -subsys_initcall(wp512_mod_init); +module_init(wp512_mod_init); module_exit(wp512_mod_fini); MODULE_LICENSE("GPL"); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index fc785667b134..6c5f6766fdd6 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -8,9 +8,12 @@ #include <crypto/internal/cipher.h> #include <crypto/internal/hash.h> +#include <crypto/utils.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x02020202, 0x02020202, @@ -30,22 +33,6 @@ struct xcbc_tfm_ctx { u8 consts[]; }; -/* - * +------------------------ - * | <shash desc> - * +------------------------ - * | xcbc_desc_ctx - * +------------------------ - * | odds (block size) - * +------------------------ - * | prev (block size) - * +------------------------ - */ -struct xcbc_desc_ctx { - unsigned int len; - u8 odds[]; -}; - #define XCBC_BLOCKSIZE 16 static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, @@ -70,13 +57,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = &ctx->odds[bs]; + u8 *prev = shash_desc_ctx(pdesc); - ctx->len = 0; memset(prev, 0, bs); - return 0; } @@ -85,77 +69,36 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, { struct crypto_shash *parent = pdesc->tfm; struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; - - /* checking the data can fill the block */ - if ((ctx->len + len) <= bs) { - memcpy(odds + ctx->len, p, len); - ctx->len += len; - return 0; - } - - /* filling odds with new data and encrypting it */ - memcpy(odds + ctx->len, p, bs 
- ctx->len); - len -= bs - ctx->len; - p += bs - ctx->len; - - crypto_xor(prev, odds, bs); - crypto_cipher_encrypt_one(tfm, prev, prev); + u8 *prev = shash_desc_ctx(pdesc); - /* clearing the length */ - ctx->len = 0; - - /* encrypting the rest of data */ - while (len > bs) { + do { crypto_xor(prev, p, bs); crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len -= bs; - } - - /* keeping the surplus of blocksize */ - if (len) { - memcpy(odds, p, len); - ctx->len = len; - } - - return 0; + } while (len >= bs); + return len; } -static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) +static int crypto_xcbc_digest_finup(struct shash_desc *pdesc, const u8 *src, + unsigned int len, u8 *out) { struct crypto_shash *parent = pdesc->tfm; struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); - struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = ctx->odds; - u8 *prev = odds + bs; + u8 *prev = shash_desc_ctx(pdesc); unsigned int offset = 0; - if (ctx->len != bs) { - unsigned int rlen; - u8 *p = odds + ctx->len; - - *p = 0x80; - p++; - - rlen = bs - ctx->len -1; - if (rlen) - memset(p, 0, rlen); - + crypto_xor(prev, src, len); + if (len != bs) { + prev[len] ^= 0x80; offset += bs; } - - crypto_xor(prev, odds, bs); crypto_xor(prev, &tctx->consts[offset], bs); - crypto_cipher_encrypt_one(tfm, out, prev); - return 0; } @@ -216,17 +159,18 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) + alg->cra_blocksize * 2; + inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = sizeof(struct xcbc_desc_ctx) + - alg->cra_blocksize * 2; + inst->alg.descsize = alg->cra_blocksize; inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; inst->alg.init = crypto_xcbc_digest_init; inst->alg.update = crypto_xcbc_digest_update; - inst->alg.final = crypto_xcbc_digest_final; + inst->alg.finup = crypto_xcbc_digest_finup; inst->alg.setkey = crypto_xcbc_digest_setkey; inst->free = shash_free_singlespawn_instance; @@ -255,7 +199,7 @@ static void __exit crypto_xcbc_module_exit(void) crypto_unregister_template(&crypto_xcbc_tmpl); } -subsys_initcall(crypto_xcbc_module_init); +module_init(crypto_xcbc_module_init); module_exit(crypto_xcbc_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/xctr.c b/crypto/xctr.c index 9c536ab6d2e5..607ab82cb19b 100644 --- a/crypto/xctr.c +++ b/crypto/xctr.c @@ -182,7 +182,7 @@ static void __exit crypto_xctr_module_exit(void) crypto_unregister_template(&crypto_xctr_tmpl); } -subsys_initcall(crypto_xctr_module_init); +module_init(crypto_xctr_module_init); module_exit(crypto_xctr_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/xts.c b/crypto/xts.c index 31529c9ef08f..3da8f5e053d6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); - if (err == -ENOENT) { + if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) /* Alas we screwed up the naming so we have 
to mangle the * cipher name. */ - if (!strncmp(cipher_name, "ecb(", 4)) { + if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(name, cipher_name + 4, sizeof(name)); @@ -466,7 +466,7 @@ static void __exit xts_module_exit(void) crypto_unregister_template(&xts_tmpl); } -subsys_initcall(xts_module_init); +module_init(xts_module_init); module_exit(xts_module_exit); MODULE_LICENSE("GPL"); diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c index ac206ad4184d..175bb7ae0fcd 100644 --- a/crypto/xxhash_generic.c +++ b/crypto/xxhash_generic.c @@ -96,7 +96,7 @@ static void __exit xxhash_mod_fini(void) crypto_unregister_shash(&alg); } -subsys_initcall(xxhash_mod_init); +module_init(xxhash_mod_init); module_exit(xxhash_mod_fini); MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>"); diff --git a/crypto/zstd.c b/crypto/zstd.c index 90bb4f36f846..7570e11b4ee6 100644 --- a/crypto/zstd.c +++ b/crypto/zstd.c @@ -196,7 +196,7 @@ static void __exit zstd_mod_fini(void) crypto_unregister_scomp(&scomp); } -subsys_initcall(zstd_mod_init); +module_init(zstd_mod_init); module_exit(zstd_mod_fini); MODULE_LICENSE("GPL"); diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig index be85336107f9..1919fbb169c7 100644 --- a/drivers/accel/habanalabs/Kconfig +++ b/drivers/accel/habanalabs/Kconfig @@ -6,7 +6,7 @@ config DRM_ACCEL_HABANALABS tristate "HabanaLabs AI accelerators" depends on DRM_ACCEL - depends on X86_64 + depends on X86 && X86_64 depends on PCI && HAS_IOMEM select GENERIC_ALLOCATOR select HWMON diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c index 8729a0c57d78..dc80ca921d90 100644 --- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c @@ -17,8 +17,6 @@ #include <linux/uaccess.h> #include <linux/vmalloc.h> -#include <asm/msr.h> - /* make sure there is space for all the signed info */ static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ); diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index f7fb7205028d..f6b9562779de 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -15,6 +15,7 @@ #include <acpi/ghes.h> #include <asm/cpu.h> #include <asm/mce.h> +#include <asm/msr.h> #include "apei/apei-internal.h" #include <ras/ras_event.h> @@ -234,7 +235,7 @@ static int __init extlog_init(void) u64 cap; int rc; - if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) || + if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) || !(cap & MCG_ELOG_P) || !extlog_get_l1addr()) return -ENODEV; diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index 794962c5c88e..b8d98b1b48ae 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem) return 0; } - err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter); + err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter); if (!err) { u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset + residency_info_ffh.gaddr. 
bit_width - 1, diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 3fde4496f8a2..6f8bbe1247a5 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -19,7 +19,7 @@ #include <linux/acpi.h> #include <linux/perf_event.h> #include <linux/platform_device.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/mwait.h> #include <xen/xen.h> diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 53996f1a2d80..64b8d1e19594 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -20,6 +20,7 @@ #include <acpi/processor.h> #ifdef CONFIG_X86 #include <asm/cpufeature.h> +#include <asm/msr.h> #endif #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 00d045e5f524..d1541a386fbc 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -18,9 +18,12 @@ #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/acpi.h> +#include <linux/uaccess.h> #include <acpi/processor.h> #include <asm/io.h> -#include <linux/uaccess.h> +#ifdef CONFIG_X86 +#include <asm/msr.h> +#endif /* ignore_tpc: * 0 -> acpi processor driver doesn't ignore _TPC values diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 94c6446604fc..98da8c4eea59 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -187,7 +187,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode, inode_lock(d_inode(root)); /* look it up */ - dentry = lookup_one_len(name, root, name_len); + dentry = lookup_noperm(&QSTR(name), root); if (IS_ERR(dentry)) { inode_unlock(d_inode(root)); ret = PTR_ERR(dentry); @@ -487,7 +487,7 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent, { struct dentry *dentry; - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) return dentry; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 50651435577c..7779ab0ca7ce 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); CPU_SHOW_VULN_FALLBACK(gds); CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); CPU_SHOW_VULN_FALLBACK(ghostwrite); +CPU_SHOW_VULN_FALLBACK(old_microcode); CPU_SHOW_VULN_FALLBACK(indirect_target_selection); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); @@ -617,6 +618,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL); +static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL); static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { @@ -635,6 +637,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_gather_data_sampling.attr, &dev_attr_reg_file_data_sampling.attr, &dev_attr_ghostwrite.attr, + &dev_attr_old_microcode.attr, &dev_attr_indirect_target_selection.attr, NULL }; diff --git a/drivers/base/node.c b/drivers/base/node.c index cd13ef287011..618712071a1e 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -468,7 +468,7 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(pgdat, 
NR_PAGETABLE)), nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), nid, 0UL, - nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), + nid, 0UL, nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), nid, K(sreclaimable + node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)), diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 0e60dd650b5e..70db08f3ac6f 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs); void platform_device_msi_free_irqs_all(struct device *dev) { msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); + msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN); } EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index e48b24be45ee..0f70e2374e7f 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -407,4 +407,23 @@ config BLKDEV_UBLK_LEGACY_OPCODES source "drivers/block/rnbd/Kconfig" +config BLK_DEV_ZONED_LOOP + tristate "Zoned loopback device support" + depends on BLK_DEV_ZONED + help + Saying Y here will allow you to create a zoned block device using + regular files for zones (one file per zone). This is useful to test + file systems, device mapper and applications that support zoned block + devices. To create a zoned loop device, no user utility is needed; a + zoned loop device can be created (or re-started) using a command + like: + + echo "add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11" > \ + /dev/zloop-control + + See Documentation/admin-guide/blockdev/zoned_loop.rst for usage + details. + + If unsure, say N. + endif # BLK_DEV diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 1105a2d4fdcb..097707aca725 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -41,5 +41,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/ obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/ obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o +obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 292f127cae0a..b1be6c510372 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -54,32 +54,33 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) /* * Insert a new page for a given sector, if one does not already exist. */ -static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) +static struct page *brd_insert_page(struct brd_device *brd, sector_t sector, + blk_opf_t opf) + __releases(rcu) + __acquires(rcu) { - pgoff_t idx = sector >> PAGE_SECTORS_SHIFT; - struct page *page; - int ret = 0; - - page = brd_lookup_page(brd, sector); - if (page) - return 0; + gfp_t gfp = (opf & REQ_NOWAIT) ? 
GFP_NOWAIT : GFP_NOIO; + struct page *page, *ret; + rcu_read_unlock(); page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM); + rcu_read_lock(); if (!page) - return -ENOMEM; + return ERR_PTR(-ENOMEM); xa_lock(&brd->brd_pages); - ret = __xa_insert(&brd->brd_pages, idx, page, gfp); - if (!ret) - brd->brd_nr_pages++; - xa_unlock(&brd->brd_pages); - - if (ret < 0) { + ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL, + page, gfp); + if (ret) { + xa_unlock(&brd->brd_pages); __free_page(page); - if (ret == -EBUSY) - ret = 0; + if (xa_is_err(ret)) + return ERR_PTR(xa_err(ret)); + return ret; } - return ret; + brd->brd_nr_pages++; + xa_unlock(&brd->brd_pages); + return page; } /* @@ -100,143 +101,77 @@ static void brd_free_pages(struct brd_device *brd) } /* - * copy_to_brd_setup must be called before copy_to_brd. It may sleep. + * Process a single segment. The segment is capped to not cross page boundaries + * in both the bio and the brd backing memory. */ -static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n, - gfp_t gfp) -{ - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; - size_t copy; - int ret; - - copy = min_t(size_t, n, PAGE_SIZE - offset); - ret = brd_insert_page(brd, sector, gfp); - if (ret) - return ret; - if (copy < n) { - sector += copy >> SECTOR_SHIFT; - ret = brd_insert_page(brd, sector, gfp); - } - return ret; -} - -/* - * Copy n bytes from src to the brd starting at sector. Does not sleep. - */ -static void copy_to_brd(struct brd_device *brd, const void *src, - sector_t sector, size_t n) +static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio) { + struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter); + sector_t sector = bio->bi_iter.bi_sector; + u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT; + blk_opf_t opf = bio->bi_opf; struct page *page; - void *dst; - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; - size_t copy; + void *kaddr; - copy = min_t(size_t, n, PAGE_SIZE - offset); - page = brd_lookup_page(brd, sector); - BUG_ON(!page); - - dst = kmap_atomic(page); - memcpy(dst + offset, src, copy); - kunmap_atomic(dst); - - if (copy < n) { - src += copy; - sector += copy >> SECTOR_SHIFT; - copy = n - copy; - page = brd_lookup_page(brd, sector); - BUG_ON(!page); - - dst = kmap_atomic(page); - memcpy(dst, src, copy); - kunmap_atomic(dst); - } -} + bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); -/* - * Copy n bytes to dst from the brd starting at sector. Does not sleep. - */ -static void copy_from_brd(void *dst, struct brd_device *brd, - sector_t sector, size_t n) -{ - struct page *page; - void *src; - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; - size_t copy; - - copy = min_t(size_t, n, PAGE_SIZE - offset); + rcu_read_lock(); page = brd_lookup_page(brd, sector); - if (page) { - src = kmap_atomic(page); - memcpy(dst, src + offset, copy); - kunmap_atomic(src); - } else - memset(dst, 0, copy); - - if (copy < n) { - dst += copy; - sector += copy >> SECTOR_SHIFT; - copy = n - copy; - page = brd_lookup_page(brd, sector); - if (page) { - src = kmap_atomic(page); - memcpy(dst, src, copy); - kunmap_atomic(src); - } else - memset(dst, 0, copy); + if (!page && op_is_write(opf)) { + page = brd_insert_page(brd, sector, opf); + if (IS_ERR(page)) + goto out_error; } -} - -/* - * Process a single bvec of a bio. 
- */ -static int brd_do_bvec(struct brd_device *brd, struct page *page, - unsigned int len, unsigned int off, blk_opf_t opf, - sector_t sector) -{ - void *mem; - int err = 0; + kaddr = bvec_kmap_local(&bv); if (op_is_write(opf)) { - /* - * Must use NOIO because we don't want to recurse back into the - * block or filesystem layers from page reclaim. - */ - gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO; - - err = copy_to_brd_setup(brd, sector, len, gfp); - if (err) - goto out; - } - - mem = kmap_atomic(page); - if (!op_is_write(opf)) { - copy_from_brd(mem + off, brd, sector, len); - flush_dcache_page(page); + memcpy_to_page(page, offset, kaddr, bv.bv_len); } else { - flush_dcache_page(page); - copy_to_brd(brd, mem + off, sector, len); + if (page) + memcpy_from_page(kaddr, page, offset, bv.bv_len); + else + memset(kaddr, 0, bv.bv_len); } - kunmap_atomic(mem); + kunmap_local(kaddr); + rcu_read_unlock(); + + bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len); + return true; + +out_error: + rcu_read_unlock(); + if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT)) + bio_wouldblock_error(bio); + else + bio_io_error(bio); + return false; +} -out: - return err; +static void brd_free_one_page(struct rcu_head *head) +{ + struct page *page = container_of(head, struct page, rcu_head); + + __free_page(page); } static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) { - sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS; + sector_t aligned_sector = round_up(sector, PAGE_SECTORS); + sector_t aligned_end = round_down( + sector + (size >> SECTOR_SHIFT), PAGE_SECTORS); struct page *page; - size -= (aligned_sector - sector) * SECTOR_SIZE; + if (aligned_end <= aligned_sector) + return; + xa_lock(&brd->brd_pages); - while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) { + while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) { page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT); if (page) { - __free_page(page); + call_rcu(&page->rcu_head, brd_free_one_page); brd->brd_nr_pages--; } aligned_sector += PAGE_SECTORS; - size -= PAGE_SIZE; } xa_unlock(&brd->brd_pages); } @@ -244,36 +179,18 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) static void brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; - sector_t sector = bio->bi_iter.bi_sector; - struct bio_vec bvec; - struct bvec_iter iter; if (unlikely(op_is_discard(bio->bi_opf))) { - brd_do_discard(brd, sector, bio->bi_iter.bi_size); + brd_do_discard(brd, bio->bi_iter.bi_sector, + bio->bi_iter.bi_size); bio_endio(bio); return; } - bio_for_each_segment(bvec, bio, iter) { - unsigned int len = bvec.bv_len; - int err; - - /* Don't support un-aligned buffer */ - WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) || - (len & (SECTOR_SIZE - 1))); - - err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, - bio->bi_opf, sector); - if (err) { - if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) { - bio_wouldblock_error(bio); - return; - } - bio_io_error(bio); + do { + if (!brd_rw_bvec(brd, bio)) return; - } - sector += len >> SECTOR_SHIFT; - } + } while (bio->bi_iter.bi_size); bio_endio(bio); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index b8ba7de08753..e2b1f377f585 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -979,9 +979,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, if (!file) return -EBADF; - if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter) - 
return -EINVAL; - error = loop_check_backing_file(file); if (error) return error; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 65b96c083b3c..d5cc7bd2875c 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -725,7 +725,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * scmd = blk_mq_rq_to_pdu(rq); if (cgc->buflen) { - ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, + ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen, GFP_NOIO); if (ret) goto out; diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 2ee6e9bd4e28..2df8941a6b14 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -147,12 +147,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess, bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1, rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL); - if (bio_add_page(bio, virt_to_page(data), datalen, - offset_in_page(data)) != datalen) { - rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n"); - err = -EINVAL; - goto bio_put; - } + bio_add_virt_nofail(bio, data, datalen); bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw)); if (bio_has_data(bio) && diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index dc104c025cd5..6f51072776f1 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -50,6 +50,8 @@ /* private ioctl command mirror */ #define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC) +#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE) +#define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV) #define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF) #define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF) @@ -64,7 +66,10 @@ | UBLK_F_CMD_IOCTL_ENCODE \ | UBLK_F_USER_COPY \ | UBLK_F_ZONED \ - | UBLK_F_USER_RECOVERY_FAIL_IO) + | UBLK_F_USER_RECOVERY_FAIL_IO \ + | UBLK_F_UPDATE_SIZE \ + | UBLK_F_AUTO_BUF_REG \ + | UBLK_F_QUIESCE) #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \ | UBLK_F_USER_RECOVERY_REISSUE \ @@ -77,7 +82,11 @@ UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT) struct ublk_rq_data { - struct kref ref; + refcount_t ref; + + /* for auto-unregister buffer in case of UBLK_F_AUTO_BUF_REG */ + u16 buf_index; + void *buf_ctx_handle; }; struct ublk_uring_cmd_pdu { @@ -99,6 +108,9 @@ struct ublk_uring_cmd_pdu { * setup in ublk uring_cmd handler */ struct ublk_queue *ubq; + + struct ublk_auto_buf_reg buf; + u16 tag; }; @@ -131,6 +143,14 @@ struct ublk_uring_cmd_pdu { */ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08 +/* + * request buffer is registered automatically, so we have to unregister it + * before completing this request. + * + * io_uring will unregister buffer automatically for us during exiting. 
+ */ +#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10 + /* atomic RW with ubq->cancel_lock */ #define UBLK_IO_FLAG_CANCELED 0x80000000 @@ -140,7 +160,12 @@ struct ublk_io { unsigned int flags; int res; - struct io_uring_cmd *cmd; + union { + /* valid if UBLK_IO_FLAG_ACTIVE is set */ + struct io_uring_cmd *cmd; + /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */ + struct request *req; + }; }; struct ublk_queue { @@ -198,13 +223,19 @@ struct ublk_params_header { __u32 types; }; +static void ublk_io_release(void *priv); static void ublk_stop_dev_unlocked(struct ublk_device *ub); static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq); static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, const struct ublk_queue *ubq, int tag, size_t offset); static inline unsigned int ublk_req_build_flags(struct request *req); -static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, - int tag); + +static inline struct ublksrv_io_desc * +ublk_get_iod(const struct ublk_queue *ubq, unsigned tag) +{ + return &ubq->io_cmd_buf[tag]; +} + static inline bool ublk_dev_is_zoned(const struct ublk_device *ub) { return ub->dev_info.flags & UBLK_F_ZONED; @@ -356,8 +387,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, if (ret) goto free_req; - ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, - GFP_KERNEL); + ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL); if (ret) goto erase_desc; @@ -477,7 +507,6 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, #endif static inline void __ublk_complete_rq(struct request *req); -static void ublk_complete_rq(struct kref *ref); static dev_t ublk_chr_devt; static const struct class ublk_chr_class = { @@ -609,6 +638,11 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq) return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY; } +static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq) +{ + return ubq->flags & UBLK_F_AUTO_BUF_REG; +} + static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) { return ubq->flags & UBLK_F_USER_COPY; @@ -616,7 +650,8 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) static inline bool ublk_need_map_io(const struct ublk_queue *ubq) { - return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq); + return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) && + !ublk_support_auto_buf_reg(ubq); } static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) @@ -627,8 +662,13 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) * * for zero copy, request buffer need to be registered to io_uring * buffer table, so reference is needed + * + * For auto buffer register, ublk server still may issue + * UBLK_IO_COMMIT_AND_FETCH_REQ before one registered buffer is used up, + * so reference is required too. 
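Roughly, the request lifetime with UBLK_F_AUTO_BUF_REG then looks like the trace below; this is an illustrative reading of the refcount handling in this patch, not part of it, and the request completes on whichever of the two puts runs last:

    ublk_auto_buf_reg():    io_buffer_register_bvec() succeeds,
                            refcount_set(&data->ref, 2)
    ublk_io_release():      io_uring releases the registered bvec buffer
                            and drops one reference
    UBLK_IO_COMMIT_AND_FETCH_REQ from the ublk server:
                            ublk_put_req_ref() drops the other reference;
                            the final put calls __ublk_complete_rq(req)

With UBLK_AUTO_BUF_REG_FALLBACK, a failed registration instead keeps a single reference and sets UBLK_IO_F_NEED_REG_BUF in the iod so the server can register the buffer itself.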
*/ - return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq); + return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) || + ublk_support_auto_buf_reg(ubq); } static inline void ublk_init_req_ref(const struct ublk_queue *ubq, @@ -637,7 +677,7 @@ static inline void ublk_init_req_ref(const struct ublk_queue *ubq, if (ublk_need_req_ref(ubq)) { struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - kref_init(&data->ref); + refcount_set(&data->ref, 1); } } @@ -647,7 +687,7 @@ static inline bool ublk_get_req_ref(const struct ublk_queue *ubq, if (ublk_need_req_ref(ubq)) { struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - return kref_get_unless_zero(&data->ref); + return refcount_inc_not_zero(&data->ref); } return true; @@ -659,7 +699,8 @@ static inline void ublk_put_req_ref(const struct ublk_queue *ubq, if (ublk_need_req_ref(ubq)) { struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - kref_put(&data->ref, ublk_complete_rq); + if (refcount_dec_and_test(&data->ref)) + __ublk_complete_rq(req); } else { __ublk_complete_rq(req); } @@ -695,12 +736,6 @@ static inline bool ublk_rq_has_data(const struct request *rq) return bio_has_data(rq->bio); } -static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, - int tag) -{ - return &ubq->io_cmd_buf[tag]; -} - static inline struct ublksrv_io_desc * ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) { @@ -1117,18 +1152,12 @@ exit: blk_mq_end_request(req, res); } -static void ublk_complete_rq(struct kref *ref) +static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req, + int res, unsigned issue_flags) { - struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data, - ref); - struct request *req = blk_mq_rq_from_pdu(data); + /* read cmd first because req will overwrite it */ + struct io_uring_cmd *cmd = io->cmd; - __ublk_complete_rq(req); -} - -static void ubq_complete_io_cmd(struct ublk_io *io, int res, - unsigned issue_flags) -{ /* mark this cmd owned by ublksrv */ io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV; @@ -1138,8 +1167,10 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res, */ io->flags &= ~UBLK_IO_FLAG_ACTIVE; + io->req = req; + /* tell ublksrv one io request is coming */ - io_uring_cmd_done(io->cmd, res, 0, issue_flags); + io_uring_cmd_done(cmd, res, 0, issue_flags); } #define UBLK_REQUEUE_DELAY_MS 3 @@ -1154,16 +1185,91 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq, blk_mq_end_request(rq, BLK_STS_IOERR); } +static void ublk_auto_buf_reg_fallback(struct request *req) +{ + const struct ublk_queue *ubq = req->mq_hctx->driver_data; + struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + iod->op_flags |= UBLK_IO_F_NEED_REG_BUF; + refcount_set(&data->ref, 1); +} + +static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io, + unsigned int issue_flags) +{ + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd); + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + int ret; + + ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release, + pdu->buf.index, issue_flags); + if (ret) { + if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) { + ublk_auto_buf_reg_fallback(req); + return true; + } + blk_mq_end_request(req, BLK_STS_IOERR); + return false; + } + /* one extra reference is dropped by ublk_io_release */ + refcount_set(&data->ref, 2); + + data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd); + /* store buffer index in request payload */ + data->buf_index = pdu->buf.index; + io->flags |= 
UBLK_IO_FLAG_AUTO_BUF_REG; + return true; +} + +static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq, + struct request *req, struct ublk_io *io, + unsigned int issue_flags) +{ + if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) + return ublk_auto_buf_reg(req, io, issue_flags); + + ublk_init_req_ref(ubq, req); + return true; +} + +static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req, + struct ublk_io *io) +{ + unsigned mapped_bytes = ublk_map_io(ubq, req, io); + + /* partially mapped, update io descriptor */ + if (unlikely(mapped_bytes != blk_rq_bytes(req))) { + /* + * Nothing mapped, retry until we succeed. + * + * We may never succeed in mapping any bytes here because + * of OOM. TODO: reserve one buffer with single page pinned + * for providing forward progress guarantee. + */ + if (unlikely(!mapped_bytes)) { + blk_mq_requeue_request(req, false); + blk_mq_delay_kick_requeue_list(req->q, + UBLK_REQUEUE_DELAY_MS); + return false; + } + + ublk_get_iod(ubq, req->tag)->nr_sectors = + mapped_bytes >> 9; + } + + return true; +} + static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req, unsigned int issue_flags) { int tag = req->tag; struct ublk_io *io = &ubq->ios[tag]; - unsigned int mapped_bytes; - pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n", - __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, + pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n", + __func__, ubq->q_id, req->tag, io->flags, ublk_get_iod(ubq, req->tag)->addr); /* @@ -1183,54 +1289,22 @@ static void ublk_dispatch_req(struct ublk_queue *ubq, if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) { /* * We have not handled UBLK_IO_NEED_GET_DATA command yet, - * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv + * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv * and notify it. */ - if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) { - io->flags |= UBLK_IO_FLAG_NEED_GET_DATA; - pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n", - __func__, io->cmd->cmd_op, ubq->q_id, - req->tag, io->flags); - ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags); - return; - } - /* - * We have handled UBLK_IO_NEED_GET_DATA command, - * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just - * do the copy work. - */ - io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA; - /* update iod->addr because ublksrv may have passed a new io buffer */ - ublk_get_iod(ubq, req->tag)->addr = io->addr; - pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n", - __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags, - ublk_get_iod(ubq, req->tag)->addr); + io->flags |= UBLK_IO_FLAG_NEED_GET_DATA; + pr_devel("%s: need get data. qid %d tag %d io_flags %x\n", + __func__, ubq->q_id, req->tag, io->flags); + ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA, + issue_flags); + return; } - mapped_bytes = ublk_map_io(ubq, req, io); - - /* partially mapped, update io descriptor */ - if (unlikely(mapped_bytes != blk_rq_bytes(req))) { - /* - * Nothing mapped, retry until we succeed. - * - * We may never succeed in mapping any bytes here because - * of OOM. TODO: reserve one buffer with single page pinned - * for providing forward progress guarantee. 
- */ - if (unlikely(!mapped_bytes)) { - blk_mq_requeue_request(req, false); - blk_mq_delay_kick_requeue_list(req->q, - UBLK_REQUEUE_DELAY_MS); - return; - } - - ublk_get_iod(ubq, req->tag)->nr_sectors = - mapped_bytes >> 9; - } + if (!ublk_start_io(ubq, req, io)) + return; - ublk_init_req_ref(ubq, req); - ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags); + if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags)) + ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags); } static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd, @@ -1590,30 +1664,6 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma) return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); } -static void ublk_commit_completion(struct ublk_device *ub, - const struct ublksrv_io_cmd *ub_cmd) -{ - u32 qid = ub_cmd->q_id, tag = ub_cmd->tag; - struct ublk_queue *ubq = ublk_get_queue(ub, qid); - struct ublk_io *io = &ubq->ios[tag]; - struct request *req; - - /* now this cmd slot is owned by nbd driver */ - io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; - io->res = ub_cmd->result; - - /* find the io request and complete */ - req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag); - if (WARN_ON_ONCE(unlikely(!req))) - return; - - if (req_op(req) == REQ_OP_ZONE_APPEND) - req->__sector = ub_cmd->zone_append_lba; - - if (likely(!blk_should_fake_timeout(req->q))) - ublk_put_req_ref(ubq, req); -} - static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io, struct request *req) { @@ -1642,17 +1692,8 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) for (i = 0; i < ubq->q_depth; i++) { struct ublk_io *io = &ubq->ios[i]; - if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) { - struct request *rq; - - /* - * Either we fail the request or ublk_rq_task_work_cb - * will do it - */ - rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i); - if (rq && blk_mq_request_started(rq)) - __ublk_fail_req(ubq, io, rq); - } + if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) + __ublk_fail_req(ubq, io, io->req); } } @@ -1940,6 +1981,20 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd, io_uring_cmd_mark_cancelable(cmd, issue_flags); } +static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd) +{ + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); + + pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr)); + + if (pdu->buf.reserved0 || pdu->buf.reserved1) + return -EINVAL; + + if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK) + return -EINVAL; + return 0; +} + static void ublk_io_release(void *priv) { struct request *rq = priv; @@ -1953,16 +2008,12 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd, unsigned int index, unsigned int issue_flags) { struct ublk_device *ub = cmd->file->private_data; - const struct ublk_io *io = &ubq->ios[tag]; struct request *req; int ret; if (!ublk_support_zero_copy(ubq)) return -EINVAL; - if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) - return -EINVAL; - req = __ublk_check_and_get_req(ub, ubq, tag, 0); if (!req) return -EINVAL; @@ -1978,17 +2029,12 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd, } static int ublk_unregister_io_buf(struct io_uring_cmd *cmd, - const struct ublk_queue *ubq, unsigned int tag, + const struct ublk_queue *ubq, unsigned int index, unsigned int issue_flags) { - const struct ublk_io *io = &ubq->ios[tag]; - if (!ublk_support_zero_copy(ubq)) return -EINVAL; - if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) - return -EINVAL; - return io_buffer_unregister_bvec(cmd, index, issue_flags); } @@ 
-2031,6 +2077,12 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq, goto out; } + if (ublk_support_auto_buf_reg(ubq)) { + ret = ublk_set_auto_buf_reg(cmd); + if (ret) + goto out; + } + ublk_fill_io_cmd(io, cmd, buf_addr); ublk_mark_io_ready(ub, ubq); out: @@ -2038,6 +2090,90 @@ out: return ret; } +static int ublk_commit_and_fetch(const struct ublk_queue *ubq, + struct ublk_io *io, struct io_uring_cmd *cmd, + const struct ublksrv_io_cmd *ub_cmd, + unsigned int issue_flags) +{ + struct request *req = io->req; + + if (ublk_need_map_io(ubq)) { + /* + * COMMIT_AND_FETCH_REQ has to provide IO buffer if + * NEED GET DATA is not enabled or it is Read IO. + */ + if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || + req_op(req) == REQ_OP_READ)) + return -EINVAL; + } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) { + /* + * User copy requires addr to be unset when command is + * not zone append + */ + return -EINVAL; + } + + if (ublk_support_auto_buf_reg(ubq)) { + int ret; + + /* + * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ` + * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same + * `io_ring_ctx`. + * + * If this uring_cmd's io_ring_ctx isn't same with the + * one for registering the buffer, it is ublk server's + * responsibility for unregistering the buffer, otherwise + * this ublk request gets stuck. + */ + if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) { + struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); + + if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd)) + io_buffer_unregister_bvec(cmd, data->buf_index, + issue_flags); + io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG; + } + + ret = ublk_set_auto_buf_reg(cmd); + if (ret) + return ret; + } + + ublk_fill_io_cmd(io, cmd, ub_cmd->addr); + + /* now this cmd slot is owned by ublk driver */ + io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; + io->res = ub_cmd->result; + + if (req_op(req) == REQ_OP_ZONE_APPEND) + req->__sector = ub_cmd->zone_append_lba; + + if (likely(!blk_should_fake_timeout(req->q))) + ublk_put_req_ref(ubq, req); + + return 0; +} + +static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io) +{ + struct request *req = io->req; + + /* + * We have handled UBLK_IO_NEED_GET_DATA command, + * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just + * do the copy work. 
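For context, the reworked UBLK_IO_NEED_GET_DATA handshake now runs in two steps; a rough trace of the intended flow (not part of the patch):

    ublk_dispatch_req():    write request, UBLK_F_NEED_GET_DATA, no buffer yet
                            -> io->flags |= UBLK_IO_FLAG_NEED_GET_DATA
                            -> complete the fetched command with
                               UBLK_IO_RES_NEED_GET_DATA
    ublk server:            allocates a buffer and issues UBLK_IO_NEED_GET_DATA
                            with ub_cmd->addr pointing at it
    __ublk_ch_uring_cmd():  io->addr = ub_cmd->addr; ublk_get_data() updates
                            iod->addr, maps the request and returns
                            UBLK_IO_RES_OK (or -EIOCBQUEUED if nothing could
                            be mapped and the request was requeued)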
+ */ + io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA; + /* update iod->addr because ublksrv may have passed a new io buffer */ + ublk_get_iod(ubq, req->tag)->addr = io->addr; + pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n", + __func__, ubq->q_id, req->tag, io->flags, + ublk_get_iod(ubq, req->tag)->addr); + + return ublk_start_io(ubq, req, io); +} + static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags, const struct ublksrv_io_cmd *ub_cmd) @@ -2048,7 +2184,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, u32 cmd_op = cmd->cmd_op; unsigned tag = ub_cmd->tag; int ret = -EINVAL; - struct request *req; pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n", __func__, cmd->cmd_op, ub_cmd->q_id, tag, @@ -2058,9 +2193,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, goto out; ubq = ublk_get_queue(ub, ub_cmd->q_id); - if (!ubq || ub_cmd->q_id != ubq->q_id) - goto out; - if (ubq->ubq_daemon && ubq->ubq_daemon != current) goto out; @@ -2075,6 +2207,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, goto out; } + /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */ + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) && + _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ) + goto out; + /* * ensure that the user issues UBLK_IO_NEED_GET_DATA * iff the driver have set the UBLK_IO_FLAG_NEED_GET_DATA. @@ -2092,45 +2229,23 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, case UBLK_IO_REGISTER_IO_BUF: return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); case UBLK_IO_UNREGISTER_IO_BUF: - return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); + return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags); case UBLK_IO_FETCH_REQ: ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr); if (ret) goto out; break; case UBLK_IO_COMMIT_AND_FETCH_REQ: - req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag); - - if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) - goto out; - - if (ublk_need_map_io(ubq)) { - /* - * COMMIT_AND_FETCH_REQ has to provide IO buffer if - * NEED GET DATA is not enabled or it is Read IO. 
- */ - if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || - req_op(req) == REQ_OP_READ)) - goto out; - } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) { - /* - * User copy requires addr to be unset when command is - * not zone append - */ - ret = -EINVAL; + ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags); + if (ret) goto out; - } - - ublk_fill_io_cmd(io, cmd, ub_cmd->addr); - ublk_commit_completion(ub, ub_cmd); break; case UBLK_IO_NEED_GET_DATA: - if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) - goto out; - ublk_fill_io_cmd(io, cmd, ub_cmd->addr); - req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag); - ublk_dispatch_req(ubq, req, issue_flags); - return -EIOCBQUEUED; + io->addr = ub_cmd->addr; + if (!ublk_get_data(ubq, io)) + return -EIOCBQUEUED; + + return UBLK_IO_RES_OK; default: goto out; } @@ -2728,6 +2843,11 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) return -EINVAL; } + if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) { + pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n"); + return -EINVAL; + } + /* * unprivileged device can't be trusted, but RECOVERY and * RECOVERY_REISSUE still may hang error handling, so can't @@ -2744,8 +2864,11 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) * For USER_COPY, we depends on userspace to fill request * buffer by pwrite() to ublk char device, which can't be * used for unprivileged device + * + * Same with zero copy or auto buffer register. */ - if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)) + if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | + UBLK_F_AUTO_BUF_REG)) return -EINVAL; } @@ -2803,7 +2926,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) UBLK_F_URING_CMD_COMP_IN_TASK; /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */ - if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)) + if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | + UBLK_F_AUTO_BUF_REG)) ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA; /* @@ -3106,6 +3230,127 @@ static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header) return 0; } +static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header) +{ + struct ublk_param_basic *p = &ub->params.basic; + u64 new_size = header->data[0]; + + mutex_lock(&ub->mutex); + p->dev_sectors = new_size; + set_capacity_and_notify(ub->ub_disk, p->dev_sectors); + mutex_unlock(&ub->mutex); +} + +struct count_busy { + const struct ublk_queue *ubq; + unsigned int nr_busy; +}; + +static bool ublk_count_busy_req(struct request *rq, void *data) +{ + struct count_busy *idle = data; + + if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq) + idle->nr_busy += 1; + return true; +} + +/* uring_cmd is guaranteed to be active if the associated request is idle */ +static bool ubq_has_idle_io(const struct ublk_queue *ubq) +{ + struct count_busy data = { + .ubq = ubq, + }; + + blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data); + return data.nr_busy < ubq->q_depth; +} + +/* Wait until each hw queue has at least one idle IO */ +static int ublk_wait_for_idle_io(struct ublk_device *ub, + unsigned int timeout_ms) +{ + unsigned int elapsed = 0; + int ret; + + while (elapsed < timeout_ms && !signal_pending(current)) { + unsigned int queues_cancelable = 0; + int i; + + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { + struct ublk_queue *ubq = ublk_get_queue(ub, i); + 
+ queues_cancelable += !!ubq_has_idle_io(ubq); + } + + /* + * Each queue needs at least one active command for + * notifying ublk server + */ + if (queues_cancelable == ub->dev_info.nr_hw_queues) + break; + + msleep(UBLK_REQUEUE_DELAY_MS); + elapsed += UBLK_REQUEUE_DELAY_MS; + } + + if (signal_pending(current)) + ret = -EINTR; + else if (elapsed >= timeout_ms) + ret = -EBUSY; + else + ret = 0; + + return ret; +} + +static int ublk_ctrl_quiesce_dev(struct ublk_device *ub, + const struct ublksrv_ctrl_cmd *header) +{ + /* zero means wait forever */ + u64 timeout_ms = header->data[0]; + struct gendisk *disk; + int i, ret = -ENODEV; + + if (!(ub->dev_info.flags & UBLK_F_QUIESCE)) + return -EOPNOTSUPP; + + mutex_lock(&ub->mutex); + disk = ublk_get_disk(ub); + if (!disk) + goto unlock; + if (ub->dev_info.state == UBLK_S_DEV_DEAD) + goto put_disk; + + ret = 0; + /* already in expected state */ + if (ub->dev_info.state != UBLK_S_DEV_LIVE) + goto put_disk; + + /* Mark all queues as canceling */ + blk_mq_quiesce_queue(disk->queue); + for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { + struct ublk_queue *ubq = ublk_get_queue(ub, i); + + ubq->canceling = true; + } + blk_mq_unquiesce_queue(disk->queue); + + if (!timeout_ms) + timeout_ms = UINT_MAX; + ret = ublk_wait_for_idle_io(ub, timeout_ms); + +put_disk: + ublk_put_disk(disk); +unlock: + mutex_unlock(&ub->mutex); + + /* Cancel pending uring_cmd */ + if (!ret) + ublk_cancel_dev(ub); + return ret; +} + /* * All control commands are sent via /dev/ublk-control, so we have to check * the destination device's permission @@ -3191,6 +3436,8 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub, case UBLK_CMD_SET_PARAMS: case UBLK_CMD_START_USER_RECOVERY: case UBLK_CMD_END_USER_RECOVERY: + case UBLK_CMD_UPDATE_SIZE: + case UBLK_CMD_QUIESCE_DEV: mask = MAY_READ | MAY_WRITE; break; default: @@ -3282,6 +3529,13 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, case UBLK_CMD_END_USER_RECOVERY: ret = ublk_ctrl_end_recovery(ub, header); break; + case UBLK_CMD_UPDATE_SIZE: + ublk_ctrl_set_size(ub, header); + ret = 0; + break; + case UBLK_CMD_QUIESCE_DEV: + ret = ublk_ctrl_quiesce_dev(ub, header); + break; default: ret = -EOPNOTSUPP; break; @@ -3315,6 +3569,7 @@ static int __init ublk_init(void) BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET + UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET); + BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8); init_waitqueue_head(&ublk_idr_wq); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7cffea01d868..30bca8cb7106 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -571,7 +571,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk, vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT); vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector); - err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL); + err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL); if (err) goto out; @@ -817,7 +817,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); vbr->out_hdr.sector = 0; - err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); + err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); if (err) goto out; diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c new file mode 100644 index 000000000000..553b1a713ab9 --- /dev/null +++ b/drivers/block/zloop.c @@ -0,0 +1,1385 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025, Christoph Hellwig. + * Copyright (c) 2025, Western Digital Corporation or its affiliates. + * + * Zoned Loop Device driver - exports a zoned block device using one file per + * zone as backing storage. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/blk-mq.h> +#include <linux/blkzoned.h> +#include <linux/pagemap.h> +#include <linux/miscdevice.h> +#include <linux/falloc.h> +#include <linux/mutex.h> +#include <linux/parser.h> +#include <linux/seq_file.h> + +/* + * Options for adding (and removing) a device. + */ +enum { + ZLOOP_OPT_ERR = 0, + ZLOOP_OPT_ID = (1 << 0), + ZLOOP_OPT_CAPACITY = (1 << 1), + ZLOOP_OPT_ZONE_SIZE = (1 << 2), + ZLOOP_OPT_ZONE_CAPACITY = (1 << 3), + ZLOOP_OPT_NR_CONV_ZONES = (1 << 4), + ZLOOP_OPT_BASE_DIR = (1 << 5), + ZLOOP_OPT_NR_QUEUES = (1 << 6), + ZLOOP_OPT_QUEUE_DEPTH = (1 << 7), + ZLOOP_OPT_BUFFERED_IO = (1 << 8), +}; + +static const match_table_t zloop_opt_tokens = { + { ZLOOP_OPT_ID, "id=%d" }, + { ZLOOP_OPT_CAPACITY, "capacity_mb=%u" }, + { ZLOOP_OPT_ZONE_SIZE, "zone_size_mb=%u" }, + { ZLOOP_OPT_ZONE_CAPACITY, "zone_capacity_mb=%u" }, + { ZLOOP_OPT_NR_CONV_ZONES, "conv_zones=%u" }, + { ZLOOP_OPT_BASE_DIR, "base_dir=%s" }, + { ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" }, + { ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" }, + { ZLOOP_OPT_BUFFERED_IO, "buffered_io" }, + { ZLOOP_OPT_ERR, NULL } +}; + +/* Default values for the "add" operation. */ +#define ZLOOP_DEF_ID -1 +#define ZLOOP_DEF_ZONE_SIZE ((256ULL * SZ_1M) >> SECTOR_SHIFT) +#define ZLOOP_DEF_NR_ZONES 64 +#define ZLOOP_DEF_NR_CONV_ZONES 8 +#define ZLOOP_DEF_BASE_DIR "/var/local/zloop" +#define ZLOOP_DEF_NR_QUEUES 1 +#define ZLOOP_DEF_QUEUE_DEPTH 128 +#define ZLOOP_DEF_BUFFERED_IO false + +/* Arbitrary limit on the zone size (16GB). */ +#define ZLOOP_MAX_ZONE_SIZE_MB 16384 + +struct zloop_options { + unsigned int mask; + int id; + sector_t capacity; + sector_t zone_size; + sector_t zone_capacity; + unsigned int nr_conv_zones; + char *base_dir; + unsigned int nr_queues; + unsigned int queue_depth; + bool buffered_io; +}; + +/* + * Device states. 
+ */ +enum { + Zlo_creating = 0, + Zlo_live, + Zlo_deleting, +}; + +enum zloop_zone_flags { + ZLOOP_ZONE_CONV = 0, + ZLOOP_ZONE_SEQ_ERROR, +}; + +struct zloop_zone { + struct file *file; + + unsigned long flags; + struct mutex lock; + enum blk_zone_cond cond; + sector_t start; + sector_t wp; + + gfp_t old_gfp_mask; +}; + +struct zloop_device { + unsigned int id; + unsigned int state; + + struct blk_mq_tag_set tag_set; + struct gendisk *disk; + + struct workqueue_struct *workqueue; + bool buffered_io; + + const char *base_dir; + struct file *data_dir; + + unsigned int zone_shift; + sector_t zone_size; + sector_t zone_capacity; + unsigned int nr_zones; + unsigned int nr_conv_zones; + unsigned int block_size; + + struct zloop_zone zones[] __counted_by(nr_zones); +}; + +struct zloop_cmd { + struct work_struct work; + atomic_t ref; + sector_t sector; + sector_t nr_sectors; + long ret; + struct kiocb iocb; + struct bio_vec *bvec; +}; + +static DEFINE_IDR(zloop_index_idr); +static DEFINE_MUTEX(zloop_ctl_mutex); + +static unsigned int rq_zone_no(struct request *rq) +{ + struct zloop_device *zlo = rq->q->queuedata; + + return blk_rq_pos(rq) >> zlo->zone_shift; +} + +static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + struct kstat stat; + sector_t file_sectors; + int ret; + + lockdep_assert_held(&zone->lock); + + ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0); + if (ret < 0) { + pr_err("Failed to get zone %u file stat (err=%d)\n", + zone_no, ret); + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + return ret; + } + + file_sectors = stat.size >> SECTOR_SHIFT; + if (file_sectors > zlo->zone_capacity) { + pr_err("Zone %u file too large (%llu sectors > %llu)\n", + zone_no, file_sectors, zlo->zone_capacity); + return -EINVAL; + } + + if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) { + pr_err("Zone %u file size not aligned to block size %u\n", + zone_no, zlo->block_size); + return -EINVAL; + } + + if (!file_sectors) { + zone->cond = BLK_ZONE_COND_EMPTY; + zone->wp = zone->start; + } else if (file_sectors == zlo->zone_capacity) { + zone->cond = BLK_ZONE_COND_FULL; + zone->wp = zone->start + zlo->zone_size; + } else { + zone->cond = BLK_ZONE_COND_CLOSED; + zone->wp = zone->start + file_sectors; + } + + return 0; +} + +static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + ret = zloop_update_seq_zone(zlo, zone_no); + if (ret) + goto unlock; + } + + switch (zone->cond) { + case BLK_ZONE_COND_EXP_OPEN: + break; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_CLOSED: + case BLK_ZONE_COND_IMP_OPEN: + zone->cond = BLK_ZONE_COND_EXP_OPEN; + break; + case BLK_ZONE_COND_FULL: + default: + ret = -EIO; + break; + } + +unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + ret = zloop_update_seq_zone(zlo, zone_no); + if (ret) + goto unlock; + } + + switch (zone->cond) { + case BLK_ZONE_COND_CLOSED: + break; + case BLK_ZONE_COND_IMP_OPEN: + case 
BLK_ZONE_COND_EXP_OPEN: + if (zone->wp == zone->start) + zone->cond = BLK_ZONE_COND_EMPTY; + else + zone->cond = BLK_ZONE_COND_CLOSED; + break; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_FULL: + default: + ret = -EIO; + break; + } + +unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) && + zone->cond == BLK_ZONE_COND_EMPTY) + goto unlock; + + if (vfs_truncate(&zone->file->f_path, 0)) { + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + ret = -EIO; + goto unlock; + } + + zone->cond = BLK_ZONE_COND_EMPTY; + zone->wp = zone->start; + clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + +unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static int zloop_reset_all_zones(struct zloop_device *zlo) +{ + unsigned int i; + int ret; + + for (i = zlo->nr_conv_zones; i < zlo->nr_zones; i++) { + ret = zloop_reset_zone(zlo, i); + if (ret) + return ret; + } + + return 0; +} + +static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int ret = 0; + + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) + return -EIO; + + mutex_lock(&zone->lock); + + if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) && + zone->cond == BLK_ZONE_COND_FULL) + goto unlock; + + if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) { + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + ret = -EIO; + goto unlock; + } + + zone->cond = BLK_ZONE_COND_FULL; + zone->wp = zone->start + zlo->zone_size; + clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + + unlock: + mutex_unlock(&zone->lock); + + return ret; +} + +static void zloop_put_cmd(struct zloop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + + if (!atomic_dec_and_test(&cmd->ref)) + return; + kfree(cmd->bvec); + cmd->bvec = NULL; + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); +} + +static void zloop_rw_complete(struct kiocb *iocb, long ret) +{ + struct zloop_cmd *cmd = container_of(iocb, struct zloop_cmd, iocb); + + cmd->ret = ret; + zloop_put_cmd(cmd); +} + +static void zloop_rw(struct zloop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + struct zloop_device *zlo = rq->q->queuedata; + unsigned int zone_no = rq_zone_no(rq); + sector_t sector = blk_rq_pos(rq); + sector_t nr_sectors = blk_rq_sectors(rq); + bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND; + bool is_write = req_op(rq) == REQ_OP_WRITE || is_append; + int rw = is_write ? ITER_SOURCE : ITER_DEST; + struct req_iterator rq_iter; + struct zloop_zone *zone; + struct iov_iter iter; + struct bio_vec tmp; + sector_t zone_end; + int nr_bvec = 0; + int ret; + + atomic_set(&cmd->ref, 2); + cmd->sector = sector; + cmd->nr_sectors = nr_sectors; + cmd->ret = 0; + + /* We should never get an I/O beyond the device capacity. */ + if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) { + ret = -EIO; + goto out; + } + zone = &zlo->zones[zone_no]; + zone_end = zone->start + zlo->zone_capacity; + + /* + * The block layer should never send requests that are not fully + * contained within the zone. 
+ */ + if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) { + ret = -EIO; + goto out; + } + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + mutex_lock(&zone->lock); + ret = zloop_update_seq_zone(zlo, zone_no); + mutex_unlock(&zone->lock); + if (ret) + goto out; + } + + if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) { + mutex_lock(&zone->lock); + + if (is_append) { + sector = zone->wp; + cmd->sector = sector; + } + + /* + * Write operations must be aligned to the write pointer and + * fully contained within the zone capacity. + */ + if (sector != zone->wp || zone->wp + nr_sectors > zone_end) { + pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n", + zone_no, sector, zone->wp); + ret = -EIO; + goto unlock; + } + + /* Implicitly open the target zone. */ + if (zone->cond == BLK_ZONE_COND_CLOSED || + zone->cond == BLK_ZONE_COND_EMPTY) + zone->cond = BLK_ZONE_COND_IMP_OPEN; + + /* + * Advance the write pointer of sequential zones. If the write + * fails, the wp position will be corrected when the next I/O + * copmpletes. + */ + zone->wp += nr_sectors; + if (zone->wp == zone_end) + zone->cond = BLK_ZONE_COND_FULL; + } + + rq_for_each_bvec(tmp, rq, rq_iter) + nr_bvec++; + + if (rq->bio != rq->biotail) { + struct bio_vec *bvec; + + cmd->bvec = kmalloc_array(nr_bvec, sizeof(*cmd->bvec), GFP_NOIO); + if (!cmd->bvec) { + ret = -EIO; + goto unlock; + } + + /* + * The bios of the request may be started from the middle of + * the 'bvec' because of bio splitting, so we can't directly + * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec + * API will take care of all details for us. + */ + bvec = cmd->bvec; + rq_for_each_bvec(tmp, rq, rq_iter) { + *bvec = tmp; + bvec++; + } + iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq)); + } else { + /* + * Same here, this bio may be started from the middle of the + * 'bvec' because of bio splitting, so offset from the bvec + * must be passed to iov iterator + */ + iov_iter_bvec(&iter, rw, + __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter), + nr_bvec, blk_rq_bytes(rq)); + iter.iov_offset = rq->bio->bi_iter.bi_bvec_done; + } + + cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT; + cmd->iocb.ki_filp = zone->file; + cmd->iocb.ki_complete = zloop_rw_complete; + if (!zlo->buffered_io) + cmd->iocb.ki_flags = IOCB_DIRECT; + cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + + if (rw == ITER_SOURCE) + ret = zone->file->f_op->write_iter(&cmd->iocb, &iter); + else + ret = zone->file->f_op->read_iter(&cmd->iocb, &iter); +unlock: + if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) + mutex_unlock(&zone->lock); +out: + if (ret != -EIOCBQUEUED) + zloop_rw_complete(&cmd->iocb, ret); + zloop_put_cmd(cmd); +} + +static void zloop_handle_cmd(struct zloop_cmd *cmd) +{ + struct request *rq = blk_mq_rq_from_pdu(cmd); + struct zloop_device *zlo = rq->q->queuedata; + + switch (req_op(rq)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + case REQ_OP_ZONE_APPEND: + /* + * zloop_rw() always executes asynchronously or completes + * directly. 
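As a reading aid, the read/write/append path completes through a pair of references on the zloop_cmd, and whichever of the two puts runs last triggers blk_mq_complete_request(); a rough trace, not part of the patch:

    zloop_rw():             atomic_set(&cmd->ref, 2), then calls
                            ->read_iter()/->write_iter() on the zone file
      synchronous return:   zloop_rw_complete() is called inline with the
                            byte count or error            (one put)
      -EIOCBQUEUED:         zloop_rw_complete() runs later via
                            cmd->iocb.ki_complete          (one put)
    end of zloop_rw():      zloop_put_cmd()                (the other put)
    final put:              kfree(cmd->bvec), blk_mq_complete_request()
                            -> zloop_complete_rq()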
+ */ + zloop_rw(cmd); + return; + case REQ_OP_FLUSH: + /* + * Sync the entire FS containing the zone files instead of + * walking all files + */ + cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb); + break; + case REQ_OP_ZONE_RESET: + cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq)); + break; + case REQ_OP_ZONE_RESET_ALL: + cmd->ret = zloop_reset_all_zones(zlo); + break; + case REQ_OP_ZONE_FINISH: + cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq)); + break; + case REQ_OP_ZONE_OPEN: + cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq)); + break; + case REQ_OP_ZONE_CLOSE: + cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq)); + break; + default: + WARN_ON_ONCE(1); + pr_err("Unsupported operation %d\n", req_op(rq)); + cmd->ret = -EOPNOTSUPP; + break; + } + + blk_mq_complete_request(rq); +} + +static void zloop_cmd_workfn(struct work_struct *work) +{ + struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work); + int orig_flags = current->flags; + + current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO; + zloop_handle_cmd(cmd); + current->flags = orig_flags; +} + +static void zloop_complete_rq(struct request *rq) +{ + struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq); + struct zloop_device *zlo = rq->q->queuedata; + unsigned int zone_no = cmd->sector >> zlo->zone_shift; + struct zloop_zone *zone = &zlo->zones[zone_no]; + blk_status_t sts = BLK_STS_OK; + + switch (req_op(rq)) { + case REQ_OP_READ: + if (cmd->ret < 0) + pr_err("Zone %u: failed read sector %llu, %llu sectors\n", + zone_no, cmd->sector, cmd->nr_sectors); + + if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) { + /* short read */ + struct bio *bio; + + __rq_for_each_bio(bio, rq) + zero_fill_bio(bio); + } + break; + case REQ_OP_WRITE: + case REQ_OP_ZONE_APPEND: + if (cmd->ret < 0) + pr_err("Zone %u: failed %swrite sector %llu, %llu sectors\n", + zone_no, + req_op(rq) == REQ_OP_WRITE ? "" : "append ", + cmd->sector, cmd->nr_sectors); + + if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) { + pr_err("Zone %u: partial write %ld/%u B\n", + zone_no, cmd->ret, blk_rq_bytes(rq)); + cmd->ret = -EIO; + } + + if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) { + /* + * A write to a sequential zone file failed: mark the + * zone as having an error. This will be corrected and + * cleared when the next IO is submitted. 
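For example, with the default 256 MB zone size and an equal zone capacity, a sequential zone whose backing file is found to be 1 MiB long after such an error is reported as CLOSED with the write pointer 2048 sectors into the zone; an empty file reports the zone as EMPTY and a full-capacity file as FULL (see zloop_update_seq_zone() above). Note also that for REQ_OP_ZONE_APPEND the completion path copies cmd->sector, the location chosen at submission time, back into rq->__sector so the submitter learns where the data actually landed.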
+ */ + set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags); + break; + } + if (req_op(rq) == REQ_OP_ZONE_APPEND) + rq->__sector = cmd->sector; + + break; + default: + break; + } + + if (cmd->ret < 0) + sts = errno_to_blk_status(cmd->ret); + blk_mq_end_request(rq, sts); +} + +static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct request *rq = bd->rq; + struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq); + struct zloop_device *zlo = rq->q->queuedata; + + if (zlo->state == Zlo_deleting) + return BLK_STS_IOERR; + + blk_mq_start_request(rq); + + INIT_WORK(&cmd->work, zloop_cmd_workfn); + queue_work(zlo->workqueue, &cmd->work); + + return BLK_STS_OK; +} + +static const struct blk_mq_ops zloop_mq_ops = { + .queue_rq = zloop_queue_rq, + .complete = zloop_complete_rq, +}; + +static int zloop_open(struct gendisk *disk, blk_mode_t mode) +{ + struct zloop_device *zlo = disk->private_data; + int ret; + + ret = mutex_lock_killable(&zloop_ctl_mutex); + if (ret) + return ret; + + if (zlo->state != Zlo_live) + ret = -ENXIO; + mutex_unlock(&zloop_ctl_mutex); + return ret; +} + +static int zloop_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct zloop_device *zlo = disk->private_data; + struct blk_zone blkz = {}; + unsigned int first, i; + int ret; + + first = disk_zone_no(disk, sector); + if (first >= zlo->nr_zones) + return 0; + nr_zones = min(nr_zones, zlo->nr_zones - first); + + for (i = 0; i < nr_zones; i++) { + unsigned int zone_no = first + i; + struct zloop_zone *zone = &zlo->zones[zone_no]; + + mutex_lock(&zone->lock); + + if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) { + ret = zloop_update_seq_zone(zlo, zone_no); + if (ret) { + mutex_unlock(&zone->lock); + return ret; + } + } + + blkz.start = zone->start; + blkz.len = zlo->zone_size; + blkz.wp = zone->wp; + blkz.cond = zone->cond; + if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) { + blkz.type = BLK_ZONE_TYPE_CONVENTIONAL; + blkz.capacity = zlo->zone_size; + } else { + blkz.type = BLK_ZONE_TYPE_SEQWRITE_REQ; + blkz.capacity = zlo->zone_capacity; + } + + mutex_unlock(&zone->lock); + + ret = cb(&blkz, i, data); + if (ret) + return ret; + } + + return nr_zones; +} + +static void zloop_free_disk(struct gendisk *disk) +{ + struct zloop_device *zlo = disk->private_data; + unsigned int i; + + for (i = 0; i < zlo->nr_zones; i++) { + struct zloop_zone *zone = &zlo->zones[i]; + + mapping_set_gfp_mask(zone->file->f_mapping, + zone->old_gfp_mask); + fput(zone->file); + } + + fput(zlo->data_dir); + destroy_workqueue(zlo->workqueue); + kfree(zlo->base_dir); + kvfree(zlo); +} + +static const struct block_device_operations zloop_fops = { + .owner = THIS_MODULE, + .open = zloop_open, + .report_zones = zloop_report_zones, + .free_disk = zloop_free_disk, +}; + +__printf(3, 4) +static struct file *zloop_filp_open_fmt(int oflags, umode_t mode, + const char *fmt, ...) +{ + struct file *file; + va_list ap; + char *p; + + va_start(ap, fmt); + p = kvasprintf(GFP_KERNEL, fmt, ap); + va_end(ap); + + if (!p) + return ERR_PTR(-ENOMEM); + file = filp_open(p, oflags, mode); + kfree(p); + return file; +} + +static int zloop_get_block_size(struct zloop_device *zlo, + struct zloop_zone *zone) +{ + struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev; + struct kstat st; + + /* + * If the FS block size is lower than or equal to 4K, use that as the + * device block size. 
Otherwise, fallback to the FS direct IO alignment + * constraint if that is provided, and to the FS underlying device + * physical block size if the direct IO alignment is unknown. + */ + if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K) + zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize; + else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) && + (st.result_mask & STATX_DIOALIGN)) + zlo->block_size = st.dio_offset_align; + else if (sb_bdev) + zlo->block_size = bdev_physical_block_size(sb_bdev); + else + zlo->block_size = SECTOR_SIZE; + + if (zlo->zone_capacity & ((zlo->block_size >> SECTOR_SHIFT) - 1)) { + pr_err("Zone capacity is not aligned to block size %u\n", + zlo->block_size); + return -EINVAL; + } + + return 0; +} + +static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts, + unsigned int zone_no, bool restore) +{ + struct zloop_zone *zone = &zlo->zones[zone_no]; + int oflags = O_RDWR; + struct kstat stat; + sector_t file_sectors; + int ret; + + mutex_init(&zone->lock); + zone->start = (sector_t)zone_no << zlo->zone_shift; + + if (!restore) + oflags |= O_CREAT; + + if (!opts->buffered_io) + oflags |= O_DIRECT; + + if (zone_no < zlo->nr_conv_zones) { + /* Conventional zone file. */ + set_bit(ZLOOP_ZONE_CONV, &zone->flags); + zone->cond = BLK_ZONE_COND_NOT_WP; + zone->wp = U64_MAX; + + zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u", + zlo->base_dir, zlo->id, zone_no); + if (IS_ERR(zone->file)) { + pr_err("Failed to open zone %u file %s/%u/cnv-%06u (err=%ld)", + zone_no, zlo->base_dir, zlo->id, zone_no, + PTR_ERR(zone->file)); + return PTR_ERR(zone->file); + } + + if (!zlo->block_size) { + ret = zloop_get_block_size(zlo, zone); + if (ret) + return ret; + } + + ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0); + if (ret < 0) { + pr_err("Failed to get zone %u file stat\n", zone_no); + return ret; + } + file_sectors = stat.size >> SECTOR_SHIFT; + + if (restore && file_sectors != zlo->zone_size) { + pr_err("Invalid conventional zone %u file size (%llu sectors != %llu)\n", + zone_no, file_sectors, zlo->zone_capacity); + return ret; + } + + ret = vfs_truncate(&zone->file->f_path, + zlo->zone_size << SECTOR_SHIFT); + if (ret < 0) { + pr_err("Failed to truncate zone %u file (err=%d)\n", + zone_no, ret); + return ret; + } + + return 0; + } + + /* Sequential zone file. 
*/ + zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u", + zlo->base_dir, zlo->id, zone_no); + if (IS_ERR(zone->file)) { + pr_err("Failed to open zone %u file %s/%u/seq-%06u (err=%ld)", + zone_no, zlo->base_dir, zlo->id, zone_no, + PTR_ERR(zone->file)); + return PTR_ERR(zone->file); + } + + if (!zlo->block_size) { + ret = zloop_get_block_size(zlo, zone); + if (ret) + return ret; + } + + zloop_get_block_size(zlo, zone); + + mutex_lock(&zone->lock); + ret = zloop_update_seq_zone(zlo, zone_no); + mutex_unlock(&zone->lock); + + return ret; +} + +static bool zloop_dev_exists(struct zloop_device *zlo) +{ + struct file *cnv, *seq; + bool exists; + + cnv = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/cnv-%06u", + zlo->base_dir, zlo->id, 0); + seq = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/seq-%06u", + zlo->base_dir, zlo->id, 0); + exists = !IS_ERR(cnv) || !IS_ERR(seq); + + if (!IS_ERR(cnv)) + fput(cnv); + if (!IS_ERR(seq)) + fput(seq); + + return exists; +} + +static int zloop_ctl_add(struct zloop_options *opts) +{ + struct queue_limits lim = { + .max_hw_sectors = SZ_1M >> SECTOR_SHIFT, + .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT, + .chunk_sectors = opts->zone_size, + .features = BLK_FEAT_ZONED, + }; + unsigned int nr_zones, i, j; + struct zloop_device *zlo; + int ret = -EINVAL; + bool restore; + + __module_get(THIS_MODULE); + + nr_zones = opts->capacity >> ilog2(opts->zone_size); + if (opts->nr_conv_zones >= nr_zones) { + pr_err("Invalid number of conventional zones %u\n", + opts->nr_conv_zones); + goto out; + } + + zlo = kvzalloc(struct_size(zlo, zones, nr_zones), GFP_KERNEL); + if (!zlo) { + ret = -ENOMEM; + goto out; + } + zlo->state = Zlo_creating; + + ret = mutex_lock_killable(&zloop_ctl_mutex); + if (ret) + goto out_free_dev; + + /* Allocate id, if @opts->id >= 0, we're requesting that specific id */ + if (opts->id >= 0) { + ret = idr_alloc(&zloop_index_idr, zlo, + opts->id, opts->id + 1, GFP_KERNEL); + if (ret == -ENOSPC) + ret = -EEXIST; + } else { + ret = idr_alloc(&zloop_index_idr, zlo, 0, 0, GFP_KERNEL); + } + mutex_unlock(&zloop_ctl_mutex); + if (ret < 0) + goto out_free_dev; + + zlo->id = ret; + zlo->zone_shift = ilog2(opts->zone_size); + zlo->zone_size = opts->zone_size; + if (opts->zone_capacity) + zlo->zone_capacity = opts->zone_capacity; + else + zlo->zone_capacity = zlo->zone_size; + zlo->nr_zones = nr_zones; + zlo->nr_conv_zones = opts->nr_conv_zones; + zlo->buffered_io = opts->buffered_io; + + zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE, + opts->nr_queues * opts->queue_depth, zlo->id); + if (!zlo->workqueue) { + ret = -ENOMEM; + goto out_free_idr; + } + + if (opts->base_dir) + zlo->base_dir = kstrdup(opts->base_dir, GFP_KERNEL); + else + zlo->base_dir = kstrdup(ZLOOP_DEF_BASE_DIR, GFP_KERNEL); + if (!zlo->base_dir) { + ret = -ENOMEM; + goto out_destroy_workqueue; + } + + zlo->data_dir = zloop_filp_open_fmt(O_RDONLY | O_DIRECTORY, 0, "%s/%u", + zlo->base_dir, zlo->id); + if (IS_ERR(zlo->data_dir)) { + ret = PTR_ERR(zlo->data_dir); + pr_warn("Failed to open directory %s/%u (err=%d)\n", + zlo->base_dir, zlo->id, ret); + goto out_free_base_dir; + } + + /* + * If we already have zone files, we are restoring a device created by a + * previous add operation. In this case, zloop_init_zone() will check + * that the zone files are consistent with the zone configuration given. 
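For completeness, a minimal user-space sketch of driving the control interface, assuming only the /dev/zloop-control node and the "add"/"remove" option syntax from the Kconfig help and the parser above, and that the backing directory <base_dir>/<id> already exists; this is illustrative, not a supplied utility:

    /* Illustrative only: create and remove a zloop device from user space. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int zloop_ctl(const char *cmd)
    {
        int fd = open("/dev/zloop-control", O_WRONLY);
        ssize_t ret;

        if (fd < 0)
            return -1;
        ret = write(fd, cmd, strlen(cmd));
        close(fd);
        return ret < 0 ? -1 : 0;
    }

    int main(void)
    {
        /* The backing directory (<base_dir>/<id>) must already exist. */
        if (zloop_ctl("add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11"))
            perror("add");
        /* ... use /dev/zloop0 ... */
        if (zloop_ctl("remove id=0"))
            perror("remove");
        return 0;
    }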
+ */ + restore = zloop_dev_exists(zlo); + for (i = 0; i < nr_zones; i++) { + ret = zloop_init_zone(zlo, opts, i, restore); + if (ret) + goto out_close_files; + } + + lim.physical_block_size = zlo->block_size; + lim.logical_block_size = zlo->block_size; + + zlo->tag_set.ops = &zloop_mq_ops; + zlo->tag_set.nr_hw_queues = opts->nr_queues; + zlo->tag_set.queue_depth = opts->queue_depth; + zlo->tag_set.numa_node = NUMA_NO_NODE; + zlo->tag_set.cmd_size = sizeof(struct zloop_cmd); + zlo->tag_set.driver_data = zlo; + + ret = blk_mq_alloc_tag_set(&zlo->tag_set); + if (ret) { + pr_err("blk_mq_alloc_tag_set failed (err=%d)\n", ret); + goto out_close_files; + } + + zlo->disk = blk_mq_alloc_disk(&zlo->tag_set, &lim, zlo); + if (IS_ERR(zlo->disk)) { + pr_err("blk_mq_alloc_disk failed (err=%d)\n", ret); + ret = PTR_ERR(zlo->disk); + goto out_cleanup_tags; + } + zlo->disk->flags = GENHD_FL_NO_PART; + zlo->disk->fops = &zloop_fops; + zlo->disk->private_data = zlo; + sprintf(zlo->disk->disk_name, "zloop%d", zlo->id); + set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones); + + ret = blk_revalidate_disk_zones(zlo->disk); + if (ret) + goto out_cleanup_disk; + + ret = add_disk(zlo->disk); + if (ret) { + pr_err("add_disk failed (err=%d)\n", ret); + goto out_cleanup_disk; + } + + mutex_lock(&zloop_ctl_mutex); + zlo->state = Zlo_live; + mutex_unlock(&zloop_ctl_mutex); + + pr_info("Added device %d: %u zones of %llu MB, %u B block size\n", + zlo->id, zlo->nr_zones, + ((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20, + zlo->block_size); + + return 0; + +out_cleanup_disk: + put_disk(zlo->disk); +out_cleanup_tags: + blk_mq_free_tag_set(&zlo->tag_set); +out_close_files: + for (j = 0; j < i; j++) { + struct zloop_zone *zone = &zlo->zones[j]; + + if (!IS_ERR_OR_NULL(zone->file)) + fput(zone->file); + } + fput(zlo->data_dir); +out_free_base_dir: + kfree(zlo->base_dir); +out_destroy_workqueue: + destroy_workqueue(zlo->workqueue); +out_free_idr: + mutex_lock(&zloop_ctl_mutex); + idr_remove(&zloop_index_idr, zlo->id); + mutex_unlock(&zloop_ctl_mutex); +out_free_dev: + kvfree(zlo); +out: + module_put(THIS_MODULE); + if (ret == -ENOENT) + ret = -EINVAL; + return ret; +} + +static int zloop_ctl_remove(struct zloop_options *opts) +{ + struct zloop_device *zlo; + int ret; + + if (!(opts->mask & ZLOOP_OPT_ID)) { + pr_err("No ID specified\n"); + return -EINVAL; + } + + ret = mutex_lock_killable(&zloop_ctl_mutex); + if (ret) + return ret; + + zlo = idr_find(&zloop_index_idr, opts->id); + if (!zlo || zlo->state == Zlo_creating) { + ret = -ENODEV; + } else if (zlo->state == Zlo_deleting) { + ret = -EINVAL; + } else { + idr_remove(&zloop_index_idr, zlo->id); + zlo->state = Zlo_deleting; + } + + mutex_unlock(&zloop_ctl_mutex); + if (ret) + return ret; + + del_gendisk(zlo->disk); + put_disk(zlo->disk); + blk_mq_free_tag_set(&zlo->tag_set); + + pr_info("Removed device %d\n", opts->id); + + module_put(THIS_MODULE); + + return 0; +} + +static int zloop_parse_options(struct zloop_options *opts, const char *buf) +{ + substring_t args[MAX_OPT_ARGS]; + char *options, *o, *p; + unsigned int token; + int ret = 0; + + /* Set defaults. */ + opts->mask = 0; + opts->id = ZLOOP_DEF_ID; + opts->capacity = ZLOOP_DEF_ZONE_SIZE * ZLOOP_DEF_NR_ZONES; + opts->zone_size = ZLOOP_DEF_ZONE_SIZE; + opts->nr_conv_zones = ZLOOP_DEF_NR_CONV_ZONES; + opts->nr_queues = ZLOOP_DEF_NR_QUEUES; + opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH; + opts->buffered_io = ZLOOP_DEF_BUFFERED_IO; + + if (!buf) + return 0; + + /* Skip leading spaces before the options. 
*/ + while (isspace(*buf)) + buf++; + + options = o = kstrdup(buf, GFP_KERNEL); + if (!options) + return -ENOMEM; + + /* Parse the options, doing only some light invalid value checks. */ + while ((p = strsep(&o, ",\n")) != NULL) { + if (!*p) + continue; + + token = match_token(p, zloop_opt_tokens, args); + opts->mask |= token; + switch (token) { + case ZLOOP_OPT_ID: + if (match_int(args, &opts->id)) { + ret = -EINVAL; + goto out; + } + break; + case ZLOOP_OPT_CAPACITY: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid capacity\n"); + ret = -EINVAL; + goto out; + } + opts->capacity = + ((sector_t)token * SZ_1M) >> SECTOR_SHIFT; + break; + case ZLOOP_OPT_ZONE_SIZE: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token || token > ZLOOP_MAX_ZONE_SIZE_MB || + !is_power_of_2(token)) { + pr_err("Invalid zone size %u\n", token); + ret = -EINVAL; + goto out; + } + opts->zone_size = + ((sector_t)token * SZ_1M) >> SECTOR_SHIFT; + break; + case ZLOOP_OPT_ZONE_CAPACITY: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid zone capacity\n"); + ret = -EINVAL; + goto out; + } + opts->zone_capacity = + ((sector_t)token * SZ_1M) >> SECTOR_SHIFT; + break; + case ZLOOP_OPT_NR_CONV_ZONES: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + opts->nr_conv_zones = token; + break; + case ZLOOP_OPT_BASE_DIR: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->base_dir); + opts->base_dir = p; + break; + case ZLOOP_OPT_NR_QUEUES: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid number of queues\n"); + ret = -EINVAL; + goto out; + } + opts->nr_queues = min(token, num_online_cpus()); + break; + case ZLOOP_OPT_QUEUE_DEPTH: + if (match_uint(args, &token)) { + ret = -EINVAL; + goto out; + } + if (!token) { + pr_err("Invalid queue depth\n"); + ret = -EINVAL; + goto out; + } + opts->queue_depth = token; + break; + case ZLOOP_OPT_BUFFERED_IO: + opts->buffered_io = true; + break; + case ZLOOP_OPT_ERR: + default: + pr_warn("unknown parameter or missing value '%s'\n", p); + ret = -EINVAL; + goto out; + } + } + + ret = -EINVAL; + if (opts->capacity <= opts->zone_size) { + pr_err("Invalid capacity\n"); + goto out; + } + + if (opts->zone_capacity > opts->zone_size) { + pr_err("Invalid zone capacity\n"); + goto out; + } + + ret = 0; +out: + kfree(options); + return ret; +} + +enum { + ZLOOP_CTL_ADD, + ZLOOP_CTL_REMOVE, +}; + +static struct zloop_ctl_op { + int code; + const char *name; +} zloop_ctl_ops[] = { + { ZLOOP_CTL_ADD, "add" }, + { ZLOOP_CTL_REMOVE, "remove" }, + { -1, NULL }, +}; + +static ssize_t zloop_ctl_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *pos) +{ + struct zloop_options opts = { }; + struct zloop_ctl_op *op; + const char *buf, *opts_buf; + int i, ret; + + if (count > PAGE_SIZE) + return -ENOMEM; + + buf = memdup_user_nul(ubuf, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + for (i = 0; i < ARRAY_SIZE(zloop_ctl_ops); i++) { + op = &zloop_ctl_ops[i]; + if (!op->name) { + pr_err("Invalid operation\n"); + ret = -EINVAL; + goto out; + } + if (!strncmp(buf, op->name, strlen(op->name))) + break; + } + + if (count <= strlen(op->name)) + opts_buf = NULL; + else + opts_buf = buf + strlen(op->name); + + ret = zloop_parse_options(&opts, opts_buf); + if (ret) { + pr_err("Failed to parse options\n"); + goto out; + } + + switch (op->code) { + case ZLOOP_CTL_ADD: + ret 
= zloop_ctl_add(&opts); + break; + case ZLOOP_CTL_REMOVE: + ret = zloop_ctl_remove(&opts); + break; + default: + pr_err("Invalid operation\n"); + ret = -EINVAL; + goto out; + } + +out: + kfree(opts.base_dir); + kfree(buf); + return ret ? ret : count; +} + +static int zloop_ctl_show(struct seq_file *seq_file, void *private) +{ + const struct match_token *tok; + int i; + + /* Add operation */ + seq_printf(seq_file, "%s ", zloop_ctl_ops[0].name); + for (i = 0; i < ARRAY_SIZE(zloop_opt_tokens); i++) { + tok = &zloop_opt_tokens[i]; + if (!tok->pattern) + break; + if (i) + seq_putc(seq_file, ','); + seq_puts(seq_file, tok->pattern); + } + seq_putc(seq_file, '\n'); + + /* Remove operation */ + seq_puts(seq_file, zloop_ctl_ops[1].name); + seq_puts(seq_file, " id=%d\n"); + + return 0; +} + +static int zloop_ctl_open(struct inode *inode, struct file *file) +{ + file->private_data = NULL; + return single_open(file, zloop_ctl_show, NULL); +} + +static int zloop_ctl_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +static const struct file_operations zloop_ctl_fops = { + .owner = THIS_MODULE, + .open = zloop_ctl_open, + .release = zloop_ctl_release, + .write = zloop_ctl_write, + .read = seq_read, +}; + +static struct miscdevice zloop_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "zloop-control", + .fops = &zloop_ctl_fops, +}; + +static int __init zloop_init(void) +{ + int ret; + + ret = misc_register(&zloop_misc); + if (ret) { + pr_err("Failed to register misc device: %d\n", ret); + return ret; + } + pr_info("Module loaded\n"); + + return 0; +} + +static void __exit zloop_exit(void) +{ + misc_deregister(&zloop_misc); + idr_destroy(&zloop_index_idr); +} + +module_init(zloop_init); +module_exit(zloop_exit); + +MODULE_DESCRIPTION("Zoned loopback device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a42dedb78e0a..256b451bbe06 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3014,9 +3014,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev) static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) { int ret = 0; + unsigned int skip = 0; u8 pkt_type; - u8 *sk_ptr; - unsigned int sk_len; u16 seqno; u32 dump_size; @@ -3025,18 +3024,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) struct usb_device *udev = btdata->udev; pkt_type = hci_skb_pkt_type(skb); - sk_ptr = skb->data; - sk_len = skb->len; + skip = sizeof(struct hci_event_hdr); + if (pkt_type == HCI_ACLDATA_PKT) + skip += sizeof(struct hci_acl_hdr); - if (pkt_type == HCI_ACLDATA_PKT) { - sk_ptr += HCI_ACL_HDR_SIZE; - sk_len -= HCI_ACL_HDR_SIZE; - } - - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + skb_pull(skb, skip); + dump_hdr = (struct qca_dump_hdr *)skb->data; - dump_hdr = (struct qca_dump_hdr *)sk_ptr; seqno = le16_to_cpu(dump_hdr->seqno); if (seqno == 0) { set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); @@ -3056,16 +3050,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) btdata->qca_dump.ram_dump_size = dump_size; btdata->qca_dump.ram_dump_seqno = 0; - sk_ptr += offsetof(struct qca_dump_hdr, data0); - sk_len -= offsetof(struct qca_dump_hdr, data0); + + skb_pull(skb, offsetof(struct qca_dump_hdr, data0)); usb_disable_autosuspend(udev); bt_dev_info(hdev, "%s memdump size(%u)\n", (pkt_type == HCI_ACLDATA_PKT) ? 
"ACL" : "event", dump_size); } else { - sk_ptr += offsetof(struct qca_dump_hdr, data); - sk_len -= offsetof(struct qca_dump_hdr, data); + skb_pull(skb, offsetof(struct qca_dump_hdr, data)); } if (!btdata->qca_dump.ram_dump_size) { @@ -3085,7 +3078,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) return ret; } - skb_pull(skb, skb->len - sk_len); hci_devcd_append(hdev, skb); btdata->qca_dump.ram_dump_seqno++; if (seqno == QCA_LAST_SEQUENCE_NUM) { @@ -3113,68 +3105,58 @@ out: /* Return: true if the ACL packet is a dump packet, false otherwise. */ static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) { - u8 *sk_ptr; - unsigned int sk_len; - struct hci_event_hdr *event_hdr; struct hci_acl_hdr *acl_hdr; struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false; - sk_ptr = skb->data; - sk_len = skb->len; - - acl_hdr = hci_acl_hdr(skb); - if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) + if (!clone) return false; - sk_ptr += HCI_ACL_HDR_SIZE; - sk_len -= HCI_ACL_HDR_SIZE; - event_hdr = (struct hci_event_hdr *)sk_ptr; - - if ((event_hdr->evt != HCI_VENDOR_PKT) || - (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) - return false; + acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr)); + if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)) + goto out; - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out; - dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) || - (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || - (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return false; + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out; - return true; + is_dump = true; +out: + consume_skb(clone); + return is_dump; } /* Return: true if the event packet is a dump packet, false otherwise. 
*/ static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) { - u8 *sk_ptr; - unsigned int sk_len; - struct hci_event_hdr *event_hdr; struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false; - sk_ptr = skb->data; - sk_len = skb->len; - - event_hdr = hci_event_hdr(skb); - - if ((event_hdr->evt != HCI_VENDOR_PKT) - || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) + if (!clone) return false; - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out; - dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) || - (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || - (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return false; + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out; - return true; + is_dump = true; +out: + consume_skb(clone); + return is_dump; } static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index 1e57ebfb7622..6c3e5c5dae10 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -737,9 +737,9 @@ static int moxtet_irq_setup(struct moxtet *moxtet) { int i, ret; - moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node, - MOXTET_NIRQS, 0, - &moxtet_irq_domain, moxtet); + moxtet->irq.domain = irq_domain_create_simple(of_fwnode_handle(moxtet->dev->of_node), + MOXTET_NIRQS, 0, + &moxtet_irq_domain, moxtet); if (moxtet->irq.domain == NULL) { dev_err(moxtet->dev, "Could not add IRQ domain\n"); return -ENOMEM; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index b163e043c687..21a10552da61 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -3677,8 +3677,7 @@ static void cdrom_sysctl_register(void) static void cdrom_sysctl_unregister(void) { - if (cdrom_sysctl_header) - unregister_sysctl_table(cdrom_sysctl_header); + unregister_sysctl_table(cdrom_sysctl_header); } #else /* CONFIG_SYSCTL */ diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 8e41731d3642..bf490967241a 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -16,7 +16,7 @@ #include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include <asm/e820/api.h> -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <asm/gart.h> #include "agp.h" diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index e424360fb4a1..4787391bb6b4 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c @@ -11,6 +11,7 @@ #include <linux/page-flags.h> #include <linux/mm.h> #include <linux/jiffies.h> +#include <asm/msr.h> #include "agp.h" /* NVIDIA registers */ diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index 143406bc6939..d2b00458761e 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -37,6 +37,7 @@ struct atmel_trng { struct clk *clk; void __iomem *base; struct hwrng rng; + struct device *dev; bool has_half_rate; }; @@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, u32 *data = buf; int ret; - ret = pm_runtime_get_sync((struct device *)trng->rng.priv); + ret = pm_runtime_get_sync(trng->dev); if (ret < 0) { - pm_runtime_put_sync((struct device 
*)trng->rng.priv); + pm_runtime_put_sync(trng->dev); return ret; } @@ -79,8 +80,8 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, ret = 4; out: - pm_runtime_mark_last_busy((struct device *)trng->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv); + pm_runtime_mark_last_busy(trng->dev); + pm_runtime_put_sync_autosuspend(trng->dev); return ret; } @@ -134,9 +135,9 @@ static int atmel_trng_probe(struct platform_device *pdev) return -ENODEV; trng->has_half_rate = data->has_half_rate; + trng->dev = &pdev->dev; trng->rng.name = pdev->name; trng->rng.read = atmel_trng_read; - trng->rng.priv = (unsigned long)&pdev->dev; platform_set_drvdata(pdev, trng); #ifndef CONFIG_PM diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index 1e3048f2bb38..b7fa1bc1122b 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -36,6 +36,7 @@ struct mtk_rng { void __iomem *base; struct clk *clk; struct hwrng rng; + struct device *dev; }; static int mtk_rng_init(struct hwrng *rng) @@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) struct mtk_rng *priv = to_mtk_rng(rng); int retval = 0; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max >= sizeof(u32)) { if (!mtk_rng_wait_ready(rng, wait)) @@ -97,8 +98,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max -= sizeof(u32); } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? retval : -EIO; } @@ -112,13 +113,13 @@ static int mtk_rng_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + priv->dev = &pdev->dev; priv->rng.name = pdev->name; #ifndef CONFIG_PM priv->rng.init = mtk_rng_init; priv->rng.cleanup = mtk_rng_cleanup; #endif priv->rng.read = mtk_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; priv->rng.quality = 900; priv->clk = devm_clk_get(&pdev->dev, "rng"); diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index 9ff00f096f38..3e308c890bd2 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -32,6 +32,7 @@ struct npcm_rng { void __iomem *base; struct hwrng rng; + struct device *dev; u32 clkp; }; @@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) int retval = 0; int ready; - pm_runtime_get_sync((struct device *)priv->rng.priv); + pm_runtime_get_sync(priv->dev); while (max) { if (wait) { @@ -79,8 +80,8 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) max--; } - pm_runtime_mark_last_busy((struct device *)priv->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_sync_autosuspend(priv->dev); return retval || !wait ? 
retval : -EIO; } @@ -109,7 +110,7 @@ static int npcm_rng_probe(struct platform_device *pdev) #endif priv->rng.name = pdev->name; priv->rng.read = npcm_rng_read; - priv->rng.priv = (unsigned long)&pdev->dev; + priv->dev = &pdev->dev; priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev); writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG); diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index 161050591663..fb4a30b95507 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -93,6 +93,30 @@ #define TRNG_v1_VERSION_CODE 0x46bc /* end of TRNG_V1 register definitions */ +/* + * RKRNG register definitions + * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528 + * SoCs. It can either output true randomness (TRNG) or "deterministic" + * randomness derived from hashing the true entropy (DRNG). This driver + * implementation uses just the true entropy, and leaves stretching the entropy + * up to Linux. + */ +#define RKRNG_CFG 0x0000 +#define RKRNG_CTRL 0x0010 +#define RKRNG_CTRL_REQ_TRNG BIT(4) +#define RKRNG_STATE 0x0014 +#define RKRNG_STATE_TRNG_RDY BIT(4) +#define RKRNG_TRNG_DATA0 0x0050 +#define RKRNG_TRNG_DATA1 0x0054 +#define RKRNG_TRNG_DATA2 0x0058 +#define RKRNG_TRNG_DATA3 0x005C +#define RKRNG_TRNG_DATA4 0x0060 +#define RKRNG_TRNG_DATA5 0x0064 +#define RKRNG_TRNG_DATA6 0x0068 +#define RKRNG_TRNG_DATA7 0x006C +#define RKRNG_READ_LEN 32 + /* Before removing this assert, give rk3588_rng_read an upper bound of 32 */ static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0), "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats."); @@ -205,6 +229,46 @@ out: return (ret < 0) ? ret : to_read; } +static int rk3576_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + return rk_rng_enable_clks(rk_rng); +} + +static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RKRNG_READ_LEN); + int ret = 0; + u32 val; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16), + RKRNG_CTRL); + + if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val, + (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US, + RK_RNG_POLL_TIMEOUT_US)) { + dev_err(rk_rng->dev, "timed out waiting for data\n"); + ret = -ETIMEDOUT; + goto out; + } + + rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE); + + memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read); + +out: + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? 
ret : to_read; +} + static int rk3588_rng_init(struct hwrng *rng) { struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); @@ -305,6 +369,14 @@ static const struct rk_rng_soc_data rk3568_soc_data = { .reset_optional = false, }; +static const struct rk_rng_soc_data rk3576_soc_data = { + .rk_rng_init = rk3576_rng_init, + .rk_rng_read = rk3576_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + static const struct rk_rng_soc_data rk3588_soc_data = { .rk_rng_init = rk3588_rng_init, .rk_rng_read = rk3588_rng_read, @@ -397,6 +469,7 @@ static const struct dev_pm_ops rk_rng_pm_ops = { static const struct of_device_id rk_rng_dt_match[] = { { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data }, + { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data }, { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data }, { /* sentinel */ }, }; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 169eed162a7f..48839958b0b1 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn) { return devmem_is_allowed(pfn); } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - u64 from = ((u64)pfn) << PAGE_SHIFT; - u64 to = from + size; - u64 cursor = from; - - while (cursor < to) { - if (!devmem_is_allowed(pfn)) - return 0; - cursor += PAGE_SIZE; - pfn++; - } - return 1; -} #else static inline int page_is_allowed(unsigned long pfn) { return 1; } -static inline int range_is_allowed(unsigned long pfn, unsigned long size) -{ - return 1; -} #endif static inline bool should_stop_iteration(void) diff --git a/drivers/char/random.c b/drivers/char/random.c index 38f2fab29c56..5f22a08101f6 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -309,11 +309,11 @@ static void crng_reseed(struct work_struct *work) * key value, at index 4, so the state should always be zeroed out * immediately after using in order to maintain forward secrecy. * If the state cannot be erased in a timely manner, then it is - * safer to set the random_data parameter to &chacha_state[4] so - * that this function overwrites it before returning. + * safer to set the random_data parameter to &chacha_state->x[4] + * so that this function overwrites it before returning. */ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], - u32 chacha_state[CHACHA_STATE_WORDS], + struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { u8 first_block[CHACHA_BLOCK_SIZE]; @@ -321,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], BUG_ON(random_data_len > 32); chacha_init_consts(chacha_state); - memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); - memset(&chacha_state[12], 0, sizeof(u32) * 4); + memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE); + memset(&chacha_state->x[12], 0, sizeof(u32) * 4); chacha20_block(chacha_state, first_block); memcpy(key, first_block, CHACHA_KEY_SIZE); @@ -335,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], * random data. It also returns up to 32 bytes on its own of random data * that may be used; random_data_len may not be greater than 32. 
*/ -static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], +static void crng_make_state(struct chacha_state *chacha_state, u8 *random_data, size_t random_data_len) { unsigned long flags; @@ -395,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], static void _get_random_bytes(void *buf, size_t len) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 tmp[CHACHA_BLOCK_SIZE]; size_t first_block_len; @@ -403,26 +403,26 @@ static void _get_random_bytes(void *buf, size_t len) return; first_block_len = min_t(size_t, 32, len); - crng_make_state(chacha_state, buf, first_block_len); + crng_make_state(&chacha_state, buf, first_block_len); len -= first_block_len; buf += first_block_len; while (len) { if (len < CHACHA_BLOCK_SIZE) { - chacha20_block(chacha_state, tmp); + chacha20_block(&chacha_state, tmp); memcpy(buf, tmp, len); memzero_explicit(tmp, sizeof(tmp)); break; } - chacha20_block(chacha_state, buf); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, buf); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; len -= CHACHA_BLOCK_SIZE; buf += CHACHA_BLOCK_SIZE; } - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); } /* @@ -441,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes); static ssize_t get_random_bytes_user(struct iov_iter *iter) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 block[CHACHA_BLOCK_SIZE]; size_t ret = 0, copied; @@ -453,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) * bytes, in case userspace causes copy_to_iter() below to sleep * forever, so that we still retain forward secrecy in that case. */ - crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); + crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4], + CHACHA_KEY_SIZE); /* * However, if we're doing a read of len <= 32, we don't need to * use chacha_state after, so we can simply return those bytes to * the user directly. */ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { - ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); + ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter); goto out_zero_chacha; } for (;;) { - chacha20_block(chacha_state, block); - if (unlikely(chacha_state[12] == 0)) - ++chacha_state[13]; + chacha20_block(&chacha_state, block); + if (unlikely(chacha_state.x[12] == 0)) + ++chacha_state.x[13]; copied = copy_to_iter(block, sizeof(block), iter); ret += copied; @@ -484,7 +485,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) memzero_explicit(block, sizeof(block)); out_zero_chacha: - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); return ret ? 
ret : -EFAULT; } diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 12ee42a31c71..e7913b2853d5 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v) (unsigned char *)(v + sizeof(struct tcpa_event)); eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); - if (!eventname) { - printk(KERN_ERR "%s: ERROR - No Memory for event name\n ", - __func__); - return -EFAULT; - } + if (!eventname) + return -ENOMEM; /* 1st: PCR */ seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index)); diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c index 3169a87a56b6..4ead61f01299 100644 --- a/drivers/char/tpm/tpm_crb_ffa.c +++ b/drivers/char/tpm/tpm_crb_ffa.c @@ -38,9 +38,11 @@ * messages. * * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP - * are using the AArch32 SMC calling convention with register usage as - * defined in FF-A specification: - * w0: Function ID (0x8400006F or 0x84000070) + * are using the AArch32 or AArch64 SMC calling convention with register usage + * as defined in FF-A specification: + * w0: Function ID + * -for 32-bit: 0x8400006F or 0x84000070 + * -for 64-bit: 0xC400006F or 0xC4000070 * w1: Source/Destination IDs * w2: Reserved (MBZ) * w3-w7: Implementation defined, free to be used below @@ -68,7 +70,8 @@ #define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001 /* - * Return information on a given feature of the TPM service + * Notifies the TPM service that a TPM command or TPM locality request is + * ready to be processed, and allows the TPM service to process it. * Call register usage: * w3: Not used (MBZ) * w4: TPM service function ID, CRB_FFA_START @@ -105,7 +108,10 @@ struct tpm_crb_ffa { u16 minor_version; /* lock to protect sending of FF-A messages: */ struct mutex msg_data_lock; - struct ffa_send_direct_data direct_msg_data; + union { + struct ffa_send_direct_data direct_msg_data; + struct ffa_send_direct_data2 direct_msg_data2; + }; }; static struct tpm_crb_ffa *tpm_crb_ffa; @@ -185,18 +191,34 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id, msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops; - memset(&tpm_crb_ffa->direct_msg_data, 0x00, - sizeof(struct ffa_send_direct_data)); - - tpm_crb_ffa->direct_msg_data.data1 = func_id; - tpm_crb_ffa->direct_msg_data.data2 = a0; - tpm_crb_ffa->direct_msg_data.data3 = a1; - tpm_crb_ffa->direct_msg_data.data4 = a2; + if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { + memset(&tpm_crb_ffa->direct_msg_data2, 0x00, + sizeof(struct ffa_send_direct_data2)); + + tpm_crb_ffa->direct_msg_data2.data[0] = func_id; + tpm_crb_ffa->direct_msg_data2.data[1] = a0; + tpm_crb_ffa->direct_msg_data2.data[2] = a1; + tpm_crb_ffa->direct_msg_data2.data[3] = a2; + + ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev, + &tpm_crb_ffa->direct_msg_data2); + if (!ret) + ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]); + } else { + memset(&tpm_crb_ffa->direct_msg_data, 0x00, + sizeof(struct ffa_send_direct_data)); + + tpm_crb_ffa->direct_msg_data.data1 = func_id; + tpm_crb_ffa->direct_msg_data.data2 = a0; + tpm_crb_ffa->direct_msg_data.data3 = a1; + tpm_crb_ffa->direct_msg_data.data4 = a2; + + ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev, + &tpm_crb_ffa->direct_msg_data); + if (!ret) + ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1); + } - ret = 
msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev, - &tpm_crb_ffa->direct_msg_data); - if (!ret) - ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1); return ret; } @@ -231,8 +253,13 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00); if (!rc) { - *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); - *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) { + *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]); + *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]); + } else { + *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2); + } } return rc; @@ -277,8 +304,9 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev) tpm_crb_ffa = ERR_PTR(-ENODEV); // set tpm_crb_ffa so we can detect probe failure - if (!ffa_partition_supports_direct_recv(ffa_dev)) { - pr_err("TPM partition doesn't support direct message receive.\n"); + if (!ffa_partition_supports_direct_recv(ffa_dev) && + !ffa_partition_supports_direct_req2_recv(ffa_dev)) { + dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n"); return -EINVAL; } @@ -299,17 +327,17 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev) rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version, &tpm_crb_ffa->minor_version); if (rc) { - pr_err("failed to get crb interface version. rc:%d", rc); + dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc); goto out; } - pr_info("ABI version %u.%u", tpm_crb_ffa->major_version, + dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version, tpm_crb_ffa->minor_version); if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR || (tpm_crb_ffa->minor_version > 0 && tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) { - pr_err("Incompatible ABI version"); + dev_warn(&ffa_dev->dev, "Incompatible ABI version\n"); goto out; } diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 014db6386624..8ddf3a9a53df 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev) if (!clk_data) return -ENOMEM; + clk_data->num = S2MPS11_CLKS_NUM; + switch (hwid) { case S2MPS11X: s2mps11_reg = S2MPS11_REG_RTC_CTRL; @@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev) clk_data->hws[i] = &s2mps11_clks[i].hw; } - clk_data->num = S2MPS11_CLKS_NUM; of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get, clk_data); diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c index 595e010341f7..be703f250197 100644 --- a/drivers/clk/rockchip/clk-rk3576.c +++ b/drivers/clk/rockchip/clk-rk3576.c @@ -541,6 +541,8 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = { RK3576_CLKGATE_CON(5), 14, GFLAGS), GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0, RK3576_CLKGATE_CON(5), 15, GFLAGS), + GATE(CLK_OTP_PHY_G, "clk_otp_phy_g", "xin24m", 0, + RK3576_CLKGATE_CON(6), 0, GFLAGS), COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0, RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS, RK3576_CLKGATE_CON(6), 3, GFLAGS), diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c index 
bb66c906ebbb..e83d4fd40240 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c @@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = { { .hw = &pll_periph0_2x_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); - -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", + mmc0_mmc1_parents, 0x830, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", + mmc0_mmc1_parents, 0x834, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static const struct clk_parent_data mmc2_parents[] = { { .fw_name = "hosc" }, @@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = { { .hw = &pll_periph0_800M_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents, + 0x838, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws, 0x84c, BIT(0), 0); diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h index b35aeec70484..bb09c649bfa3 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.h +++ b/drivers/clk/sunxi-ng/ccu_mp.h @@ -52,6 +52,28 @@ struct ccu_mp { } \ } +#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \ + _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + _gate, _postdiv, _flags)\ + struct ccu_mp _struct = { \ + .enable = _gate, \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ + .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \ + .fixed_post_div = _postdiv, \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_FIXED_POSTDIV, \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \ + _parents, \ + &ccu_mp_ops, \ + _flags), \ + } \ + } + #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ _mshift, _mwidth, \ _pshift, _pwidth, \ @@ -109,8 +131,7 @@ struct ccu_mp { _mshift, _mwidth, \ _pshift, _pwidth, \ _muxshift, _muxwidth, \ - _gate, _features, \ - _flags) \ + _gate, _flags, _features) \ struct ccu_mp _struct = { \ .enable = _gate, \ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index d26b610e4f24..ea4b8f220a05 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -79,11 +79,11 @@ static bool boost_state(unsigned int cpu) case X86_VENDOR_INTEL: case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: - rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr); + rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr); return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); case X86_VENDOR_HYGON: case X86_VENDOR_AMD: - rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr); + rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr); return !(msr & MSR_K7_HWCR_CPB_DIS); } return false; @@ -110,14 +110,14 @@ static int 
boost_set_msr(bool enable) return -EINVAL; } - rdmsrl(msr_addr, val); + rdmsrq(msr_addr, val); if (enable) val &= ~msr_mask; else val |= msr_mask; - wrmsrl(msr_addr, val); + wrmsrq(msr_addr, val); return 0; } diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index e671bc7d1550..c8d031b297d2 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -31,6 +31,8 @@ #include <acpi/cppc_acpi.h> +#include <asm/msr.h> + #include "amd-pstate.h" @@ -90,9 +92,9 @@ static int amd_pstate_ut_check_enabled(u32 index) if (get_shared_mem()) return 0; - ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable); + ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable); if (ret) { - pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret); + pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret); return ret; } @@ -137,7 +139,7 @@ static int amd_pstate_ut_check_perf(u32 index) lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf; lowest_perf = cppc_perf.lowest_perf; } else { - ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); + ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); if (ret) { pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret); return ret; diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index b961f3a3b580..66fdc74f13ef 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -197,7 +197,7 @@ static u8 msr_get_epp(struct amd_cpudata *cpudata) u64 value; int ret; - ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); if (ret < 0) { pr_debug("Could not retrieve energy perf value (%d)\n", ret); return ret; @@ -258,10 +258,10 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf, return 0; if (fast_switch) { - wrmsrl(MSR_AMD_CPPC_REQ, value); + wrmsrq(MSR_AMD_CPPC_REQ, value); return 0; } else { - int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); + int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); if (ret) return ret; @@ -309,7 +309,7 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp) if (value == prev) return 0; - ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); + ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); if (ret) { pr_err("failed to set energy perf value (%d)\n", ret); return ret; @@ -371,7 +371,7 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp) static inline int msr_cppc_enable(struct cpufreq_policy *policy) { - return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1); + return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1); } static int shmem_cppc_enable(struct cpufreq_policy *policy) @@ -391,7 +391,7 @@ static int msr_init_perf(struct amd_cpudata *cpudata) union perf_cached perf = READ_ONCE(cpudata->perf); u64 cap1, numerator; - int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, + int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &cap1); if (ret) return ret; @@ -518,8 +518,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) unsigned long flags; local_irq_save(flags); - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); tsc = rdtsc(); if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) { @@ -772,7 +772,7 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata) goto exit_err; } - ret = 
rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); if (ret) { pr_err_once("failed to read initial CPU boost state!\n"); ret = -EIO; @@ -791,7 +791,7 @@ exit_err: static void amd_perf_ctl_reset(unsigned int cpu) { - wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); + wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); } #define CPPC_MAX_PERF U8_MAX @@ -831,8 +831,10 @@ static void amd_pstate_update_limits(unsigned int cpu) if (highest_perf_changed) { WRITE_ONCE(cpudata->prefcore_ranking, cur_high); - if (cur_high < CPPC_MAX_PERF) + if (cur_high < CPPC_MAX_PERF) { sched_set_itmt_core_prio((int)cur_high, cpu); + sched_update_asym_prefer_cpu(cpu, prev_high, cur_high); + } } } @@ -1485,7 +1487,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) } if (cpu_feature_enabled(X86_FEATURE_CPPC)) { - ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); + ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); if (ret) return ret; WRITE_ONCE(cpudata->cppc_req_cached, value); diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index 59b19b9975e8..13fed4b9e02b 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void) pci_dev_put(pcidev); } - if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) + if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) return -ENODEV; if (!(val >> CLASS_CODE_SHIFT)) diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index d23a97ba6478..320a0af2266a 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy) return -ENODEV; } /* Enable Enhanced PowerSaver */ - rdmsrl(MSR_IA32_MISC_ENABLE, val); + rdmsrq(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; - wrmsrl(MSR_IA32_MISC_ENABLE, val); + wrmsrq(MSR_IA32_MISC_ENABLE, val); /* Can be locked at 0 */ - rdmsrl(MSR_IA32_MISC_ENABLE, val); + rdmsrq(MSR_IA32_MISC_ENABLE, val); if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { pr_info("Can't enable Enhanced PowerSaver\n"); return -ENODEV; diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index 36494b855e41..fc5a58088b35 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c @@ -21,7 +21,6 @@ #include <linux/cpufreq.h> #include <asm/cpu_device_id.h> -#include <asm/msr.h> #include <linux/timex.h> #include <linux/io.h> diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ba9bf06f1c77..db8c99535e61 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -601,7 +601,7 @@ static bool turbo_is_disabled(void) if (!cpu_feature_enabled(X86_FEATURE_IDA)) return true; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + rdmsrq(MSR_IA32_MISC_ENABLE, misc_en); return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); } @@ -623,7 +623,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data) if (!boot_cpu_has(X86_FEATURE_EPB)) return -ENXIO; - ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret) return (s16)ret; @@ -640,7 +640,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) * MSR_HWP_REQUEST, so need to read and get EPP. 
*/ if (!hwp_req_data) { - epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, + epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &hwp_req_data); if (epp) return epp; @@ -662,12 +662,12 @@ static int intel_pstate_set_epb(int cpu, s16 pref) if (!boot_cpu_has(X86_FEATURE_EPB)) return -ENXIO; - ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); + ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); if (ret) return ret; epb = (epb & ~0x0f) | pref; - wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); + wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb); return 0; } @@ -765,7 +765,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) * function, so it cannot run in parallel with the update below. */ WRITE_ONCE(cpu->hwp_req_cached, value); - ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); if (!ret) cpu->epp_cached = epp; @@ -919,7 +919,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) if (ratio <= 0) { u64 cap; - rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); + rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); ratio = HWP_GUARANTEED_PERF(cap); } @@ -1091,7 +1091,7 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) { u64 cap; - rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); + rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(cpu->hwp_cap_cached, cap); cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); @@ -1165,7 +1165,7 @@ static void intel_pstate_hwp_set(unsigned int cpu) if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) min = max; - rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); + rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value); value &= ~HWP_MIN_PERF(~0L); value |= HWP_MIN_PERF(min); @@ -1212,7 +1212,7 @@ static void intel_pstate_hwp_set(unsigned int cpu) } skip_epp: WRITE_ONCE(cpu_data->hwp_req_cached, value); - wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value); } static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); @@ -1259,7 +1259,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu) if (boot_cpu_has(X86_FEATURE_HWP_EPP)) value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); mutex_lock(&hybrid_capacity_lock); @@ -1288,7 +1288,7 @@ static void set_power_ctl_ee_state(bool input) u64 power_ctl; mutex_lock(&intel_pstate_driver_lock); - rdmsrl(MSR_IA32_POWER_CTL, power_ctl); + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); if (input) { power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); power_ctl_ee_state = POWER_CTL_EE_ENABLE; @@ -1296,7 +1296,7 @@ static void set_power_ctl_ee_state(bool input) power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); power_ctl_ee_state = POWER_CTL_EE_DISABLE; } - wrmsrl(MSR_IA32_POWER_CTL, power_ctl); + wrmsrq(MSR_IA32_POWER_CTL, power_ctl); mutex_unlock(&intel_pstate_driver_lock); } @@ -1305,7 +1305,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata); static void intel_pstate_hwp_reenable(struct cpudata *cpu) { intel_pstate_hwp_enable(cpu); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); } static int intel_pstate_suspend(struct cpufreq_policy *policy) @@ -1706,7 +1706,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut u64 power_ctl; int enable; - 
rdmsrl(MSR_IA32_POWER_CTL, power_ctl); + rdmsrq(MSR_IA32_POWER_CTL, power_ctl); enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); return sprintf(buf, "%d\n", !enable); } @@ -1858,7 +1858,7 @@ static void intel_pstate_notify_work(struct work_struct *work) hybrid_update_capacity(cpudata); } - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } static DEFINE_RAW_SPINLOCK(hwp_notify_lock); @@ -1880,7 +1880,7 @@ void notify_hwp_interrupt(void) if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS; - rdmsrl_safe(MSR_HWP_STATUS, &value); + rdmsrq_safe(MSR_HWP_STATUS, &value); if (!(value & status_mask)) return; @@ -1897,7 +1897,7 @@ void notify_hwp_interrupt(void) return; ack_intr: - wrmsrl_safe(MSR_HWP_STATUS, 0); + wrmsrq_safe(MSR_HWP_STATUS, 0); raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); } @@ -1908,8 +1908,8 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; - /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); raw_spin_lock_irq(&hwp_notify_lock); cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); @@ -1936,9 +1936,9 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ; - /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); + /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } } @@ -1977,9 +1977,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) { /* First disable HWP notification interrupt till we activate again */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); - wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); + wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); intel_pstate_enable_hwp_interrupt(cpudata); @@ -1993,7 +1993,7 @@ static int atom_get_min_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_RATIOS, value); return (value >> 8) & 0x7F; } @@ -2001,7 +2001,7 @@ static int atom_get_max_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_RATIOS, value); return (value >> 16) & 0x7F; } @@ -2009,7 +2009,7 @@ static int atom_get_turbo_pstate(int not_used) { u64 value; - rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); + rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value); return value & 0x7F; } @@ -2044,7 +2044,7 @@ static int silvermont_get_scaling(void) static int silvermont_freq_table[] = { 83300, 100000, 133300, 116700, 80000}; - rdmsrl(MSR_FSB_FREQ, value); + rdmsrq(MSR_FSB_FREQ, value); i = value & 0x7; WARN_ON(i > 4); @@ -2060,7 +2060,7 @@ static int airmont_get_scaling(void) 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500}; - rdmsrl(MSR_FSB_FREQ, value); + rdmsrq(MSR_FSB_FREQ, value); i = value & 0xF; WARN_ON(i > 8); @@ -2071,7 +2071,7 @@ static void 
atom_get_vid(struct cpudata *cpudata) { u64 value; - rdmsrl(MSR_ATOM_CORE_VIDS, value); + rdmsrq(MSR_ATOM_CORE_VIDS, value); cpudata->vid.min = int_tofp((value >> 8) & 0x7f); cpudata->vid.max = int_tofp((value >> 16) & 0x7f); cpudata->vid.ratio = div_fp( @@ -2079,7 +2079,7 @@ static void atom_get_vid(struct cpudata *cpudata) int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); - rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); + rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value); cpudata->vid.turbo = value & 0x7f; } @@ -2087,7 +2087,7 @@ static int core_get_min_pstate(int cpu) { u64 value; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 40) & 0xFF; } @@ -2095,7 +2095,7 @@ static int core_get_max_pstate_physical(int cpu) { u64 value; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 8) & 0xFF; } @@ -2109,13 +2109,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info) int err; /* Get the TDP level (0, 1, 2) to get ratios */ - err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); + err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); if (err) return err; /* TDP MSR are continuous starting at 0x648 */ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); - err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); + err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); if (err) return err; @@ -2140,7 +2140,7 @@ static int core_get_max_pstate(int cpu) int tdp_ratio; int err; - rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); + rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); max_pstate = (plat_info >> 8) & 0xFF; tdp_ratio = core_get_tdp_ratio(cpu, plat_info); @@ -2152,7 +2152,7 @@ static int core_get_max_pstate(int cpu) return tdp_ratio; } - err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); + err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); if (!err) { int tar_levels; @@ -2172,7 +2172,7 @@ static int core_get_turbo_pstate(int cpu) u64 value; int nont, ret; - rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); nont = core_get_max_pstate(cpu); ret = (value) & 255; if (ret <= nont) @@ -2201,7 +2201,7 @@ static int knl_get_turbo_pstate(int cpu) u64 value; int nont, ret; - rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); nont = core_get_max_pstate(cpu); ret = (((value) >> 8) & 0xFF); if (ret <= nont) @@ -2247,7 +2247,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) * the CPU being updated, so force the register update to run on the * right CPU. 
*/ - wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } @@ -2354,7 +2354,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) return; hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; - wrmsrl(MSR_HWP_REQUEST, hwp_req); + wrmsrq(MSR_HWP_REQUEST, hwp_req); cpu->last_update = cpu->sample.time; } @@ -2367,7 +2367,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) expired = time_after64(cpu->sample.time, cpu->last_update + hwp_boost_hold_time_ns); if (expired) { - wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); + wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached); cpu->hwp_boost_min = 0; } } @@ -2428,8 +2428,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) u64 tsc; local_irq_save(flags); - rdmsrl(MSR_IA32_APERF, aperf); - rdmsrl(MSR_IA32_MPERF, mperf); + rdmsrq(MSR_IA32_APERF, aperf); + rdmsrq(MSR_IA32_MPERF, mperf); tsc = rdtsc(); if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { local_irq_restore(flags); @@ -2523,7 +2523,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) return; cpu->pstate.current_pstate = pstate; - wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); + wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } static void intel_pstate_adjust_pstate(struct cpudata *cpu) @@ -3103,19 +3103,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, WRITE_ONCE(cpu->hwp_req_cached, value); if (fast_switch) - wrmsrl(MSR_HWP_REQUEST, value); + wrmsrq(MSR_HWP_REQUEST, value); else - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); } static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, u32 target_pstate, bool fast_switch) { if (fast_switch) - wrmsrl(MSR_IA32_PERF_CTL, + wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, target_pstate)); else - wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, target_pstate)); } @@ -3259,7 +3259,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) intel_pstate_get_hwp_cap(cpu); - rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); + rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); WRITE_ONCE(cpu->hwp_req_cached, value); cpu->epp_cached = intel_pstate_get_epp(cpu, value); @@ -3326,7 +3326,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy) * written by it may not be suitable. 
*/ value &= ~HWP_DESIRED_PERF(~0L); - wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); WRITE_ONCE(cpu->hwp_req_cached, value); } @@ -3576,7 +3576,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) id = x86_match_cpu(intel_pstate_cpu_oob_ids); if (id) { - rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); + rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr); if (misc_pwr & BITMASK_OOB) { pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); @@ -3632,7 +3632,7 @@ static bool intel_pstate_hwp_is_enabled(void) { u64 value; - rdmsrl(MSR_PM_ENABLE, value); + rdmsrq(MSR_PM_ENABLE, value); return !!(value & 0x1); } diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 68ccd73c8129..ba0e08c8486a 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index) { union msr_bcr2 bcr2; - rdmsrl(MSR_VIA_BCR2, bcr2.val); + rdmsrq(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; bcr2.bits.CLOCKMUL = mults_index & 0xff; @@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index) /* Sync to timer tick */ safe_halt(); /* Change frequency on next halt or sleep */ - wrmsrl(MSR_VIA_BCR2, bcr2.val); + wrmsrq(MSR_VIA_BCR2, bcr2.val); /* Invoke transition */ ACPI_FLUSH_CPU_CACHE(); halt(); /* Disable software clock multiplier */ local_irq_disable(); - rdmsrl(MSR_VIA_BCR2, bcr2.val); + rdmsrq(MSR_VIA_BCR2, bcr2.val); bcr2.bits.ESOFTBF = 0; - wrmsrl(MSR_VIA_BCR2, bcr2.val); + wrmsrq(MSR_VIA_BCR2, bcr2.val); } /* For processor with Longhaul MSR */ @@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, union msr_longhaul longhaul; u32 t; - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Setup new frequency */ if (!revid_errata) longhaul.bits.RevisionKey = longhaul.bits.RevisionID; @@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, /* Raise voltage if necessary */ if (can_scale_voltage && dir) { longhaul.bits.EnableSoftVID = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); @@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index, t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); } /* Change frequency on next halt or sleep */ longhaul.bits.EnableSoftBusRatio = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); halt(); @@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index, } /* Disable bus ratio bit */ longhaul.bits.EnableSoftBusRatio = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Reduce voltage if necessary */ if (can_scale_voltage && !dir) { longhaul.bits.EnableSoftVID = 1; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); /* Change voltage */ if (!cx_address) { ACPI_FLUSH_CPU_CACHE(); @@ -231,7 +231,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, t = inl(acpi_gbl_FADT.xpm_timer_block.address); } longhaul.bits.EnableSoftVID = 0; - wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + 
wrmsrq(MSR_VIA_LONGHAUL, longhaul.val); } } @@ -534,7 +534,7 @@ static void longhaul_setup_voltagescaling(void) unsigned int j, speed, pos, kHz_step, numvscales; int min_vid_speed; - rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + rdmsrq(MSR_VIA_LONGHAUL, longhaul.val); if (!(longhaul.bits.RevisionID & 1)) { pr_info("Voltage scaling not supported by CPU\n"); return; diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index fb2197dc170f..31039330a3ba 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -219,13 +219,13 @@ static void change_FID(int fid) { union msr_fidvidctl fidvidctl; - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.FID != fid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.FID = fid; fidvidctl.bits.VIDC = 0; fidvidctl.bits.FIDC = 1; - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -234,13 +234,13 @@ static void change_VID(int vid) { union msr_fidvidctl fidvidctl; - rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.VID != vid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.VID = vid; fidvidctl.bits.FIDC = 0; fidvidctl.bits.VIDC = 1; - wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) fid = powernow_table[index].driver_data & 0xFF; vid = (powernow_table[index].driver_data & 0xFF00) >> 8; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; freqs.old = fsb * fid_codes[cfid] / 10; @@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu) if (cpu) return 0; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; return fsb * fid_codes[cfid] / 10; @@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy) if (policy->cpu != 0) return -ENODEV; - rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val); recalibrate_cpu_khz(); diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index 103d2519dff7..b360f03a116f 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c @@ -21,7 +21,6 @@ #include <linux/io.h> #include <asm/cpu_device_id.h> -#include <asm/msr.h> #define MMCR_BASE 0xfffef000 /* The default base address */ #define OFFS_CPUCTL 0x2 /* CPU Control Register */ diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 47082782008a..5686369779be 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -530,13 +530,6 @@ source "drivers/crypto/cavium/nitrox/Kconfig" source "drivers/crypto/marvell/Kconfig" source "drivers/crypto/intel/Kconfig" -config CRYPTO_DEV_CAVIUM_ZIP - tristate "Cavium ZIP driver" - depends on PCI && 64BIT && (ARM64 || COMPILE_TEST) - help - Select this option if you want to enable compression/decompression - acceleration on Cavium's ARM based SoCs - config CRYPTO_DEV_QCE tristate "Qualcomm crypto engine accelerator" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index c97f0ebc55ec..22eadcc8f4a2 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -8,12 +8,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += 
atmel-i2c.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o -obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ -obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ -obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/ obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o @@ -50,3 +47,4 @@ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += intel/ obj-y += starfive/ +obj-y += cavium/ diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 19b7fb4a93e8..f9cf00d690e2 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -33,22 +33,30 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG || sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) { - algt->stat_fb_maxsg++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_maxsg++; + return true; } if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) { - algt->stat_fb_leniv++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_leniv++; + return true; } if (areq->cryptlen == 0) { - algt->stat_fb_len0++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_len0++; + return true; } if (areq->cryptlen % 16) { - algt->stat_fb_mod16++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_mod16++; + return true; } @@ -56,12 +64,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) sg = areq->src; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) { - algt->stat_fb_srcali++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srcali++; + return true; } todo = min(len, sg->length); if (todo % 4) { - algt->stat_fb_srclen++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srclen++; + return true; } len -= todo; @@ -72,12 +84,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) sg = areq->dst; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) { - algt->stat_fb_dstali++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_dstali++; + return true; } todo = min(len, sg->length); if (todo % 4) { - algt->stat_fb_dstlen++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_dstlen++; + return true; } len -= todo; @@ -100,9 +116,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq) algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_fb++; -#endif } skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); @@ -146,9 +160,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), op->keylen); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_req++; -#endif + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_req++; flow = rctx->flow; @@ -275,13 +288,16 @@ theend_sgs: } else { if (nr_sgs > 0) dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); + + if (nr_sgd > 0) + dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); } theend_iv: if 
(areq->iv && ivsize > 0) { - if (rctx->addr_iv) + if (!dma_mapping_error(ce->dev, rctx->addr_iv)) dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); + offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { memcpy(areq->iv, chan->backup_iv, ivsize); @@ -434,17 +450,17 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm)); - memcpy(algt->fbname, - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), - CRYPTO_MAX_ALG_NAME); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + memcpy(algt->fbname, + crypto_skcipher_driver_name(op->fallback_tfm), + CRYPTO_MAX_ALG_NAME); - err = pm_runtime_get_sync(op->ce->dev); + err = pm_runtime_resume_and_get(op->ce->dev); if (err < 0) goto error_pm; return 0; error_pm: - pm_runtime_put_noidle(op->ce->dev); crypto_free_skcipher(op->fallback_tfm); return err; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index ec1ffda9ea32..658f520cee0c 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce) err = pm_runtime_set_suspended(ce->dev); if (err) return err; - pm_runtime_enable(ce->dev); - return err; -} -static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce) -{ - pm_runtime_disable(ce->dev); + err = devm_pm_runtime_enable(ce->dev); + if (err) + return err; + + return 0; } static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce) @@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev) "sun8i-ce-ns", ce); if (err) { dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err); - goto error_irq; + goto error_pm; } err = sun8i_ce_register_algs(ce); @@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev) return 0; error_alg: sun8i_ce_unregister_algs(ce); -error_irq: - sun8i_ce_pm_exit(ce); error_pm: sun8i_ce_free_chanlist(ce, MAXFLOW - 1); return err; @@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev) #endif sun8i_ce_free_chanlist(ce, MAXFLOW - 1); - - sun8i_ce_pm_exit(ce); } static const struct of_device_id sun8i_ce_crypto_of_match_table[] = { diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 6072dd9f390b..bef44f350167 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -23,6 +23,18 @@ #include <linux/string.h> #include "sun8i-ce.h" +static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm) +{ + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { + struct sun8i_ce_alg_template *algt __maybe_unused; + struct ahash_alg *alg = crypto_ahash_alg(tfm); + + algt = container_of(alg, struct sun8i_ce_alg_template, + alg.hash.base); + algt->stat_fb++; + } +} + int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm) { struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm); @@ -48,15 +60,16 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm) sizeof(struct sun8i_ce_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); - memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm), - CRYPTO_MAX_ALG_NAME); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + memcpy(algt->fbname, + crypto_ahash_driver_name(op->fallback_tfm), + CRYPTO_MAX_ALG_NAME); - err = 
pm_runtime_get_sync(op->ce->dev); + err = pm_runtime_resume_and_get(op->ce->dev); if (err < 0) goto error_pm; return 0; error_pm: - pm_runtime_put_noidle(op->ce->dev); crypto_free_ahash(op->fallback_tfm); return err; } @@ -78,7 +91,9 @@ int sun8i_ce_hash_init(struct ahash_request *areq) memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx)); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -90,7 +105,9 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out) struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } @@ -102,7 +119,9 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in) struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -113,21 +132,13 @@ int sun8i_ce_hash_final(struct ahash_request *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = areq->result; - - if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { - struct sun8i_ce_alg_template *algt __maybe_unused; - struct ahash_alg *alg = crypto_ahash_alg(tfm); + sun8i_ce_hash_stat_fb_inc(tfm); - algt = container_of(alg, struct sun8i_ce_alg_template, - alg.hash.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_fb++; -#endif - } + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -139,10 +150,10 @@ int sun8i_ce_hash_update(struct ahash_request *areq) struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -153,24 +164,14 @@ int sun8i_ce_hash_finup(struct ahash_request *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); - 
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + sun8i_ce_hash_stat_fb_inc(tfm); - if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { - struct sun8i_ce_alg_template *algt __maybe_unused; - struct ahash_alg *alg = crypto_ahash_alg(tfm); - - algt = container_of(alg, struct sun8i_ce_alg_template, - alg.hash.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_fb++; -#endif - } + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -181,24 +182,14 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; - - if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { - struct sun8i_ce_alg_template *algt __maybe_unused; - struct ahash_alg *alg = crypto_ahash_alg(tfm); + sun8i_ce_hash_stat_fb_inc(tfm); - algt = container_of(alg, struct sun8i_ce_alg_template, - alg.hash.base); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_fb++; -#endif - } + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); return crypto_ahash_digest(&rctx->fallback_req); } @@ -213,22 +204,30 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq) algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); if (areq->nbytes == 0) { - algt->stat_fb_len0++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_len0++; + return true; } /* we need to reserve one SG for padding one */ if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) { - algt->stat_fb_maxsg++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_maxsg++; + return true; } sg = areq->src; while (sg) { if (sg->length % 4) { - algt->stat_fb_srclen++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srclen++; + return true; } if (!IS_ALIGNED(sg->offset, sizeof(u32))) { - algt->stat_fb_srcali++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_fb_srcali++; + return true; } sg = sg_next(sg); @@ -244,21 +243,11 @@ int sun8i_ce_hash_digest(struct ahash_request *areq) struct sun8i_ce_alg_template *algt; struct sun8i_ce_dev *ce; struct crypto_engine *engine; - struct scatterlist *sg; - int nr_sgs, e, i; + int e; if (sun8i_ce_hash_need_fallback(areq)) return sun8i_ce_hash_digest_fb(areq); - nr_sgs = sg_nents_for_len(areq->src, areq->nbytes); - if (nr_sgs > MAX_SG - 1) - return sun8i_ce_hash_digest_fb(areq); - - for_each_sg(areq->src, sg, nr_sgs, i) { - if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) - return 
sun8i_ce_hash_digest_fb(areq); - } - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); ce = algt->ce; @@ -343,9 +332,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) u32 common; u64 byte_count; __le32 *bf; - void *buf = NULL; + void *buf, *result; int j, i, todo; - void *result = NULL; u64 bs; int digestsize; dma_addr_t addr_res, addr_pad; @@ -365,22 +353,22 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA); if (!buf) { err = -ENOMEM; - goto theend; + goto err_out; } bf = (__le32 *)buf; result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); if (!result) { err = -ENOMEM; - goto theend; + goto err_free_buf; } flow = rctx->flow; chan = &ce->chanlist[flow]; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt->stat_req++; -#endif + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) + algt->stat_req++; + dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes); cet = chan->tl; @@ -398,7 +386,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; - goto theend; + goto err_free_result; } len = areq->nbytes; @@ -411,7 +399,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (len > 0) { dev_err(ce->dev, "remaining len %d\n", len); err = -EINVAL; - goto theend; + goto err_unmap_src; } addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE); cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res); @@ -419,7 +407,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (dma_mapping_error(ce->dev, addr_res)) { dev_err(ce->dev, "DMA map dest\n"); err = -EINVAL; - goto theend; + goto err_unmap_src; } byte_count = areq->nbytes; @@ -441,7 +429,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) } if (!j) { err = -EINVAL; - goto theend; + goto err_unmap_result; } addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE); @@ -450,7 +438,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) if (dma_mapping_error(ce->dev, addr_pad)) { dev_err(ce->dev, "DMA error on padding SG\n"); err = -EINVAL; - goto theend; + goto err_unmap_result; } if (ce->variant->hash_t_dlen_in_bits) @@ -463,16 +451,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + +err_unmap_result: dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); + if (!err) + memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); +err_unmap_src: + dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); -theend: - kfree(buf); +err_free_result: kfree(result); + +err_free_buf: + kfree(buf); + +err_out: local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); + return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 3b5c2af013d0..83df4d719053 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx { * @flow: the flow to use for this request */ struct sun8i_ce_hash_reqctx { - struct ahash_request fallback_req; int flow; + struct ahash_request fallback_req; // 
keep at the end }; /* diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 9b9605ce8ee6..8831bcb230c2 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) /* we need to copy all IVs from source in case DMA is bi-directionnal */ while (sg && len) { - if (sg_dma_len(sg) == 0) { + if (sg->length == 0) { sg = sg_next(sg); continue; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 753f67a36dc5..8bc08089f044 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -150,7 +150,9 @@ int sun8i_ss_hash_init(struct ahash_request *areq) memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx)); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -162,7 +164,9 @@ int sun8i_ss_hash_export(struct ahash_request *areq, void *out) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } @@ -174,7 +178,9 @@ int sun8i_ss_hash_import(struct ahash_request *areq, const void *in) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -186,9 +192,10 @@ int sun8i_ss_hash_final(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); @@ -212,10 +219,10 @@ int sun8i_ss_hash_update(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -227,12 +234,11 @@ int 
sun8i_ss_hash_finup(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); @@ -256,12 +262,11 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq) struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index e0af611a95d8..38e8a61e9166 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -12,9 +12,6 @@ #include <linux/interrupt.h> #include <linux/spinlock_types.h> #include <linux/scatterlist.h> -#include <linux/crypto.h> -#include <linux/hash.h> -#include <crypto/internal/hash.h> #include <linux/dma-mapping.h> #include <crypto/algapi.h> #include <crypto/aead.h> @@ -72,7 +69,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req, { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); - __le32 iv[AES_IV_SIZE]; + __le32 iv[AES_IV_SIZE / 4]; if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) return -EINVAL; @@ -429,7 +426,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); - __le32 iv[16]; + __le32 iv[4]; u32 tmp_sa[SA_AES128_CCM_LEN + 4]; struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa; unsigned int len = req->cryptlen; @@ -602,106 +599,3 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req) { return crypto4xx_crypt_aes_gcm(req, true); } - -/* - * HASH SHA1 Functions - */ -static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, - unsigned int sa_len, - unsigned char ha, - unsigned char hm) -{ - struct crypto_alg *alg = tfm->__crt_alg; - struct crypto4xx_alg *my_alg; - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); - struct dynamic_sa_hash160 *sa; - int rc; - - my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg, - alg.u.hash); - ctx->dev = my_alg->dev; - - /* Create SA */ - if (ctx->sa_in || ctx->sa_out) - crypto4xx_free_sa(ctx); - - rc = crypto4xx_alloc_sa(ctx, sa_len); - if (rc) - return rc; - - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct crypto4xx_ctx)); - sa = (struct 
dynamic_sa_hash160 *)ctx->sa_in; - set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV, - SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, - SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL, - SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, - SA_OPCODE_HASH, DIR_INBOUND); - set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH, - CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, - SA_SEQ_MASK_OFF, SA_MC_ENABLE, - SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, - SA_NOT_COPY_HDR); - /* Need to zero hash digest in SA */ - memset(sa->inner_digest, 0, sizeof(sa->inner_digest)); - memset(sa->outer_digest, 0, sizeof(sa->outer_digest)); - - return 0; -} - -int crypto4xx_hash_init(struct ahash_request *req) -{ - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - int ds; - struct dynamic_sa_ctl *sa; - - sa = ctx->sa_in; - ds = crypto_ahash_digestsize( - __crypto_ahash_cast(req->base.tfm)); - sa->sa_command_0.bf.digest_len = ds >> 2; - sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA; - - return 0; -} - -int crypto4xx_hash_update(struct ahash_request *req) -{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - struct scatterlist dst; - unsigned int ds = crypto_ahash_digestsize(ahash); - - sg_init_one(&dst, req->result, ds); - - return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, - req->nbytes, NULL, 0, ctx->sa_in, - ctx->sa_len, 0, NULL); -} - -int crypto4xx_hash_final(struct ahash_request *req) -{ - return 0; -} - -int crypto4xx_hash_digest(struct ahash_request *req) -{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - struct scatterlist dst; - unsigned int ds = crypto_ahash_digestsize(ahash); - - sg_init_one(&dst, req->result, ds); - - return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, - req->nbytes, NULL, 0, ctx->sa_in, - ctx->sa_len, 0, NULL); -} - -/* - * SHA1 Algorithm - */ -int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm) -{ - return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1, - SA_HASH_MODE_HASH); -} diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index ec3ccfa60445..8cdc66d520c9 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -485,18 +485,6 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, } } -static void crypto4xx_copy_digest_to_dst(void *dst, - struct pd_uinfo *pd_uinfo, - struct crypto4xx_ctx *ctx) -{ - struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in; - - if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) { - memcpy(dst, pd_uinfo->sr_va->save_digest, - SA_HASH_ALG_SHA1_DIGEST_SIZE); - } -} - static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, struct pd_uinfo *pd_uinfo) { @@ -549,23 +537,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, skcipher_request_complete(req, 0); } -static void crypto4xx_ahash_done(struct crypto4xx_device *dev, - struct pd_uinfo *pd_uinfo) -{ - struct crypto4xx_ctx *ctx; - struct ahash_request *ahash_req; - - ahash_req = ahash_request_cast(pd_uinfo->async_req); - ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req)); - - crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx); - crypto4xx_ret_sg_desc(dev, pd_uinfo); - - if (pd_uinfo->state & PD_ENTRY_BUSY) - ahash_request_complete(ahash_req, -EINPROGRESS); - ahash_request_complete(ahash_req, 0); -} - static void crypto4xx_aead_done(struct crypto4xx_device *dev, struct pd_uinfo 
*pd_uinfo, struct ce_pd *pd) @@ -642,9 +613,6 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) case CRYPTO_ALG_TYPE_AEAD: crypto4xx_aead_done(dev, pd_uinfo, pd); break; - case CRYPTO_ALG_TYPE_AHASH: - crypto4xx_ahash_done(dev, pd_uinfo); - break; } } @@ -676,7 +644,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, struct scatterlist *src, struct scatterlist *dst, const unsigned int datalen, - const __le32 *iv, const u32 iv_len, + const void *iv, const u32 iv_len, const struct dynamic_sa_ctl *req_sa, const unsigned int sa_len, const unsigned int assoclen, @@ -912,8 +880,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, } pd->pd_ctl.w = PD_CTL_HOST_READY | - ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) || - (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? + ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? PD_CTL_HASH_FINAL : 0); pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen); pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0); @@ -1019,10 +986,6 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, rc = crypto_register_aead(&alg->alg.u.aead); break; - case CRYPTO_ALG_TYPE_AHASH: - rc = crypto_register_ahash(&alg->alg.u.hash); - break; - case CRYPTO_ALG_TYPE_RNG: rc = crypto_register_rng(&alg->alg.u.rng); break; @@ -1048,10 +1011,6 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { list_del(&alg->entry); switch (alg->alg.type) { - case CRYPTO_ALG_TYPE_AHASH: - crypto_unregister_ahash(&alg->alg.u.hash); - break; - case CRYPTO_ALG_TYPE_AEAD: crypto_unregister_aead(&alg->alg.u.aead); break; diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index 3adcc5e65694..ee36630c670f 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -16,7 +16,6 @@ #include <linux/ratelimit.h> #include <linux/mutex.h> #include <linux/scatterlist.h> -#include <crypto/internal/hash.h> #include <crypto/internal/aead.h> #include <crypto/internal/rng.h> #include <crypto/internal/skcipher.h> @@ -135,7 +134,6 @@ struct crypto4xx_alg_common { u32 type; union { struct skcipher_alg cipher; - struct ahash_alg hash; struct aead_alg aead; struct rng_alg rng; } u; @@ -147,6 +145,12 @@ struct crypto4xx_alg { struct crypto4xx_device *dev; }; +#if IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION >= 120000 +#define BUILD_PD_ACCESS __attribute__((access(read_only, 6, 7))) +#else +#define BUILD_PD_ACCESS +#endif + int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); int crypto4xx_build_pd(struct crypto_async_request *req, @@ -154,11 +158,11 @@ int crypto4xx_build_pd(struct crypto_async_request *req, struct scatterlist *src, struct scatterlist *dst, const unsigned int datalen, - const __le32 *iv, const u32 iv_len, + const void *iv, const u32 iv_len, const struct dynamic_sa_ctl *sa, const unsigned int sa_len, const unsigned int assoclen, - struct scatterlist *dst_tmp); + struct scatterlist *dst_tmp) BUILD_PD_ACCESS; int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen); int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher, @@ -177,11 +181,6 @@ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req); int crypto4xx_decrypt_noiv_block(struct skcipher_request *req); int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); int 
crypto4xx_rfc3686_decrypt(struct skcipher_request *req); -int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); -int crypto4xx_hash_digest(struct ahash_request *req); -int crypto4xx_hash_final(struct ahash_request *req); -int crypto4xx_hash_update(struct ahash_request *req); -int crypto4xx_hash_init(struct ahash_request *req); /* * Note: Only use this function to copy items that is word aligned. diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 14bf86957d31..27c5d000b4b2 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -1743,7 +1743,8 @@ static struct skcipher_alg aes_xts_alg = { .base.cra_driver_name = "atmel-xts-aes", .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx), - .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, @@ -2220,7 +2221,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) static void atmel_aes_crypto_alg_init(struct crypto_alg *alg) { - alg->cra_flags |= CRYPTO_ALG_ASYNC; + alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->cra_alignmask = 0xf; alg->cra_priority = ATMEL_AES_PRIORITY; alg->cra_module = THIS_MODULE; diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 67a170608566..2cc36da163e8 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1254,7 +1254,8 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm) static void atmel_sha_alg_init(struct ahash_alg *alg) { alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; - alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx); alg->halg.base.cra_module = THIS_MODULE; alg->halg.base.cra_init = atmel_sha_cra_init; @@ -2041,7 +2042,8 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm) static void atmel_sha_hmac_alg_init(struct ahash_alg *alg) { alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; - alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; + alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY; alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx); alg->halg.base.cra_module = THIS_MODULE; alg->halg.base.cra_init = atmel_sha_hmac_cra_init; diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index de9717e221e4..098f5532f389 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -785,7 +785,7 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm) static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg) { alg->base.cra_priority = ATMEL_TDES_PRIORITY; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx); alg->base.cra_module = THIS_MODULE; diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index d4b39184dbdb..38ff931059b4 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -573,6 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX7*", .data = &caam_imx7_data }, { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, { .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data }, + { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data }, { .soc_id = "VF*", .data = &caam_vf610_data }, { .family = "Freescale i.MX" }, { 
/* sentinel */ } diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile index 4679c06b611f..75227c587ed0 100644 --- a/drivers/crypto/cavium/Makefile +++ b/drivers/crypto/cavium/Makefile @@ -2,4 +2,5 @@ # # Makefile for Cavium crypto device drivers # -obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/ +obj-$(CONFIG_CRYPTO_DEV_CPT) += cpt/ +obj-$(CONFIG_CRYPTO_DEV_NITROX) += nitrox/ diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile deleted file mode 100644 index 020d189d793d..000000000000 --- a/drivers/crypto/cavium/zip/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Makefile for Cavium's ZIP Driver. -# - -obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o -thunderx_zip-y := zip_main.o \ - zip_device.o \ - zip_crypto.o \ - zip_mem.o \ - zip_deflate.o \ - zip_inflate.o diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h deleted file mode 100644 index 54f6fb054119..000000000000 --- a/drivers/crypto/cavium/zip/common.h +++ /dev/null @@ -1,222 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. 
- ***********************license end**************************************/ - -#ifndef __COMMON_H__ -#define __COMMON_H__ - -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/seq_file.h> -#include <linux/string.h> -#include <linux/types.h> - -/* Device specific zlib function definitions */ -#include "zip_device.h" - -/* ZIP device definitions */ -#include "zip_main.h" - -/* ZIP memory allocation/deallocation related definitions */ -#include "zip_mem.h" - -/* Device specific structure definitions */ -#include "zip_regs.h" - -#define ZIP_ERROR -1 - -#define ZIP_FLUSH_FINISH 4 - -#define RAW_FORMAT 0 /* for rawpipe */ -#define ZLIB_FORMAT 1 /* for zpipe */ -#define GZIP_FORMAT 2 /* for gzpipe */ -#define LZS_FORMAT 3 /* for lzspipe */ - -/* Max number of ZIP devices supported */ -#define MAX_ZIP_DEVICES 2 - -/* Configures the number of zip queues to be used */ -#define ZIP_NUM_QUEUES 2 - -#define DYNAMIC_STOP_EXCESS 1024 - -/* Maximum buffer sizes in direct mode */ -#define MAX_INPUT_BUFFER_SIZE (64 * 1024) -#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024) - -/** - * struct zip_operation - common data structure for comp and decomp operations - * @input: Next input byte is read from here - * @output: Next output byte written here - * @ctx_addr: Inflate context buffer address - * @history: Pointer to the history buffer - * @input_len: Number of bytes available at next_in - * @input_total_len: Total number of input bytes read - * @output_len: Remaining free space at next_out - * @output_total_len: Total number of bytes output so far - * @csum: Checksum value of the uncompressed data - * @flush: Flush flag - * @format: Format (depends on stream's wrap) - * @speed: Speed depends on stream's level - * @ccode: Compression code ( stream's strategy) - * @lzs_flag: Flag for LZS support - * @begin_file: Beginning of file indication for inflate - * @history_len: Size of the history data - * @end_file: Ending of the file indication for inflate - * @compcode: Completion status of the ZIP invocation - * @bytes_read: Input bytes read in current instruction - * @bits_processed: Total bits processed for entire file - * @sizeofptr: To distinguish between ILP32 and LP64 - * @sizeofzops: Optional just for padding - * - * This structure is used to maintain the required meta data for the - * comp and decomp operations. - */ -struct zip_operation { - u8 *input; - u8 *output; - u64 ctx_addr; - u64 history; - - u32 input_len; - u32 input_total_len; - - u32 output_len; - u32 output_total_len; - - u32 csum; - u32 flush; - - u32 format; - u32 speed; - u32 ccode; - u32 lzs_flag; - - u32 begin_file; - u32 history_len; - - u32 end_file; - u32 compcode; - u32 bytes_read; - u32 bits_processed; - - u32 sizeofptr; - u32 sizeofzops; -}; - -static inline int zip_poll_result(union zip_zres_s *result) -{ - int retries = 1000; - - while (!result->s.compcode) { - if (!--retries) { - pr_err("ZIP ERR: request timed out"); - return -ETIMEDOUT; - } - udelay(10); - /* - * Force re-reading of compcode which is updated - * by the ZIP coprocessor. - */ - rmb(); - } - return 0; -} - -/* error messages */ -#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \ - fmt "\n", __func__, __LINE__, ## args) - -#ifdef MSG_ENABLE -/* Enable all messages */ -#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args) -#else -#define zip_msg(fmt, args...) 
-#endif - -#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE) - -#ifdef DEBUG_LEVEL - -#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \ - strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__) - -#if DEBUG_LEVEL >= 4 - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \ - fmt "\n", FILE_NAME, __func__, __LINE__, ## args) - -#elif DEBUG_LEVEL >= 3 - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \ - fmt "\n", FILE_NAME, __func__, __LINE__, ## args) - -#elif DEBUG_LEVEL >= 2 - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \ - fmt "\n", __func__, __LINE__, ## args) - -#else - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args) - -#endif /* DEBUG LEVEL >=4 */ - -#else - -#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args) - -#endif /* DEBUG_LEVEL */ -#else - -#define zip_dbg(fmt, args...) - -#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/ - -#endif diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c deleted file mode 100644 index 02e87f2d50db..000000000000 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ /dev/null @@ -1,261 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. 
- ***********************license end**************************************/ - -#include "zip_crypto.h" - -static void zip_static_init_zip_ops(struct zip_operation *zip_ops, - int lzs_flag) -{ - zip_ops->flush = ZIP_FLUSH_FINISH; - - /* equivalent to level 6 of opensource zlib */ - zip_ops->speed = 1; - - if (!lzs_flag) { - zip_ops->ccode = 0; /* Auto Huffman */ - zip_ops->lzs_flag = 0; - zip_ops->format = ZLIB_FORMAT; - } else { - zip_ops->ccode = 3; /* LZS Encoding */ - zip_ops->lzs_flag = 1; - zip_ops->format = LZS_FORMAT; - } - zip_ops->begin_file = 1; - zip_ops->history_len = 0; - zip_ops->end_file = 1; - zip_ops->compcode = 0; - zip_ops->csum = 1; /* Adler checksum desired */ -} - -static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag) -{ - struct zip_operation *comp_ctx = &zip_ctx->zip_comp; - struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp; - - zip_static_init_zip_ops(comp_ctx, lzs_flag); - zip_static_init_zip_ops(decomp_ctx, lzs_flag); - - comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE); - if (!comp_ctx->input) - return -ENOMEM; - - comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE); - if (!comp_ctx->output) - goto err_comp_input; - - decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE); - if (!decomp_ctx->input) - goto err_comp_output; - - decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE); - if (!decomp_ctx->output) - goto err_decomp_input; - - return 0; - -err_decomp_input: - zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE); - -err_comp_output: - zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE); - -err_comp_input: - zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE); - - return -ENOMEM; -} - -static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx) -{ - struct zip_operation *comp_ctx = &zip_ctx->zip_comp; - struct zip_operation *dec_ctx = &zip_ctx->zip_decomp; - - zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE); - zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE); - - zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE); - zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE); -} - -static int zip_compress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, - struct zip_kernel_ctx *zip_ctx) -{ - struct zip_operation *zip_ops = NULL; - struct zip_state *zip_state; - struct zip_device *zip = NULL; - int ret; - - if (!zip_ctx || !src || !dst || !dlen) - return -ENOMEM; - - zip = zip_get_device(zip_get_node_id()); - if (!zip) - return -ENODEV; - - zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); - if (!zip_state) - return -ENOMEM; - - zip_ops = &zip_ctx->zip_comp; - - zip_ops->input_len = slen; - zip_ops->output_len = *dlen; - memcpy(zip_ops->input, src, slen); - - ret = zip_deflate(zip_ops, zip_state, zip); - - if (!ret) { - *dlen = zip_ops->output_len; - memcpy(dst, zip_ops->output, *dlen); - } - kfree(zip_state); - return ret; -} - -static int zip_decompress(const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, - struct zip_kernel_ctx *zip_ctx) -{ - struct zip_operation *zip_ops = NULL; - struct zip_state *zip_state; - struct zip_device *zip = NULL; - int ret; - - if (!zip_ctx || !src || !dst || !dlen) - return -ENOMEM; - - zip = zip_get_device(zip_get_node_id()); - if (!zip) - return -ENODEV; - - zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); - if (!zip_state) - return -ENOMEM; - - zip_ops = &zip_ctx->zip_decomp; - memcpy(zip_ops->input, src, slen); - - /* Work around for a bug in zlib which needs an extra bytes sometimes 
*/ - if (zip_ops->ccode != 3) /* Not LZS Encoding */ - zip_ops->input[slen++] = 0; - - zip_ops->input_len = slen; - zip_ops->output_len = *dlen; - - ret = zip_inflate(zip_ops, zip_state, zip); - - if (!ret) { - *dlen = zip_ops->output_len; - memcpy(dst, zip_ops->output, *dlen); - } - kfree(zip_state); - return ret; -} - -/* SCOMP framework start */ -void *zip_alloc_scomp_ctx_deflate(void) -{ - int ret; - struct zip_kernel_ctx *zip_ctx; - - zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL); - if (!zip_ctx) - return ERR_PTR(-ENOMEM); - - ret = zip_ctx_init(zip_ctx, 0); - - if (ret) { - kfree_sensitive(zip_ctx); - return ERR_PTR(ret); - } - - return zip_ctx; -} - -void *zip_alloc_scomp_ctx_lzs(void) -{ - int ret; - struct zip_kernel_ctx *zip_ctx; - - zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL); - if (!zip_ctx) - return ERR_PTR(-ENOMEM); - - ret = zip_ctx_init(zip_ctx, 1); - - if (ret) { - kfree_sensitive(zip_ctx); - return ERR_PTR(ret); - } - - return zip_ctx; -} - -void zip_free_scomp_ctx(void *ctx) -{ - struct zip_kernel_ctx *zip_ctx = ctx; - - zip_ctx_exit(zip_ctx); - kfree_sensitive(zip_ctx); -} - -int zip_scomp_compress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) -{ - struct zip_kernel_ctx *zip_ctx = ctx; - - return zip_compress(src, slen, dst, dlen, zip_ctx); -} - -int zip_scomp_decompress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx) -{ - struct zip_kernel_ctx *zip_ctx = ctx; - - return zip_decompress(src, slen, dst, dlen, zip_ctx); -} /* SCOMP framework end */ diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h deleted file mode 100644 index 10899ece2d1f..000000000000 --- a/drivers/crypto/cavium/zip/zip_crypto.h +++ /dev/null @@ -1,68 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. 
MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_CRYPTO_H__ -#define __ZIP_CRYPTO_H__ - -#include <crypto/internal/scompress.h> -#include "common.h" -#include "zip_deflate.h" -#include "zip_inflate.h" - -struct zip_kernel_ctx { - struct zip_operation zip_comp; - struct zip_operation zip_decomp; -}; - -void *zip_alloc_scomp_ctx_deflate(void); -void *zip_alloc_scomp_ctx_lzs(void); -void zip_free_scomp_ctx(void *zip_ctx); -int zip_scomp_compress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx); -int zip_scomp_decompress(struct crypto_scomp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen, void *ctx); -#endif diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c deleted file mode 100644 index d7133f857d67..000000000000 --- a/drivers/crypto/cavium/zip/zip_deflate.c +++ /dev/null @@ -1,200 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. 
MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include <linux/delay.h> -#include <linux/sched.h> - -#include "common.h" -#include "zip_deflate.h" - -/* Prepares the deflate zip command */ -static int prepare_zip_command(struct zip_operation *zip_ops, - struct zip_state *s, union zip_inst_s *zip_cmd) -{ - union zip_zres_s *result_ptr = &s->result; - - memset(zip_cmd, 0, sizeof(s->zip_cmd)); - memset(result_ptr, 0, sizeof(s->result)); - - /* IWORD #0 */ - /* History gather */ - zip_cmd->s.hg = 0; - /* compression enable = 1 for deflate */ - zip_cmd->s.ce = 1; - /* sf (sync flush) */ - zip_cmd->s.sf = 1; - /* ef (end of file) */ - if (zip_ops->flush == ZIP_FLUSH_FINISH) { - zip_cmd->s.ef = 1; - zip_cmd->s.sf = 0; - } - - zip_cmd->s.cc = zip_ops->ccode; - /* ss (compression speed/storage) */ - zip_cmd->s.ss = zip_ops->speed; - - /* IWORD #1 */ - /* adler checksum */ - zip_cmd->s.adlercrc32 = zip_ops->csum; - zip_cmd->s.historylength = zip_ops->history_len; - zip_cmd->s.dg = 0; - - /* IWORD # 6 and 7 - compression input/history pointer */ - zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input); - zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len + - zip_ops->history_len); - zip_cmd->s.ds = 0; - - /* IWORD # 8 and 9 - Output pointer */ - zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output); - zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len; - /* maximum number of output-stream bytes that can be written */ - zip_cmd->s.totaloutputlength = zip_ops->output_len; - - /* IWORD # 10 and 11 - Result pointer */ - zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr); - /* Clearing completion code */ - result_ptr->s.compcode = 0; - - return 0; -} - -/** - * zip_deflate - API to offload deflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to zip device structure - * - * This function prepares the zip deflate command and submits it to the zip - * engine for processing. 
- * - * Return: 0 if successful or error code - */ -int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev) -{ - union zip_inst_s *zip_cmd = &s->zip_cmd; - union zip_zres_s *result_ptr = &s->result; - u32 queue; - - /* Prepares zip command based on the input parameters */ - prepare_zip_command(zip_ops, s, zip_cmd); - - atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes); - /* Loads zip command into command queues and rings door bell */ - queue = zip_load_instr(zip_cmd, zip_dev); - - /* Stats update for compression requests submitted */ - atomic64_inc(&zip_dev->stats.comp_req_submit); - - /* Wait for completion or error */ - zip_poll_result(result_ptr); - - /* Stats update for compression requests completed */ - atomic64_inc(&zip_dev->stats.comp_req_complete); - - zip_ops->compcode = result_ptr->s.compcode; - switch (zip_ops->compcode) { - case ZIP_CMD_NOTDONE: - zip_dbg("Zip instruction not yet completed"); - return ZIP_ERROR; - - case ZIP_CMD_SUCCESS: - zip_dbg("Zip instruction completed successfully"); - zip_update_cmd_bufs(zip_dev, queue); - break; - - case ZIP_CMD_DTRUNC: - zip_dbg("Output Truncate error"); - /* Returning ZIP_ERROR to avoid copy to user */ - return ZIP_ERROR; - - default: - zip_err("Zip instruction failed. Code:%d", zip_ops->compcode); - return ZIP_ERROR; - } - - /* Update the CRC depending on the format */ - switch (zip_ops->format) { - case RAW_FORMAT: - zip_dbg("RAW Format: %d ", zip_ops->format); - /* Get checksum from engine, need to feed it again */ - zip_ops->csum = result_ptr->s.adler32; - break; - - case ZLIB_FORMAT: - zip_dbg("ZLIB Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.adler32; - break; - - case GZIP_FORMAT: - zip_dbg("GZIP Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.crc32; - break; - - case LZS_FORMAT: - zip_dbg("LZS Format: %d ", zip_ops->format); - break; - - default: - zip_err("Unknown Format:%d\n", zip_ops->format); - } - - atomic64_add(result_ptr->s.totalbyteswritten, - &zip_dev->stats.comp_out_bytes); - - /* Update output_len */ - if (zip_ops->output_len < result_ptr->s.totalbyteswritten) { - /* Dynamic stop && strm->output_len < zipconstants[onfsize] */ - zip_err("output_len (%d) < total bytes written(%d)\n", - zip_ops->output_len, result_ptr->s.totalbyteswritten); - zip_ops->output_len = 0; - - } else { - zip_ops->output_len = result_ptr->s.totalbyteswritten; - } - - return 0; -} diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h deleted file mode 100644 index 1d32e76edc4d..000000000000 --- a/drivers/crypto/cavium/zip/zip_deflate.h +++ /dev/null @@ -1,62 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_DEFLATE_H__ -#define __ZIP_DEFLATE_H__ - -/** - * zip_deflate - API to offload deflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to the structure representing zip device - * - * This function prepares the zip deflate command and submits it to the zip - * engine by ringing the doorbell. - * - * Return: 0 if successful or error code - */ -int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev); -#endif diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c deleted file mode 100644 index f174ec29ed69..000000000000 --- a/drivers/crypto/cavium/zip/zip_device.c +++ /dev/null @@ -1,202 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. 
- * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include "common.h" -#include "zip_deflate.h" - -/** - * zip_cmd_queue_consumed - Calculates the space consumed in the command queue. - * - * @zip_dev: Pointer to zip device structure - * @queue: Queue number - * - * Return: Bytes consumed in the command queue buffer. - */ -static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue) -{ - return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) * - sizeof(u64 *)); -} - -/** - * zip_load_instr - Submits the instruction into the ZIP command queue - * @instr: Pointer to the instruction to be submitted - * @zip_dev: Pointer to ZIP device structure to which the instruction is to - * be submitted - * - * This function copies the ZIP instruction to the command queue and rings the - * doorbell to notify the engine of the instruction submission. The command - * queue is maintained in a circular fashion. When there is space for exactly - * one instruction in the queue, next chunk pointer of the queue is made to - * point to the head of the queue, thus maintaining a circular queue. - * - * Return: Queue number to which the instruction was submitted - */ -u32 zip_load_instr(union zip_inst_s *instr, - struct zip_device *zip_dev) -{ - union zip_quex_doorbell dbell; - u32 queue = 0; - u32 consumed = 0; - u64 *ncb_ptr = NULL; - union zip_nptr_s ncp; - - /* - * Distribute the instructions between the enabled queues based on - * the CPU id. - */ - if (raw_smp_processor_id() % 2 == 0) - queue = 0; - else - queue = 1; - - zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue); - - /* Take cmd buffer lock */ - spin_lock(&zip_dev->iq[queue].lock); - - /* - * Command Queue implementation - * 1. If there is place for new instructions, push the cmd at sw_head. - * 2. If there is place for exactly one instruction, push the new cmd - * at the sw_head. Make sw_head point to the sw_tail to make it - * circular. Write sw_head's physical address to the "Next-Chunk - * Buffer Ptr" to make it cmd_hw_tail. - * 3. Ring the door bell. 
- */ - zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head); - zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail); - - consumed = zip_cmd_queue_consumed(zip_dev, queue); - /* Check if there is space to push just one cmd */ - if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) { - zip_dbg("Cmd queue space available for single command"); - /* Space for one cmd, pust it and make it circular queue */ - memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, - sizeof(union zip_inst_s)); - zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ - - /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */ - ncb_ptr = zip_dev->iq[queue].sw_head; - - zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx", - ncb_ptr, zip_dev->iq[queue].sw_head - 16); - - /* Using Circular command queue */ - zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail; - /* Mark this buffer for free */ - zip_dev->iq[queue].free_flag = 1; - - /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */ - ncp.u_reg64 = 0ull; - ncp.s.addr = __pa(zip_dev->iq[queue].sw_head); - *ncb_ptr = ncp.u_reg64; - zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx", - *ncb_ptr, __pa(zip_dev->iq[queue].sw_head)); - - zip_dev->iq[queue].pend_cnt++; - - } else { - zip_dbg("Enough space is available for commands"); - /* Push this cmd to cmd queue buffer */ - memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, - sizeof(union zip_inst_s)); - zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ - - zip_dev->iq[queue].pend_cnt++; - } - zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", - zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail, - zip_dev->iq[queue].hw_tail); - - zip_dbg(" Pushed the new cmd : pend_cnt : %d", - zip_dev->iq[queue].pend_cnt); - - /* Ring the doorbell */ - dbell.u_reg64 = 0ull; - dbell.s.dbell_cnt = 1; - zip_reg_write(dbell.u_reg64, - (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue))); - - /* Unlock cmd buffer lock */ - spin_unlock(&zip_dev->iq[queue].lock); - - return queue; -} - -/** - * zip_update_cmd_bufs - Updates the queue statistics after posting the - * instruction - * @zip_dev: Pointer to zip device structure - * @queue: Queue number - */ -void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue) -{ - /* Take cmd buffer lock */ - spin_lock(&zip_dev->iq[queue].lock); - - /* Check if the previous buffer can be freed */ - if (zip_dev->iq[queue].free_flag == 1) { - zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail"); - /* Reset the free flag */ - zip_dev->iq[queue].free_flag = 0; - - /* Point the hw_tail to start of the new chunk buffer */ - zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head; - } else { - zip_dbg("Free flag not set. increment hw tail"); - zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */ - } - - zip_dev->iq[queue].done_cnt++; - zip_dev->iq[queue].pend_cnt--; - - zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", - zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail, - zip_dev->iq[queue].hw_tail); - zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt); - - spin_unlock(&zip_dev->iq[queue].lock); -} diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h deleted file mode 100644 index 9e18b3b93d38..000000000000 --- a/drivers/crypto/cavium/zip/zip_device.h +++ /dev/null @@ -1,108 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. 
- * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. 
- ***********************license end**************************************/ - -#ifndef __ZIP_DEVICE_H__ -#define __ZIP_DEVICE_H__ - -#include <linux/types.h> -#include "zip_main.h" - -struct sg_info { - /* - * Pointer to the input data when scatter_gather == 0 and - * pointer to the input gather list buffer when scatter_gather == 1 - */ - union zip_zptr_s *gather; - - /* - * Pointer to the output data when scatter_gather == 0 and - * pointer to the output scatter list buffer when scatter_gather == 1 - */ - union zip_zptr_s *scatter; - - /* - * Holds size of the output buffer pointed by scatter list - * when scatter_gather == 1 - */ - u64 scatter_buf_size; - - /* for gather data */ - u64 gather_enable; - - /* for scatter data */ - u64 scatter_enable; - - /* Number of gather list pointers for gather data */ - u32 gbuf_cnt; - - /* Number of scatter list pointers for scatter data */ - u32 sbuf_cnt; - - /* Buffers allocation state */ - u8 alloc_state; -}; - -/** - * struct zip_state - Structure representing the required information related - * to a command - * @zip_cmd: Pointer to zip instruction structure - * @result: Pointer to zip result structure - * @ctx: Context pointer for inflate - * @history: Decompression history pointer - * @sginfo: Scatter-gather info structure - */ -struct zip_state { - union zip_inst_s zip_cmd; - union zip_zres_s result; - union zip_zptr_s *ctx; - union zip_zptr_s *history; - struct sg_info sginfo; -}; - -#define ZIP_CONTEXT_SIZE 2048 -#define ZIP_INFLATE_HISTORY_SIZE 32768 -#define ZIP_DEFLATE_HISTORY_SIZE 32768 - -#endif diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c deleted file mode 100644 index 7e0d73e2f89e..000000000000 --- a/drivers/crypto/cavium/zip/zip_inflate.c +++ /dev/null @@ -1,223 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. 
MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include <linux/delay.h> -#include <linux/sched.h> - -#include "common.h" -#include "zip_inflate.h" - -static int prepare_inflate_zcmd(struct zip_operation *zip_ops, - struct zip_state *s, union zip_inst_s *zip_cmd) -{ - union zip_zres_s *result_ptr = &s->result; - - memset(zip_cmd, 0, sizeof(s->zip_cmd)); - memset(result_ptr, 0, sizeof(s->result)); - - /* IWORD#0 */ - - /* Decompression History Gather list - no gather list */ - zip_cmd->s.hg = 0; - /* For decompression, CE must be 0x0. */ - zip_cmd->s.ce = 0; - /* For decompression, SS must be 0x0. */ - zip_cmd->s.ss = 0; - /* For decompression, SF should always be set. */ - zip_cmd->s.sf = 1; - - /* Begin File */ - if (zip_ops->begin_file == 0) - zip_cmd->s.bf = 0; - else - zip_cmd->s.bf = 1; - - zip_cmd->s.ef = 1; - /* 0: for Deflate decompression, 3: for LZS decompression */ - zip_cmd->s.cc = zip_ops->ccode; - - /* IWORD #1*/ - - /* adler checksum */ - zip_cmd->s.adlercrc32 = zip_ops->csum; - - /* - * HISTORYLENGTH must be 0x0 for any ZIP decompress operation. - * History data is added to a decompression operation via IWORD3. - */ - zip_cmd->s.historylength = 0; - zip_cmd->s.ds = 0; - - /* IWORD # 8 and 9 - Output pointer */ - zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output); - zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len; - - /* Maximum number of output-stream bytes that can be written */ - zip_cmd->s.totaloutputlength = zip_ops->output_len; - - zip_dbg("Data Direct Input case "); - - /* IWORD # 6 and 7 - input pointer */ - zip_cmd->s.dg = 0; - zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input); - zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len; - - /* IWORD # 10 and 11 - Result pointer */ - zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr); - - /* Clearing completion code */ - result_ptr->s.compcode = 0; - - /* Returning 0 for time being.*/ - return 0; -} - -/** - * zip_inflate - API to offload inflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to zip device structure - * - * This function prepares the zip inflate command and submits it to the zip - * engine for processing. 
- * - * Return: 0 if successful or error code - */ -int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev) -{ - union zip_inst_s *zip_cmd = &s->zip_cmd; - union zip_zres_s *result_ptr = &s->result; - u32 queue; - - /* Prepare inflate zip command */ - prepare_inflate_zcmd(zip_ops, s, zip_cmd); - - atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes); - - /* Load inflate command to zip queue and ring the doorbell */ - queue = zip_load_instr(zip_cmd, zip_dev); - - /* Decompression requests submitted stats update */ - atomic64_inc(&zip_dev->stats.decomp_req_submit); - - /* Wait for completion or error */ - zip_poll_result(result_ptr); - - /* Decompression requests completed stats update */ - atomic64_inc(&zip_dev->stats.decomp_req_complete); - - zip_ops->compcode = result_ptr->s.compcode; - switch (zip_ops->compcode) { - case ZIP_CMD_NOTDONE: - zip_dbg("Zip Instruction not yet completed\n"); - return ZIP_ERROR; - - case ZIP_CMD_SUCCESS: - zip_dbg("Zip Instruction completed successfully\n"); - break; - - case ZIP_CMD_DYNAMIC_STOP: - zip_dbg(" Dynamic stop Initiated\n"); - break; - - default: - zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode); - atomic64_inc(&zip_dev->stats.decomp_bad_reqs); - zip_update_cmd_bufs(zip_dev, queue); - return ZIP_ERROR; - } - - zip_update_cmd_bufs(zip_dev, queue); - - if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) && - (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP)) - result_ptr->s.ef = 1; - - zip_ops->csum = result_ptr->s.adler32; - - atomic64_add(result_ptr->s.totalbyteswritten, - &zip_dev->stats.decomp_out_bytes); - - if (zip_ops->output_len < result_ptr->s.totalbyteswritten) { - zip_err("output_len (%d) < total bytes written (%d)\n", - zip_ops->output_len, result_ptr->s.totalbyteswritten); - zip_ops->output_len = 0; - } else { - zip_ops->output_len = result_ptr->s.totalbyteswritten; - } - - zip_ops->bytes_read = result_ptr->s.totalbytesread; - zip_ops->bits_processed = result_ptr->s.totalbitsprocessed; - zip_ops->end_file = result_ptr->s.ef; - if (zip_ops->end_file) { - switch (zip_ops->format) { - case RAW_FORMAT: - zip_dbg("RAW Format: %d ", zip_ops->format); - /* Get checksum from engine */ - zip_ops->csum = result_ptr->s.adler32; - break; - - case ZLIB_FORMAT: - zip_dbg("ZLIB Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.adler32; - break; - - case GZIP_FORMAT: - zip_dbg("GZIP Format: %d ", zip_ops->format); - zip_ops->csum = result_ptr->s.crc32; - break; - - case LZS_FORMAT: - zip_dbg("LZS Format: %d ", zip_ops->format); - break; - - default: - zip_err("Format error:%d\n", zip_ops->format); - } - } - - return 0; -} diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h deleted file mode 100644 index 6b20f179978e..000000000000 --- a/drivers/crypto/cavium/zip/zip_inflate.h +++ /dev/null @@ -1,62 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. 
- * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_INFLATE_H__ -#define __ZIP_INFLATE_H__ - -/** - * zip_inflate - API to offload inflate operation to hardware - * @zip_ops: Pointer to zip operation structure - * @s: Pointer to the structure representing zip state - * @zip_dev: Pointer to the structure representing zip device - * - * This function prepares the zip inflate command and submits it to the zip - * engine for processing. - * - * Return: 0 if successful or error code - */ -int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, - struct zip_device *zip_dev); -#endif diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c deleted file mode 100644 index abd58de4343d..000000000000 --- a/drivers/crypto/cavium/zip/zip_main.c +++ /dev/null @@ -1,603 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include "common.h" -#include "zip_crypto.h" - -#define DRV_NAME "ThunderX-ZIP" - -static struct zip_device *zip_dev[MAX_ZIP_DEVICES]; - -static const struct pci_device_id zip_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) }, - { 0, } -}; - -static void zip_debugfs_init(void); -static void zip_debugfs_exit(void); -static int zip_register_compression_device(void); -static void zip_unregister_compression_device(void); - -void zip_reg_write(u64 val, u64 __iomem *addr) -{ - writeq(val, addr); -} - -u64 zip_reg_read(u64 __iomem *addr) -{ - return readq(addr); -} - -/* - * Allocates new ZIP device structure - * Returns zip_device pointer or NULL if cannot allocate memory for zip_device - */ -static struct zip_device *zip_alloc_device(struct pci_dev *pdev) -{ - struct zip_device *zip = NULL; - int idx; - - for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) { - if (!zip_dev[idx]) - break; - } - - /* To ensure that the index is within the limit */ - if (idx < MAX_ZIP_DEVICES) - zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL); - - if (!zip) - return NULL; - - zip_dev[idx] = zip; - zip->index = idx; - return zip; -} - -/** - * zip_get_device - Get ZIP device based on node id of cpu - * - * @node: Node id of the current cpu - * Return: Pointer to Zip device structure - */ -struct zip_device *zip_get_device(int node) -{ - if ((node < MAX_ZIP_DEVICES) && (node >= 0)) - return zip_dev[node]; - - zip_err("ZIP device not found for node id %d\n", node); - return NULL; -} - -/** - * zip_get_node_id - Get the node id of the current cpu - * - * Return: Node id of the current cpu - */ -int zip_get_node_id(void) -{ - return cpu_to_node(raw_smp_processor_id()); -} - -/* Initializes the ZIP h/w sub-system */ -static int zip_init_hw(struct zip_device *zip) -{ - union zip_cmd_ctl cmd_ctl; - union zip_constants constants; - union zip_que_ena que_ena; - union zip_quex_map que_map; - union zip_que_pri que_pri; - - union zip_quex_sbuf_addr que_sbuf_addr; - union zip_quex_sbuf_ctl que_sbuf_ctl; - - int q = 0; - 
- /* Enable the ZIP Engine(Core) Clock */ - cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL); - cmd_ctl.s.forceclk = 1; - zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL)); - - zip_msg("ZIP_CMD_CTL : 0x%016llx", - zip_reg_read(zip->reg_base + ZIP_CMD_CTL)); - - constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS); - zip->depth = constants.s.depth; - zip->onfsize = constants.s.onfsize; - zip->ctxsize = constants.s.ctxsize; - - zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx", - zip->depth, zip->onfsize, zip->ctxsize); - - /* - * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to - * have the correct buffer pointer and size configured for each - * instruction queue. - */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - que_sbuf_ctl.u_reg64 = 0ull; - que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64)); - que_sbuf_ctl.s.inst_be = 0; - que_sbuf_ctl.s.stream_id = 0; - zip_reg_write(que_sbuf_ctl.u_reg64, - (zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); - - zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q, - zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); - } - - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - memset(&zip->iq[q], 0x0, sizeof(struct zip_iq)); - - spin_lock_init(&zip->iq[q].lock); - - if (zip_cmd_qbuf_alloc(zip, q)) { - while (q != 0) { - q--; - zip_cmd_qbuf_free(zip, q); - } - return -ENOMEM; - } - - /* Initialize tail ptr to head */ - zip->iq[q].sw_tail = zip->iq[q].sw_head; - zip->iq[q].hw_tail = zip->iq[q].sw_head; - - /* Write the physical addr to register */ - que_sbuf_addr.u_reg64 = 0ull; - que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >> - ZIP_128B_ALIGN); - - zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q, - (u64)que_sbuf_addr.s.ptr); - - zip_reg_write(que_sbuf_addr.u_reg64, - (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); - - zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q, - zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); - - zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", - zip->iq[q].sw_head, zip->iq[q].sw_tail, - zip->iq[q].hw_tail); - zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr); - } - - /* - * Queue-to-ZIP core mapping - * If a queue is not mapped to a particular core, it is equivalent to - * the ZIP core being disabled. 
- */ - que_ena.u_reg64 = 0x0ull; - /* Enabling queues based on ZIP_NUM_QUEUES */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) - que_ena.s.ena |= (0x1 << q); - zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA)); - - zip_msg("QUE_ENA : 0x%016llx", - zip_reg_read(zip->reg_base + ZIP_QUE_ENA)); - - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - que_map.u_reg64 = 0ull; - /* Mapping each queue to two ZIP cores */ - que_map.s.zce = 0x3; - zip_reg_write(que_map.u_reg64, - (zip->reg_base + ZIP_QUEX_MAP(q))); - - zip_msg("QUE_MAP(%d) : 0x%016llx", q, - zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q))); - } - - que_pri.u_reg64 = 0ull; - for (q = 0; q < ZIP_NUM_QUEUES; q++) - que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */ - zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI)); - - zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI)); - - return 0; -} - -static void zip_reset(struct zip_device *zip) -{ - union zip_cmd_ctl cmd_ctl; - - cmd_ctl.u_reg64 = 0x0ull; - cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */ - zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL)); -} - -static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct device *dev = &pdev->dev; - struct zip_device *zip = NULL; - int err; - - zip = zip_alloc_device(pdev); - if (!zip) - return -ENOMEM; - - dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index, - pdev->vendor, pdev->device, dev_to_node(dev)); - - pci_set_drvdata(pdev, zip); - zip->pdev = pdev; - - err = pci_enable_device(pdev); - if (err) { - dev_err(dev, "Failed to enable PCI device"); - goto err_free_device; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - dev_err(dev, "PCI request regions failed 0x%x", err); - goto err_disable_device; - } - - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); - if (err) { - dev_err(dev, "Unable to get usable 48-bit DMA configuration\n"); - goto err_release_regions; - } - - /* MAP configuration registers */ - zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0); - if (!zip->reg_base) { - dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting"); - err = -ENOMEM; - goto err_release_regions; - } - - /* Initialize ZIP Hardware */ - err = zip_init_hw(zip); - if (err) - goto err_release_regions; - - /* Register with the Kernel Crypto Interface */ - err = zip_register_compression_device(); - if (err < 0) { - zip_err("ZIP: Kernel Crypto Registration failed\n"); - goto err_register; - } - - /* comp-decomp statistics are handled with debugfs interface */ - zip_debugfs_init(); - - return 0; - -err_register: - zip_reset(zip); - -err_release_regions: - if (zip->reg_base) - iounmap(zip->reg_base); - pci_release_regions(pdev); - -err_disable_device: - pci_disable_device(pdev); - -err_free_device: - pci_set_drvdata(pdev, NULL); - - /* Remove zip_dev from zip_device list, free the zip_device memory */ - zip_dev[zip->index] = NULL; - devm_kfree(dev, zip); - - return err; -} - -static void zip_remove(struct pci_dev *pdev) -{ - struct zip_device *zip = pci_get_drvdata(pdev); - int q = 0; - - if (!zip) - return; - - zip_debugfs_exit(); - - zip_unregister_compression_device(); - - if (zip->reg_base) { - zip_reset(zip); - iounmap(zip->reg_base); - } - - pci_release_regions(pdev); - pci_disable_device(pdev); - - /* - * Free Command Queue buffers. This free should be called for all - * the enabled Queues. 
- */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) - zip_cmd_qbuf_free(zip, q); - - pci_set_drvdata(pdev, NULL); - /* remove zip device from zip device list */ - zip_dev[zip->index] = NULL; -} - -/* PCI Sub-System Interface */ -static struct pci_driver zip_driver = { - .name = DRV_NAME, - .id_table = zip_id_table, - .probe = zip_probe, - .remove = zip_remove, -}; - -/* Kernel Crypto Subsystem Interface */ - -static struct scomp_alg zip_scomp_deflate = { - .alloc_ctx = zip_alloc_scomp_ctx_deflate, - .free_ctx = zip_free_scomp_ctx, - .compress = zip_scomp_compress, - .decompress = zip_scomp_decompress, - .base = { - .cra_name = "deflate", - .cra_driver_name = "deflate-scomp-cavium", - .cra_module = THIS_MODULE, - .cra_priority = 300, - } -}; - -static struct scomp_alg zip_scomp_lzs = { - .alloc_ctx = zip_alloc_scomp_ctx_lzs, - .free_ctx = zip_free_scomp_ctx, - .compress = zip_scomp_compress, - .decompress = zip_scomp_decompress, - .base = { - .cra_name = "lzs", - .cra_driver_name = "lzs-scomp-cavium", - .cra_module = THIS_MODULE, - .cra_priority = 300, - } -}; - -static int zip_register_compression_device(void) -{ - int ret; - - ret = crypto_register_scomp(&zip_scomp_deflate); - if (ret < 0) { - zip_err("Deflate scomp algorithm registration failed\n"); - return ret; - } - - ret = crypto_register_scomp(&zip_scomp_lzs); - if (ret < 0) { - zip_err("LZS scomp algorithm registration failed\n"); - goto err_unregister_scomp_deflate; - } - - return ret; - -err_unregister_scomp_deflate: - crypto_unregister_scomp(&zip_scomp_deflate); - - return ret; -} - -static void zip_unregister_compression_device(void) -{ - crypto_unregister_scomp(&zip_scomp_deflate); - crypto_unregister_scomp(&zip_scomp_lzs); -} - -/* - * debugfs functions - */ -#ifdef CONFIG_DEBUG_FS -#include <linux/debugfs.h> - -/* Displays ZIP device statistics */ -static int zip_stats_show(struct seq_file *s, void *unused) -{ - u64 val = 0ull; - u64 avg_chunk = 0ull, avg_cr = 0ull; - u32 q = 0; - - int index = 0; - struct zip_device *zip; - struct zip_stats *st; - - for (index = 0; index < MAX_ZIP_DEVICES; index++) { - u64 pending = 0; - - if (zip_dev[index]) { - zip = zip_dev[index]; - st = &zip->stats; - - /* Get all the pending requests */ - for (q = 0; q < ZIP_NUM_QUEUES; q++) { - val = zip_reg_read((zip->reg_base + - ZIP_DBG_QUEX_STA(q))); - pending += val >> 32 & 0xffffff; - } - - val = atomic64_read(&st->comp_req_complete); - avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0; - - val = atomic64_read(&st->comp_out_bytes); - avg_cr = (val) ? 
atomic64_read(&st->comp_in_bytes) / val : 0; - seq_printf(s, " ZIP Device %d Stats\n" - "-----------------------------------\n" - "Comp Req Submitted : \t%lld\n" - "Comp Req Completed : \t%lld\n" - "Compress In Bytes : \t%lld\n" - "Compressed Out Bytes : \t%lld\n" - "Average Chunk size : \t%llu\n" - "Average Compression ratio : \t%llu\n" - "Decomp Req Submitted : \t%lld\n" - "Decomp Req Completed : \t%lld\n" - "Decompress In Bytes : \t%lld\n" - "Decompressed Out Bytes : \t%lld\n" - "Decompress Bad requests : \t%lld\n" - "Pending Req : \t%lld\n" - "---------------------------------\n", - index, - (u64)atomic64_read(&st->comp_req_submit), - (u64)atomic64_read(&st->comp_req_complete), - (u64)atomic64_read(&st->comp_in_bytes), - (u64)atomic64_read(&st->comp_out_bytes), - avg_chunk, - avg_cr, - (u64)atomic64_read(&st->decomp_req_submit), - (u64)atomic64_read(&st->decomp_req_complete), - (u64)atomic64_read(&st->decomp_in_bytes), - (u64)atomic64_read(&st->decomp_out_bytes), - (u64)atomic64_read(&st->decomp_bad_reqs), - pending); - } - } - return 0; -} - -/* Clears stats data */ -static int zip_clear_show(struct seq_file *s, void *unused) -{ - int index = 0; - - for (index = 0; index < MAX_ZIP_DEVICES; index++) { - if (zip_dev[index]) { - memset(&zip_dev[index]->stats, 0, - sizeof(struct zip_stats)); - seq_printf(s, "Cleared stats for zip %d\n", index); - } - } - - return 0; -} - -static struct zip_registers zipregs[64] = { - {"ZIP_CMD_CTL ", 0x0000ull}, - {"ZIP_THROTTLE ", 0x0010ull}, - {"ZIP_CONSTANTS ", 0x00A0ull}, - {"ZIP_QUE0_MAP ", 0x1400ull}, - {"ZIP_QUE1_MAP ", 0x1408ull}, - {"ZIP_QUE_ENA ", 0x0500ull}, - {"ZIP_QUE_PRI ", 0x0508ull}, - {"ZIP_QUE0_DONE ", 0x2000ull}, - {"ZIP_QUE1_DONE ", 0x2008ull}, - {"ZIP_QUE0_DOORBELL ", 0x4000ull}, - {"ZIP_QUE1_DOORBELL ", 0x4008ull}, - {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull}, - {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull}, - {"ZIP_QUE0_SBUF_CTL ", 0x1200ull}, - {"ZIP_QUE1_SBUF_CTL ", 0x1208ull}, - { NULL, 0} -}; - -/* Prints registers' contents */ -static int zip_regs_show(struct seq_file *s, void *unused) -{ - u64 val = 0; - int i = 0, index = 0; - - for (index = 0; index < MAX_ZIP_DEVICES; index++) { - if (zip_dev[index]) { - seq_printf(s, "--------------------------------\n" - " ZIP Device %d Registers\n" - "--------------------------------\n", - index); - - i = 0; - - while (zipregs[i].reg_name) { - val = zip_reg_read((zip_dev[index]->reg_base + - zipregs[i].reg_offset)); - seq_printf(s, "%s: 0x%016llx\n", - zipregs[i].reg_name, val); - i++; - } - } - } - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(zip_stats); -DEFINE_SHOW_ATTRIBUTE(zip_clear); -DEFINE_SHOW_ATTRIBUTE(zip_regs); - -/* Root directory for thunderx_zip debugfs entry */ -static struct dentry *zip_debugfs_root; - -static void zip_debugfs_init(void) -{ - if (!debugfs_initialized()) - return; - - zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL); - - /* Creating files for entries inside thunderx_zip directory */ - debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL, - &zip_stats_fops); - - debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL, - &zip_clear_fops); - - debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL, - &zip_regs_fops); - -} - -static void zip_debugfs_exit(void) -{ - debugfs_remove_recursive(zip_debugfs_root); -} - -#else -static void __init zip_debugfs_init(void) { } -static void __exit zip_debugfs_exit(void) { } -#endif -/* debugfs - end */ - -module_pci_driver(zip_driver); - -MODULE_AUTHOR("Cavium Inc"); -MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP 
Driver"); -MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(pci, zip_id_table); diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h deleted file mode 100644 index e1e4fa92ce80..000000000000 --- a/drivers/crypto/cavium/zip/zip_main.h +++ /dev/null @@ -1,120 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. 
- ***********************license end**************************************/ - -#ifndef __ZIP_MAIN_H__ -#define __ZIP_MAIN_H__ - -#include "zip_device.h" -#include "zip_regs.h" - -/* PCI device IDs */ -#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A - -/* ZIP device BARs */ -#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */ - -/* Maximum available zip queues */ -#define ZIP_MAX_NUM_QUEUES 8 - -#define ZIP_128B_ALIGN 7 - -/* Command queue buffer size */ -#define ZIP_CMD_QBUF_SIZE (8064 + 8) - -struct zip_registers { - char *reg_name; - u64 reg_offset; -}; - -/* ZIP Compression - Decompression stats */ -struct zip_stats { - atomic64_t comp_req_submit; - atomic64_t comp_req_complete; - atomic64_t decomp_req_submit; - atomic64_t decomp_req_complete; - atomic64_t comp_in_bytes; - atomic64_t comp_out_bytes; - atomic64_t decomp_in_bytes; - atomic64_t decomp_out_bytes; - atomic64_t decomp_bad_reqs; -}; - -/* ZIP Instruction Queue */ -struct zip_iq { - u64 *sw_head; - u64 *sw_tail; - u64 *hw_tail; - u64 done_cnt; - u64 pend_cnt; - u64 free_flag; - - /* ZIP IQ lock */ - spinlock_t lock; -}; - -/* ZIP Device */ -struct zip_device { - u32 index; - void __iomem *reg_base; - struct pci_dev *pdev; - - /* Different ZIP Constants */ - u64 depth; - u64 onfsize; - u64 ctxsize; - - struct zip_iq iq[ZIP_MAX_NUM_QUEUES]; - struct zip_stats stats; -}; - -/* Prototypes */ -struct zip_device *zip_get_device(int node_id); -int zip_get_node_id(void); -void zip_reg_write(u64 val, u64 __iomem *addr); -u64 zip_reg_read(u64 __iomem *addr); -void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue); -u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev); - -#endif /* ZIP_MAIN_H */ diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c deleted file mode 100644 index b3e0843a9169..000000000000 --- a/drivers/crypto/cavium/zip/zip_mem.c +++ /dev/null @@ -1,114 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. 
MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#include <linux/types.h> -#include <linux/vmalloc.h> - -#include "common.h" - -/** - * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue - * @zip: Pointer to zip device structure - * @q: Queue number to allocate bufffer to - * Return: 0 if successful, -ENOMEM otherwise - */ -int zip_cmd_qbuf_alloc(struct zip_device *zip, int q) -{ - zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA), - get_order(ZIP_CMD_QBUF_SIZE)); - - if (!zip->iq[q].sw_head) - return -ENOMEM; - - memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE); - - zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head); - return 0; -} - -/** - * zip_cmd_qbuf_free - Frees the cmd Queue buffer - * @zip: Pointer to zip device structure - * @q: Queue number to free buffer of - */ -void zip_cmd_qbuf_free(struct zip_device *zip, int q) -{ - zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail); - - free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE)); -} - -/** - * zip_data_buf_alloc - Allocates memory for a data bufffer - * @size: Size of the buffer to allocate - * Returns: Pointer to the buffer allocated - */ -u8 *zip_data_buf_alloc(u64 size) -{ - u8 *ptr; - - ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA), - get_order(size)); - - if (!ptr) - return NULL; - - memset(ptr, 0, size); - - zip_dbg("Data buffer allocation success\n"); - return ptr; -} - -/** - * zip_data_buf_free - Frees the memory of a data buffer - * @ptr: Pointer to the buffer - * @size: Buffer size - */ -void zip_data_buf_free(u8 *ptr, u64 size) -{ - zip_dbg("Freeing data buffer 0x%lx\n", ptr); - - free_pages((u64)ptr, get_order(size)); -} diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h deleted file mode 100644 index f8f2f08c4a5c..000000000000 --- a/drivers/crypto/cavium/zip/zip_mem.h +++ /dev/null @@ -1,78 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_MEM_H__ -#define __ZIP_MEM_H__ - -/** - * zip_cmd_qbuf_free - Frees the cmd Queue buffer - * @zip: Pointer to zip device structure - * @q: Queue nmber to free buffer of - */ -void zip_cmd_qbuf_free(struct zip_device *zip, int q); - -/** - * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue - * @zip: Pointer to zip device structure - * @q: Queue number to allocate bufffer to - * Return: 0 if successful, 1 otherwise - */ -int zip_cmd_qbuf_alloc(struct zip_device *zip, int q); - -/** - * zip_data_buf_alloc - Allocates memory for a data bufffer - * @size: Size of the buffer to allocate - * Returns: Pointer to the buffer allocated - */ -u8 *zip_data_buf_alloc(u64 size); - -/** - * zip_data_buf_free - Frees the memory of a data buffer - * @ptr: Pointer to the buffer - * @size: Buffer size - */ -void zip_data_buf_free(u8 *ptr, u64 size); - -#endif diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h deleted file mode 100644 index 874e0236c87e..000000000000 --- a/drivers/crypto/cavium/zip/zip_regs.h +++ /dev/null @@ -1,1347 +0,0 @@ -/***********************license start************************************ - * Copyright (c) 2003-2017 Cavium, Inc. - * All rights reserved. - * - * License: one of 'Cavium License' or 'GNU General Public License Version 2' - * - * This file is provided under the terms of the Cavium License (see below) - * or under the terms of GNU General Public License, Version 2, as - * published by the Free Software Foundation. When using or redistributing - * this file, you may do so under either license. - * - * Cavium License: Redistribution and use in source and binary forms, with - * or without modification, are permitted provided that the following - * conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of Cavium Inc. nor the names of its contributors may be - * used to endorse or promote products derived from this software without - * specific prior written permission. - * - * This Software, including technical data, may be subject to U.S. export - * control laws, including the U.S. Export Administration Act and its - * associated regulations, and may be subject to export or import - * regulations in other countries. - * - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" - * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS - * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH - * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY - * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT - * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) - * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A - * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET - * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE - * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES - * WITH YOU. - ***********************license end**************************************/ - -#ifndef __ZIP_REGS_H__ -#define __ZIP_REGS_H__ - -/* - * Configuration and status register (CSR) address and type definitions for - * Cavium ZIP. - */ - -#include <linux/kern_levels.h> - -/* ZIP invocation result completion status codes */ -#define ZIP_CMD_NOTDONE 0x0 - -/* Successful completion. */ -#define ZIP_CMD_SUCCESS 0x1 - -/* Output truncated */ -#define ZIP_CMD_DTRUNC 0x2 - -/* Dynamic Stop */ -#define ZIP_CMD_DYNAMIC_STOP 0x3 - -/* Uncompress ran out of input data when IWORD0[EF] was set */ -#define ZIP_CMD_ITRUNC 0x4 - -/* Uncompress found the reserved block type 3 */ -#define ZIP_CMD_RBLOCK 0x5 - -/* - * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input. - */ -#define ZIP_CMD_NLEN 0x6 - -/* Uncompress found a bad code in the main Huffman codes. */ -#define ZIP_CMD_BADCODE 0x7 - -/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */ -#define ZIP_CMD_BADCODE2 0x8 - -/* Compress found a zero-length input. */ -#define ZIP_CMD_ZERO_LEN 0x9 - -/* The compress or decompress encountered an internal parity error. */ -#define ZIP_CMD_PARITY 0xA - -/* - * Uncompress found a string identifier that precedes the uncompressed data and - * decompression history. - */ -#define ZIP_CMD_FATAL 0xB - -/** - * enum zip_int_vec_e - ZIP MSI-X Vector Enumeration, enumerates the MSI-X - * interrupt vectors. 
- */ -enum zip_int_vec_e { - ZIP_INT_VEC_E_ECCE = 0x10, - ZIP_INT_VEC_E_FIFE = 0x11, - ZIP_INT_VEC_E_QUE0_DONE = 0x0, - ZIP_INT_VEC_E_QUE0_ERR = 0x8, - ZIP_INT_VEC_E_QUE1_DONE = 0x1, - ZIP_INT_VEC_E_QUE1_ERR = 0x9, - ZIP_INT_VEC_E_QUE2_DONE = 0x2, - ZIP_INT_VEC_E_QUE2_ERR = 0xa, - ZIP_INT_VEC_E_QUE3_DONE = 0x3, - ZIP_INT_VEC_E_QUE3_ERR = 0xb, - ZIP_INT_VEC_E_QUE4_DONE = 0x4, - ZIP_INT_VEC_E_QUE4_ERR = 0xc, - ZIP_INT_VEC_E_QUE5_DONE = 0x5, - ZIP_INT_VEC_E_QUE5_ERR = 0xd, - ZIP_INT_VEC_E_QUE6_DONE = 0x6, - ZIP_INT_VEC_E_QUE6_ERR = 0xe, - ZIP_INT_VEC_E_QUE7_DONE = 0x7, - ZIP_INT_VEC_E_QUE7_ERR = 0xf, - ZIP_INT_VEC_E_ENUM_LAST = 0x12, -}; - -/** - * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR. - * - * It is the generic format of pointers in ZIP_INST_S. - */ -union zip_zptr_addr_s { - u64 u_reg64; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 addr : 49; - u64 reserved_49_63 : 15; -#endif - } s; - -}; - -/** - * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL. - * - * It is the generic format of pointers in ZIP_INST_S. - */ -union zip_zptr_ctl_s { - u64 u_reg64; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_112_127 : 16; - u64 length : 16; - u64 reserved_67_95 : 29; - u64 fw : 1; - u64 nc : 1; - u64 data_be : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 data_be : 1; - u64 nc : 1; - u64 fw : 1; - u64 reserved_67_95 : 29; - u64 length : 16; - u64 reserved_112_127 : 16; -#endif - } s; -}; - -/** - * union zip_inst_s - ZIP Instruction Structure. - * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within - * the structure). - */ -union zip_inst_s { - u64 u_reg64[16]; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 doneint : 1; - u64 reserved_56_62 : 7; - u64 totaloutputlength : 24; - u64 reserved_27_31 : 5; - u64 exn : 3; - u64 reserved_23_23 : 1; - u64 exbits : 7; - u64 reserved_12_15 : 4; - u64 sf : 1; - u64 ss : 2; - u64 cc : 2; - u64 ef : 1; - u64 bf : 1; - u64 ce : 1; - u64 reserved_3_3 : 1; - u64 ds : 1; - u64 dg : 1; - u64 hg : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 hg : 1; - u64 dg : 1; - u64 ds : 1; - u64 reserved_3_3 : 1; - u64 ce : 1; - u64 bf : 1; - u64 ef : 1; - u64 cc : 2; - u64 ss : 2; - u64 sf : 1; - u64 reserved_12_15 : 4; - u64 exbits : 7; - u64 reserved_23_23 : 1; - u64 exn : 3; - u64 reserved_27_31 : 5; - u64 totaloutputlength : 24; - u64 reserved_56_62 : 7; - u64 doneint : 1; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 historylength : 16; - u64 reserved_96_111 : 16; - u64 adlercrc32 : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 adlercrc32 : 32; - u64 reserved_96_111 : 16; - u64 historylength : 16; -#endif - union zip_zptr_addr_s ctx_ptr_addr; - union zip_zptr_ctl_s ctx_ptr_ctl; - union zip_zptr_addr_s his_ptr_addr; - union zip_zptr_ctl_s his_ptr_ctl; - union zip_zptr_addr_s inp_ptr_addr; - union zip_zptr_ctl_s inp_ptr_ctl; - union zip_zptr_addr_s out_ptr_addr; - union zip_zptr_ctl_s out_ptr_ctl; - union zip_zptr_addr_s res_ptr_addr; - union zip_zptr_ctl_s res_ptr_ctl; -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_817_831 : 15; - u64 wq_ptr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 wq_ptr : 49; - u64 reserved_817_831 : 15; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_882_895 : 14; - u64 tt : 2; - u64 reserved_874_879 : 6; - u64 grp : 10; - u64 tag : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 tag : 32; - u64 grp : 10; - u64 reserved_874_879 : 6; - u64 
tt : 2; - u64 reserved_882_895 : 14; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_896_959 : 64; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reserved_896_959 : 64; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_960_1023 : 64; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reserved_960_1023 : 64; -#endif - } s; -}; - -/** - * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR) - * Structure - * - * ZIP_NPTR structure is used to chain all the zip instruction buffers - * together. ZIP instruction buffers are managed (allocated and released) by - * the software. - */ -union zip_nptr_s { - u64 u_reg64; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 addr : 49; - u64 reserved_49_63 : 15; -#endif - } s; -}; - -/** - * union zip_zptr_s - ZIP Generic Pointer Structure. - * - * It is the generic format of pointers in ZIP_INST_S. - */ -union zip_zptr_s { - u64 u_reg64[2]; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 49; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 addr : 49; - u64 reserved_49_63 : 15; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_112_127 : 16; - u64 length : 16; - u64 reserved_67_95 : 29; - u64 fw : 1; - u64 nc : 1; - u64 data_be : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 data_be : 1; - u64 nc : 1; - u64 fw : 1; - u64 reserved_67_95 : 29; - u64 length : 16; - u64 reserved_112_127 : 16; -#endif - } s; -}; - -/** - * union zip_zres_s - ZIP Result Structure - * - * The ZIP coprocessor writes the result structure after it completes the - * invocation. The result structure is exactly 24 bytes, and each invocation of - * the ZIP coprocessor produces exactly one result structure. - */ -union zip_zres_s { - u64 u_reg64[3]; - struct { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 crc32 : 32; - u64 adler32 : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 adler32 : 32; - u64 crc32 : 32; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 totalbyteswritten : 32; - u64 totalbytesread : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 totalbytesread : 32; - u64 totalbyteswritten : 32; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) - u64 totalbitsprocessed : 32; - u64 doneint : 1; - u64 reserved_155_158 : 4; - u64 exn : 3; - u64 reserved_151_151 : 1; - u64 exbits : 7; - u64 reserved_137_143 : 7; - u64 ef : 1; - - volatile u64 compcode : 8; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - - volatile u64 compcode : 8; - u64 ef : 1; - u64 reserved_137_143 : 7; - u64 exbits : 7; - u64 reserved_151_151 : 1; - u64 exn : 3; - u64 reserved_155_158 : 4; - u64 doneint : 1; - u64 totalbitsprocessed : 32; -#endif - } s; -}; - -/** - * union zip_cmd_ctl - Structure representing the register that controls - * clock and reset. - */ -union zip_cmd_ctl { - u64 u_reg64; - struct zip_cmd_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_2_63 : 62; - u64 forceclk : 1; - u64 reset : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reset : 1; - u64 forceclk : 1; - u64 reserved_2_63 : 62; -#endif - } s; -}; - -#define ZIP_CMD_CTL 0x0ull - -/** - * union zip_constants - Data structure representing the register that contains - * all of the current implementation-related parameters of the zip core in this - * chip. 
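All of the register unions being deleted here share one layout trick: a raw u64 view overlaid with a bitfield view whose members are declared in opposite order for big- and little-endian kernels, so each named field always lands on the same hardware bits. A stripped-down sketch of that pattern, with made-up field names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative only: a 64-bit CSR with an 'enable' bit and a 20-bit count. */
union example_csr {
	u64 u_reg64;			/* raw register value */
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 reserved_21_63 : 43;
		u64 count          : 20;
		u64 enable         : 1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
		u64 enable         : 1;		/* bit 0 */
		u64 count          : 20;	/* bits 1..20 */
		u64 reserved_21_63 : 43;
#endif
	} s;
};

/* Usage: load the raw value, then pick fields out through the overlay. */
static inline u64 example_csr_count(u64 raw)
{
	union example_csr csr = { .u_reg64 = raw };

	return csr.s.count;
}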
- */ -union zip_constants { - u64 u_reg64; - struct zip_constants_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 nexec : 8; - u64 reserved_49_55 : 7; - u64 syncflush_capable : 1; - u64 depth : 16; - u64 onfsize : 12; - u64 ctxsize : 12; - u64 reserved_1_7 : 7; - u64 disabled : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 disabled : 1; - u64 reserved_1_7 : 7; - u64 ctxsize : 12; - u64 onfsize : 12; - u64 depth : 16; - u64 syncflush_capable : 1; - u64 reserved_49_55 : 7; - u64 nexec : 8; -#endif - } s; -}; - -#define ZIP_CONSTANTS 0x00A0ull - -/** - * union zip_corex_bist_status - Represents registers which have the BIST - * status of memories in zip cores. - * - * Each bit is the BIST result of an individual memory - * (per bit, 0 = pass and 1 = fail). - */ -union zip_corex_bist_status { - u64 u_reg64; - struct zip_corex_bist_status_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_53_63 : 11; - u64 bstatus : 53; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 bstatus : 53; - u64 reserved_53_63 : 11; -#endif - } s; -}; - -static inline u64 ZIP_COREX_BIST_STATUS(u64 param1) -{ - if (param1 <= 1) - return 0x0520ull + (param1 & 1) * 0x8ull; - pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1); - return 0; -} - -/** - * union zip_ctl_bist_status - Represents register that has the BIST status of - * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data - * buffer, output data buffers). - * - * Each bit is the BIST result of an individual memory - * (per bit, 0 = pass and 1 = fail). - */ -union zip_ctl_bist_status { - u64 u_reg64; - struct zip_ctl_bist_status_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_9_63 : 55; - u64 bstatus : 9; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 bstatus : 9; - u64 reserved_9_63 : 55; -#endif - } s; -}; - -#define ZIP_CTL_BIST_STATUS 0x0510ull - -/** - * union zip_ctl_cfg - Represents the register that controls the behavior of - * the ZIP DMA engines. - * - * It is recommended to keep default values for normal operation. Changing the - * values of the fields may be useful for diagnostics. - */ -union zip_ctl_cfg { - u64 u_reg64; - struct zip_ctl_cfg_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_52_63 : 12; - u64 ildf : 4; - u64 reserved_36_47 : 12; - u64 drtf : 4; - u64 reserved_27_31 : 5; - u64 stcf : 3; - u64 reserved_19_23 : 5; - u64 ldf : 3; - u64 reserved_2_15 : 14; - u64 busy : 1; - u64 reserved_0_0 : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 reserved_0_0 : 1; - u64 busy : 1; - u64 reserved_2_15 : 14; - u64 ldf : 3; - u64 reserved_19_23 : 5; - u64 stcf : 3; - u64 reserved_27_31 : 5; - u64 drtf : 4; - u64 reserved_36_47 : 12; - u64 ildf : 4; - u64 reserved_52_63 : 12; -#endif - } s; -}; - -#define ZIP_CTL_CFG 0x0560ull - -/** - * union zip_dbg_corex_inst - Represents the registers that reflect the status - * of the current instruction that the ZIP core is executing or has executed. - * - * These registers are only for debug use. 
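The removed ZIP_COREX_BIST_STATUS()-style helpers all compute a per-instance CSR address as base + index * stride, and fall back to logging an error and returning 0 for an out-of-range index. A generic sketch of that idea (base, stride and count here are illustrative, not the ZIP register map):

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical register block: 8 instances, one 8-byte CSR each, at 0x1000. */
#define EXAMPLE_CSR_BASE	0x1000ull
#define EXAMPLE_CSR_STRIDE	0x8ull
#define EXAMPLE_CSR_COUNT	8

static inline u64 example_csr_offset(u64 index)
{
	if (index < EXAMPLE_CSR_COUNT)
		return EXAMPLE_CSR_BASE + index * EXAMPLE_CSR_STRIDE;

	/* Mirror the removed helpers: complain loudly, return a safe value. */
	pr_err("example_csr_offset: index %llu out of range\n", index);
	return 0;
}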
- */ -union zip_dbg_corex_inst { - u64 u_reg64; - struct zip_dbg_corex_inst_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 busy : 1; - u64 reserved_35_62 : 28; - u64 qid : 3; - u64 iid : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 iid : 32; - u64 qid : 3; - u64 reserved_35_62 : 28; - u64 busy : 1; -#endif - } s; -}; - -static inline u64 ZIP_DBG_COREX_INST(u64 param1) -{ - if (param1 <= 1) - return 0x0640ull + (param1 & 1) * 0x8ull; - pr_err("ZIP_DBG_COREX_INST: %llu\n", param1); - return 0; -} - -/** - * union zip_dbg_corex_sta - Represents registers that reflect the status of - * the zip cores. - * - * They are for debug use only. - */ -union zip_dbg_corex_sta { - u64 u_reg64; - struct zip_dbg_corex_sta_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 busy : 1; - u64 reserved_37_62 : 26; - u64 ist : 5; - u64 nie : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 nie : 32; - u64 ist : 5; - u64 reserved_37_62 : 26; - u64 busy : 1; -#endif - } s; -}; - -static inline u64 ZIP_DBG_COREX_STA(u64 param1) -{ - if (param1 <= 1) - return 0x0680ull + (param1 & 1) * 0x8ull; - pr_err("ZIP_DBG_COREX_STA: %llu\n", param1); - return 0; -} - -/** - * union zip_dbg_quex_sta - Represets registers that reflect status of the zip - * instruction queues. - * - * They are for debug use only. - */ -union zip_dbg_quex_sta { - u64 u_reg64; - struct zip_dbg_quex_sta_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 busy : 1; - u64 reserved_56_62 : 7; - u64 rqwc : 24; - u64 nii : 32; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 nii : 32; - u64 rqwc : 24; - u64 reserved_56_62 : 7; - u64 busy : 1; -#endif - } s; -}; - -static inline u64 ZIP_DBG_QUEX_STA(u64 param1) -{ - if (param1 <= 7) - return 0x1800ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1); - return 0; -} - -/** - * union zip_ecc_ctl - Represents the register that enables ECC for each - * individual internal memory that requires ECC. - * - * For debug purpose, it can also flip one or two bits in the ECC data. 
- */ -union zip_ecc_ctl { - u64 u_reg64; - struct zip_ecc_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_19_63 : 45; - u64 vmem_cdis : 1; - u64 vmem_fs : 2; - u64 reserved_15_15 : 1; - u64 idf1_cdis : 1; - u64 idf1_fs : 2; - u64 reserved_11_11 : 1; - u64 idf0_cdis : 1; - u64 idf0_fs : 2; - u64 reserved_7_7 : 1; - u64 gspf_cdis : 1; - u64 gspf_fs : 2; - u64 reserved_3_3 : 1; - u64 iqf_cdis : 1; - u64 iqf_fs : 2; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 iqf_fs : 2; - u64 iqf_cdis : 1; - u64 reserved_3_3 : 1; - u64 gspf_fs : 2; - u64 gspf_cdis : 1; - u64 reserved_7_7 : 1; - u64 idf0_fs : 2; - u64 idf0_cdis : 1; - u64 reserved_11_11 : 1; - u64 idf1_fs : 2; - u64 idf1_cdis : 1; - u64 reserved_15_15 : 1; - u64 vmem_fs : 2; - u64 vmem_cdis : 1; - u64 reserved_19_63 : 45; -#endif - } s; -}; - -#define ZIP_ECC_CTL 0x0568ull - -/* NCB - zip_ecce_ena_w1c */ -union zip_ecce_ena_w1c { - u64 u_reg64; - struct zip_ecce_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_ENA_W1C 0x0598ull - -/* NCB - zip_ecce_ena_w1s */ -union zip_ecce_ena_w1s { - u64 u_reg64; - struct zip_ecce_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_ENA_W1S 0x0590ull - -/** - * union zip_ecce_int - Represents the register that contains the status of the - * ECC interrupt sources. - */ -union zip_ecce_int { - u64 u_reg64; - struct zip_ecce_int_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_INT 0x0580ull - -/* NCB - zip_ecce_int_w1s */ -union zip_ecce_int_w1s { - u64 u_reg64; - struct zip_ecce_int_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_37_63 : 27; - u64 dbe : 5; - u64 reserved_5_31 : 27; - u64 sbe : 5; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 sbe : 5; - u64 reserved_5_31 : 27; - u64 dbe : 5; - u64 reserved_37_63 : 27; -#endif - } s; -}; - -#define ZIP_ECCE_INT_W1S 0x0588ull - -/* NCB - zip_fife_ena_w1c */ -union zip_fife_ena_w1c { - u64 u_reg64; - struct zip_fife_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_ENA_W1C 0x0090ull - -/* NCB - zip_fife_ena_w1s */ -union zip_fife_ena_w1s { - u64 u_reg64; - struct zip_fife_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_ENA_W1S 0x0088ull - -/* NCB - zip_fife_int */ -union zip_fife_int { - u64 u_reg64; - struct zip_fife_int_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_INT 0x0078ull - -/* NCB - zip_fife_int_w1s */ -union 
zip_fife_int_w1s { - u64 u_reg64; - struct zip_fife_int_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_42_63 : 22; - u64 asserts : 42; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 asserts : 42; - u64 reserved_42_63 : 22; -#endif - } s; -}; - -#define ZIP_FIFE_INT_W1S 0x0080ull - -/** - * union zip_msix_pbax - Represents the register that is the MSI-X PBA table - * - * The bit number is indexed by the ZIP_INT_VEC_E enumeration. - */ -union zip_msix_pbax { - u64 u_reg64; - struct zip_msix_pbax_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 pend : 64; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 pend : 64; -#endif - } s; -}; - -static inline u64 ZIP_MSIX_PBAX(u64 param1) -{ - if (param1 == 0) - return 0x0000838000FF0000ull; - pr_err("ZIP_MSIX_PBAX: %llu\n", param1); - return 0; -} - -/** - * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector - * table, indexed by the ZIP_INT_VEC_E enumeration. - */ -union zip_msix_vecx_addr { - u64 u_reg64; - struct zip_msix_vecx_addr_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 addr : 47; - u64 reserved_1_1 : 1; - u64 secvec : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 secvec : 1; - u64 reserved_1_1 : 1; - u64 addr : 47; - u64 reserved_49_63 : 15; -#endif - } s; -}; - -static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1) -{ - if (param1 <= 17) - return 0x0000838000F00000ull + (param1 & 31) * 0x10ull; - pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1); - return 0; -} - -/** - * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector - * table, indexed by the ZIP_INT_VEC_E enumeration. - */ -union zip_msix_vecx_ctl { - u64 u_reg64; - struct zip_msix_vecx_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_33_63 : 31; - u64 mask : 1; - u64 reserved_20_31 : 12; - u64 data : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 data : 20; - u64 reserved_20_31 : 12; - u64 mask : 1; - u64 reserved_33_63 : 31; -#endif - } s; -}; - -static inline u64 ZIP_MSIX_VECX_CTL(u64 param1) -{ - if (param1 <= 17) - return 0x0000838000F00008ull + (param1 & 31) * 0x10ull; - pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done - Represents the registers that contain the per-queue - * instruction done count. - */ -union zip_quex_done { - u64 u_reg64; - struct zip_quex_done_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_20_63 : 44; - u64 done : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done : 20; - u64 reserved_20_63 : 44; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE(u64 param1) -{ - if (param1 <= 7) - return 0x2000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_ack - Represents the registers on write to which will - * decrement the per-queue instructiona done count. - */ -union zip_quex_done_ack { - u64 u_reg64; - struct zip_quex_done_ack_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_20_63 : 44; - u64 done_ack : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done_ack : 20; - u64 reserved_20_63 : 44; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_ACK(u64 param1) -{ - if (param1 <= 7) - return 0x2200ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_ena_w1c - Represents the register which when written - * 1 to will disable the DONEINT interrupt for the queue. 
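The *_ENA_W1C/*_ENA_W1S register pairs in this file follow the usual write-one-to-clear / write-one-to-set convention: a 1 written to a bit position in the W1S register sets that bit in the underlying enable mask, a 1 written to the W1C register clears it, and written zeroes are ignored, so no read-modify-write is needed. A hedged sketch of driving such a pair (the MMIO accessor, base pointer and offsets are assumptions for illustration):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical offsets for an enable mask exposed as a W1S/W1C pair. */
#define EXAMPLE_DONE_ENA_W1S	0x2400ull
#define EXAMPLE_DONE_ENA_W1C	0x2600ull
#define EXAMPLE_DONE_ENA_BIT	BIT_ULL(0)

/* Enable the completion interrupt: only the written 1-bits take effect. */
static void example_done_irq_enable(void __iomem *base)
{
	writeq(EXAMPLE_DONE_ENA_BIT, base + EXAMPLE_DONE_ENA_W1S);
}

/* Disable it again without disturbing any other bit in the mask. */
static void example_done_irq_disable(void __iomem *base)
{
	writeq(EXAMPLE_DONE_ENA_BIT, base + EXAMPLE_DONE_ENA_W1C);
}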
- */ -union zip_quex_done_ena_w1c { - u64 u_reg64; - struct zip_quex_done_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_1_63 : 63; - u64 done_ena : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done_ena : 1; - u64 reserved_1_63 : 63; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1) -{ - if (param1 <= 7) - return 0x2600ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_ena_w1s - Represents the register that when written 1 to - * will enable the DONEINT interrupt for the queue. - */ -union zip_quex_done_ena_w1s { - u64 u_reg64; - struct zip_quex_done_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_1_63 : 63; - u64 done_ena : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 done_ena : 1; - u64 reserved_1_63 : 63; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1) -{ - if (param1 <= 7) - return 0x2400ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_done_wait - Represents the register that specifies the per - * queue interrupt coalescing settings. - */ -union zip_quex_done_wait { - u64 u_reg64; - struct zip_quex_done_wait_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_48_63 : 16; - u64 time_wait : 16; - u64 reserved_20_31 : 12; - u64 num_wait : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 num_wait : 20; - u64 reserved_20_31 : 12; - u64 time_wait : 16; - u64 reserved_48_63 : 16; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1) -{ - if (param1 <= 7) - return 0x2800ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_doorbell - Represents doorbell registers for the ZIP - * instruction queues. - */ -union zip_quex_doorbell { - u64 u_reg64; - struct zip_quex_doorbell_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_20_63 : 44; - u64 dbell_cnt : 20; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dbell_cnt : 20; - u64 reserved_20_63 : 44; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_DOORBELL(u64 param1) -{ - if (param1 <= 7) - return 0x4000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1); - return 0; -} - -union zip_quex_err_ena_w1c { - u64 u_reg64; - struct zip_quex_err_ena_w1c_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1) -{ - if (param1 <= 7) - return 0x3600ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1); - return 0; -} - -union zip_quex_err_ena_w1s { - u64 u_reg64; - struct zip_quex_err_ena_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1) -{ - if (param1 <= 7) - return 0x3400ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_err_int - Represents registers that contain the per-queue - * error interrupts. 
- */ -union zip_quex_err_int { - u64 u_reg64; - struct zip_quex_err_int_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_INT(u64 param1) -{ - if (param1 <= 7) - return 0x3000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1); - return 0; -} - -/* NCB - zip_que#_err_int_w1s */ -union zip_quex_err_int_w1s { - u64 u_reg64; - struct zip_quex_err_int_w1s_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_5_63 : 59; - u64 mdbe : 1; - u64 nwrp : 1; - u64 nrrp : 1; - u64 irde : 1; - u64 dovf : 1; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 dovf : 1; - u64 irde : 1; - u64 nrrp : 1; - u64 nwrp : 1; - u64 mdbe : 1; - u64 reserved_5_63 : 59; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1) -{ - if (param1 <= 7) - return 0x3200ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_gcfg - Represents the registers that reflect status of the - * zip instruction queues,debug use only. - */ -union zip_quex_gcfg { - u64 u_reg64; - struct zip_quex_gcfg_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_4_63 : 60; - u64 iqb_ldwb : 1; - u64 cbw_sty : 1; - u64 l2ld_cmd : 2; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 l2ld_cmd : 2; - u64 cbw_sty : 1; - u64 iqb_ldwb : 1; - u64 reserved_4_63 : 60; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_GCFG(u64 param1) -{ - if (param1 <= 7) - return 0x1A00ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_GCFG: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_map - Represents the registers that control how each - * instruction queue maps to zip cores. - */ -union zip_quex_map { - u64 u_reg64; - struct zip_quex_map_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_2_63 : 62; - u64 zce : 2; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 zce : 2; - u64 reserved_2_63 : 62; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_MAP(u64 param1) -{ - if (param1 <= 7) - return 0x1400ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_MAP: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_sbuf_addr - Represents the registers that set the buffer - * parameters for the instruction queues. - * - * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite - * this register to effectively reset the command buffer state machine. - * These registers must be programmed after SW programs the corresponding - * ZIP_QUE(0..7)_SBUF_CTL. - */ -union zip_quex_sbuf_addr { - u64 u_reg64; - struct zip_quex_sbuf_addr_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_49_63 : 15; - u64 ptr : 42; - u64 off : 7; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 off : 7; - u64 ptr : 42; - u64 reserved_49_63 : 15; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1) -{ - if (param1 <= 7) - return 0x1000ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1); - return 0; -} - -/** - * union zip_quex_sbuf_ctl - Represents the registers that set the buffer - * parameters for the instruction queues. - * - * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite - * this register to effectively reset the command buffer state machine. 
- * These registers must be programmed before SW programs the corresponding - * ZIP_QUE(0..7)_SBUF_ADDR. - */ -union zip_quex_sbuf_ctl { - u64 u_reg64; - struct zip_quex_sbuf_ctl_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_45_63 : 19; - u64 size : 13; - u64 inst_be : 1; - u64 reserved_24_30 : 7; - u64 stream_id : 8; - u64 reserved_12_15 : 4; - u64 aura : 12; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 aura : 12; - u64 reserved_12_15 : 4; - u64 stream_id : 8; - u64 reserved_24_30 : 7; - u64 inst_be : 1; - u64 size : 13; - u64 reserved_45_63 : 19; -#endif - } s; -}; - -static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1) -{ - if (param1 <= 7) - return 0x1200ull + (param1 & 7) * 0x8ull; - pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1); - return 0; -} - -/** - * union zip_que_ena - Represents queue enable register - * - * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue. - */ -union zip_que_ena { - u64 u_reg64; - struct zip_que_ena_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_8_63 : 56; - u64 ena : 8; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 ena : 8; - u64 reserved_8_63 : 56; -#endif - } s; -}; - -#define ZIP_QUE_ENA 0x0500ull - -/** - * union zip_que_pri - Represents the register that defines the priority - * between instruction queues. - */ -union zip_que_pri { - u64 u_reg64; - struct zip_que_pri_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_8_63 : 56; - u64 pri : 8; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 pri : 8; - u64 reserved_8_63 : 56; -#endif - } s; -}; - -#define ZIP_QUE_PRI 0x0508ull - -/** - * union zip_throttle - Represents the register that controls the maximum - * number of in-flight X2I data fetch transactions. - * - * Writing 0 to this register causes the ZIP module to temporarily suspend NCB - * accesses; it is not recommended for normal operation, but may be useful for - * diagnostics. 
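The SBUF_CTL/SBUF_ADDR comments above encode a bring-up ordering rule: for each instruction queue, software programs SBUF_CTL (buffer size and related fields) before SBUF_ADDR, and only rewrites either while the queue is quiescent (outstanding doorbell count is zero). A simplified sketch of that order (offsets, the writeq()-based accesses and the enable handling are illustrative assumptions, not the actual ZIP programming sequence):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_QUE_SBUF_CTL(q)		(0x1200ull + (q) * 0x8ull)
#define EXAMPLE_QUE_SBUF_ADDR(q)	(0x1000ull + (q) * 0x8ull)
#define EXAMPLE_QUE_ENA			0x0500ull

/*
 * Bring up one hardware queue: describe the command buffer first (CTL),
 * then point the engine at it (ADDR), then enable the queue.
 */
static void example_queue_setup(void __iomem *base, int q,
				u64 ctl_val, u64 buf_dma_addr)
{
	writeq(ctl_val, base + EXAMPLE_QUE_SBUF_CTL(q));
	writeq(buf_dma_addr, base + EXAMPLE_QUE_SBUF_ADDR(q));

	/* Enable bit q; a real driver might use a W1S register instead. */
	writeq(readq(base + EXAMPLE_QUE_ENA) | BIT_ULL(q),
	       base + EXAMPLE_QUE_ENA);
}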
- */ -union zip_throttle { - u64 u_reg64; - struct zip_throttle_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_6_63 : 58; - u64 ld_infl : 6; -#elif defined(__LITTLE_ENDIAN_BITFIELD) - u64 ld_infl : 6; - u64 reserved_6_63 : 58; -#endif - } s; -}; - -#define ZIP_THROTTLE 0x0010ull - -#endif /* _CSRS_ZIP__ */ diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index d11daaf47f06..685d42ec7ade 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c @@ -7,15 +7,16 @@ * Author: Tom Lendacky <thomas.lendacky@amd.com> */ -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/scatterlist.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/ctr.h> -#include <crypto/scatterwalk.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/string.h> #include "ccp-crypto.h" diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index afae30adb703..91b1189c47de 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -7,14 +7,15 @@ * Author: Gary R Hook <ghook@amd.com> */ +#include <crypto/internal/des.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/list.h> #include <linux/module.h> -#include <linux/sched.h> -#include <linux/delay.h> #include <linux/scatterlist.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/des.h> +#include <linux/slab.h> +#include <linux/string.h> #include "ccp-crypto.h" diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index ecd58b38c46e..bc90aba5162a 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -7,14 +7,17 @@ * Author: Tom Lendacky <thomas.lendacky@amd.com> */ -#include <linux/module.h> -#include <linux/moduleparam.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <linux/ccp.h> +#include <linux/err.h> #include <linux/kernel.h> #include <linux/list.h> -#include <linux/ccp.h> +#include <linux/module.h> #include <linux/scatterlist.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/akcipher.h> +#include <linux/slab.h> +#include <linux/spinlock.h> #include "ccp-crypto.h" diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index cb8e99936abb..109b5aef4034 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -8,13 +8,14 @@ * Author: Gary R Hook <gary.hook@amd.com> */ -#include <linux/dma-mapping.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <crypto/scatterwalk.h> #include <crypto/des.h> +#include <crypto/scatterwalk.h> +#include <crypto/utils.h> #include <linux/ccp.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> #include "ccp-dev.h" diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 2e87ca0e292a..3451bada884e 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -33,6 +33,7 @@ #include <asm/cacheflush.h> #include <asm/e820/types.h> #include <asm/sev.h> +#include <asm/msr.h> 
#include "psp-dev.h" #include "sev-dev.h" @@ -109,6 +110,15 @@ static void *sev_init_ex_buffer; */ static struct sev_data_range_list *snp_range_list; +static void __sev_firmware_shutdown(struct sev_device *sev, bool panic); + +static int snp_shutdown_on_panic(struct notifier_block *nb, + unsigned long reason, void *arg); + +static struct notifier_block snp_panic_notifier = { + .notifier_call = snp_shutdown_on_panic, +}; + static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -1060,7 +1070,7 @@ static inline int __sev_do_init_locked(int *psp_ret) static void snp_set_hsave_pa(void *arg) { - wrmsrl(MSR_VM_HSAVE_PA, 0); + wrmsrq(MSR_VM_HSAVE_PA, 0); } static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg) @@ -1112,7 +1122,7 @@ static int __sev_snp_init_locked(int *error) if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) { dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n", SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR); - return 0; + return -EOPNOTSUPP; } /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */ @@ -1176,21 +1186,34 @@ static int __sev_snp_init_locked(int *error) wbinvd_on_all_cpus(); rc = __sev_do_cmd_locked(cmd, arg, error); - if (rc) + if (rc) { + dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n", + cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT", + rc, *error); return rc; + } /* Prepare for first SNP guest launch after INIT. */ wbinvd_on_all_cpus(); rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error); - if (rc) + if (rc) { + dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n", + rc, *error); return rc; + } sev->snp_initialized = true; dev_dbg(sev->dev, "SEV-SNP firmware initialized\n"); + dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, sev->build); + + atomic_notifier_chain_register(&panic_notifier_list, + &snp_panic_notifier); + sev_es_tmr_size = SNP_TMR_SIZE; - return rc; + return 0; } static void __sev_platform_init_handle_tmr(struct sev_device *sev) @@ -1287,16 +1310,22 @@ static int __sev_platform_init_locked(int *error) if (error) *error = psp_ret; - if (rc) + if (rc) { + dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n", + sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc); return rc; + } sev->state = SEV_STATE_INIT; /* Prepare for first SEV guest launch after INIT */ wbinvd_on_all_cpus(); rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error); - if (rc) + if (rc) { + dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n", + *error, rc); return rc; + } dev_dbg(sev->dev, "SEV firmware initialized\n"); @@ -1319,19 +1348,9 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args) if (sev->state == SEV_STATE_INIT) return 0; - /* - * Legacy guests cannot be running while SNP_INIT(_EX) is executing, - * so perform SEV-SNP initialization at probe time. - */ rc = __sev_snp_init_locked(&args->error); - if (rc && rc != -ENODEV) { - /* - * Don't abort the probe if SNP INIT failed, - * continue to initialize the legacy SEV firmware. - */ - dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n", - rc, args->error); - } + if (rc && rc != -ENODEV) + return rc; /* Defer legacy SEV/SEV-ES support if allowed by caller/module. 
*/ if (args->probe && !psp_init_on_probe) @@ -1367,8 +1386,11 @@ static int __sev_platform_shutdown_locked(int *error) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); - if (ret) + if (ret) { + dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n", + *error, ret); return ret; + } sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); @@ -1389,6 +1411,37 @@ static int sev_get_platform_state(int *state, int *error) return rc; } +static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) +{ + struct sev_platform_init_args init_args = {0}; + int rc; + + rc = _sev_platform_init_locked(&init_args); + if (rc) { + argp->error = SEV_RET_INVALID_PLATFORM_STATE; + return rc; + } + + *shutdown_required = true; + + return 0; +} + +static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required) +{ + int error, rc; + + rc = __sev_snp_init_locked(&error); + if (rc) { + argp->error = SEV_RET_INVALID_PLATFORM_STATE; + return rc; + } + + *shutdown_required = true; + + return 0; +} + static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable) { int state, rc; @@ -1441,24 +1494,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; + bool shutdown_required = false; int rc; if (!writable) return -EPERM; if (sev->state == SEV_STATE_UNINIT) { - rc = __sev_platform_init_locked(&argp->error); + rc = sev_move_to_init_state(argp, &shutdown_required); if (rc) return rc; } - return __sev_do_cmd_locked(cmd, NULL, &argp->error); + rc = __sev_do_cmd_locked(cmd, NULL, &argp->error); + + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + + return rc; } static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_csr input; + bool shutdown_required = false; struct sev_data_pek_csr data; void __user *input_address; void *blob = NULL; @@ -1490,7 +1550,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) cmd: if (sev->state == SEV_STATE_UNINIT) { - ret = __sev_platform_init_locked(&argp->error); + ret = sev_move_to_init_state(argp, &shutdown_required); if (ret) goto e_free_blob; } @@ -1511,6 +1571,9 @@ cmd: } e_free_blob: + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + kfree(blob); return ret; } @@ -1682,9 +1745,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic) ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error); /* SHUTDOWN may require DF_FLUSH */ if (*error == SEV_RET_DFFLUSH_REQUIRED) { - ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL); + int dfflush_error = SEV_RET_NO_FW_CALL; + + ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error); if (ret) { - dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n"); + dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n", + ret, dfflush_error); return ret; } /* reissue the shutdown command */ @@ -1692,7 +1758,8 @@ static int __sev_snp_shutdown_locked(int *error, bool panic) error); } if (ret) { - dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n"); + dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n", + ret, *error); return ret; } @@ -1718,6 +1785,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic) sev->snp_initialized = false; dev_dbg(sev->dev, "SEV-SNP firmware 
shutdown\n"); + atomic_notifier_chain_unregister(&panic_notifier_list, + &snp_panic_notifier); + + /* Reset TMR size back to default */ + sev_es_tmr_size = SEV_TMR_SIZE; + return ret; } @@ -1726,6 +1799,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_cert_import input; struct sev_data_pek_cert_import data; + bool shutdown_required = false; void *pek_blob, *oca_blob; int ret; @@ -1756,7 +1830,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) /* If platform is not in INIT state then transition it to INIT */ if (sev->state != SEV_STATE_INIT) { - ret = __sev_platform_init_locked(&argp->error); + ret = sev_move_to_init_state(argp, &shutdown_required); if (ret) goto e_free_oca; } @@ -1764,6 +1838,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error); e_free_oca: + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + kfree(oca_blob); e_free_pek: kfree(pek_blob); @@ -1880,32 +1957,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) struct sev_data_pdh_cert_export data; void __user *input_cert_chain_address; void __user *input_pdh_cert_address; + bool shutdown_required = false; int ret; - /* If platform is not in INIT state then transition it to INIT. */ - if (sev->state != SEV_STATE_INIT) { - if (!writable) - return -EPERM; - - ret = __sev_platform_init_locked(&argp->error); - if (ret) - return ret; - } - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; memset(&data, 0, sizeof(data)); + input_pdh_cert_address = (void __user *)input.pdh_cert_address; + input_cert_chain_address = (void __user *)input.cert_chain_address; + /* Userspace wants to query the certificate length. */ if (!input.pdh_cert_address || !input.pdh_cert_len || !input.cert_chain_address) goto cmd; - input_pdh_cert_address = (void __user *)input.pdh_cert_address; - input_cert_chain_address = (void __user *)input.cert_chain_address; - /* Allocate a physically contiguous buffer to store the PDH blob. */ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; @@ -1931,6 +1999,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) data.cert_chain_len = input.cert_chain_len; cmd: + /* If platform is not in INIT state then transition it to INIT. */ + if (sev->state != SEV_STATE_INIT) { + if (!writable) { + ret = -EPERM; + goto e_free_cert; + } + ret = sev_move_to_init_state(argp, &shutdown_required); + if (ret) + goto e_free_cert; + } + ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error); /* If we query the length, FW responded with expected data. 
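The ioctl hunks above keep repeating one idiom: if the firmware is still UNINIT, transition it to INIT just for this request, note that in a local shutdown_required flag, issue the command, then shut the firmware back down on the way out. Reduced to a skeleton (the helper names mirror the diff, but do_firmware_command() is a hypothetical stand-in for the real command call, and error handling and locking are elided):

/* Simplified skeleton of the on-demand INIT/shutdown idiom, not driver code. */
static int example_ioctl(struct sev_device *sev, struct sev_issue_cmd *argp)
{
	bool shutdown_required = false;
	int rc;

	if (sev->state == SEV_STATE_UNINIT) {
		/* Bring the firmware to INIT only for this one request. */
		rc = sev_move_to_init_state(argp, &shutdown_required);
		if (rc)
			return rc;
	}

	rc = do_firmware_command(sev, argp);	/* stand-in for the FW call */

	/* Undo the transient INIT so the platform is left as we found it. */
	if (shutdown_required)
		__sev_firmware_shutdown(sev, false);

	return rc;
}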
*/ @@ -1957,6 +2036,9 @@ cmd: } e_free_cert: + if (shutdown_required) + __sev_firmware_shutdown(sev, false); + kfree(cert_blob); e_free_pdh: kfree(pdh_blob); @@ -1966,12 +2048,13 @@ e_free_pdh: static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) { struct sev_device *sev = psp_master->sev_data; + bool shutdown_required = false; struct sev_data_snp_addr buf; struct page *status_page; + int ret, error; void *data; - int ret; - if (!sev->snp_initialized || !argp->data) + if (!argp->data) return -EINVAL; status_page = alloc_page(GFP_KERNEL_ACCOUNT); @@ -1980,6 +2063,12 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) data = page_address(status_page); + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + goto cleanup; + } + /* * Firmware expects status page to be in firmware-owned state, otherwise * it will report firmware error code INVALID_PAGE_STATE (0x1A). @@ -2008,6 +2097,9 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp) ret = -EFAULT; cleanup: + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + __free_pages(status_page, 0); return ret; } @@ -2016,21 +2108,33 @@ static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp) { struct sev_device *sev = psp_master->sev_data; struct sev_data_snp_commit buf; + bool shutdown_required = false; + int ret, error; - if (!sev->snp_initialized) - return -EINVAL; + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + return ret; + } buf.len = sizeof(buf); - return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error); + ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error); + + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + + return ret; } static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_snp_config config; + bool shutdown_required = false; + int ret, error; - if (!sev->snp_initialized || !argp->data) + if (!argp->data) return -EINVAL; if (!writable) @@ -2039,17 +2143,29 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable if (copy_from_user(&config, (void __user *)argp->data, sizeof(config))) return -EFAULT; - return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + return ret; + } + + ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); + + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + + return ret; } static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_snp_vlek_load input; + bool shutdown_required = false; + int ret, error; void *blob; - int ret; - if (!sev->snp_initialized || !argp->data) + if (!argp->data) return -EINVAL; if (!writable) @@ -2068,8 +2184,18 @@ static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable) input.vlek_wrapped_address = __psp_pa(blob); + if (!sev->snp_initialized) { + ret = snp_move_to_init_state(argp, &shutdown_required); + if (ret) + goto cleanup; + } + ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error); + if (shutdown_required) + __sev_snp_shutdown_locked(&error, false); + +cleanup: kfree(blob); return ret; @@ -2339,6 +2465,15 @@ static void sev_firmware_shutdown(struct 
sev_device *sev) mutex_unlock(&sev_cmd_mutex); } +void sev_platform_shutdown(void) +{ + if (!psp_master || !psp_master->sev_data) + return; + + sev_firmware_shutdown(psp_master->sev_data); +} +EXPORT_SYMBOL_GPL(sev_platform_shutdown); + void sev_dev_destroy(struct psp_device *psp) { struct sev_device *sev = psp->sev_data; @@ -2373,10 +2508,6 @@ static int snp_shutdown_on_panic(struct notifier_block *nb, return NOTIFY_DONE; } -static struct notifier_block snp_panic_notifier = { - .notifier_call = snp_shutdown_on_panic, -}; - int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, void *data, int *error) { @@ -2390,9 +2521,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; - struct sev_platform_init_args args = {0}; u8 api_major, api_minor, build; - int rc; if (!sev) return; @@ -2415,18 +2544,6 @@ void sev_pci_init(void) api_major, api_minor, build, sev->api_major, sev->api_minor, sev->build); - /* Initialize the platform */ - args.probe = true; - rc = sev_platform_init(&args); - if (rc) - dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n", - args.error, rc); - - dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ? - "-SNP" : "", sev->api_major, sev->api_minor, sev->build); - - atomic_notifier_chain_register(&panic_notifier_list, - &snp_panic_notifier); return; err: @@ -2443,7 +2560,4 @@ void sev_pci_exit(void) return; sev_firmware_shutdown(sev); - - atomic_notifier_chain_unregister(&panic_notifier_list, - &snp_panic_notifier); } diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 2ebc878da160..e1be2072d680 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -375,6 +375,7 @@ static const struct tee_vdata teev1 = { static const struct tee_vdata teev2 = { .ring_wptr_reg = 0x10950, /* C2PMSG_20 */ .ring_rptr_reg = 0x10954, /* C2PMSG_21 */ + .info_reg = 0x109e8, /* C2PMSG_58 */ }; static const struct platform_access_vdata pa_v1 = { @@ -440,6 +441,7 @@ static const struct psp_vdata pspv5 = { .cmdresp_reg = 0x10944, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */ + .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10510, /* P2CMSG_INTEN */ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ @@ -535,6 +537,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, + { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d3f5d108b898..7c41f9593d03 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -862,7 +862,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d return -EINVAL; } - algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); + algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL); if (!algs) return -ENOMEM; @@ -5224,7 +5224,7 @@ static int qm_pre_store_caps(struct hisi_qm *qm) size_t i, size; size = ARRAY_SIZE(qm_cap_query_info); - qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); + qm_cap = devm_kcalloc(&pdev->dev, sizeof(*qm_cap), size, GFP_KERNEL); if (!qm_cap) return -ENOMEM; diff --git 
a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 1dc2378aa88b..e050f5ff5efb 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -491,8 +491,9 @@ static int img_hash_init(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -555,10 +556,10 @@ static int img_hash_update(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -570,9 +571,10 @@ static int img_hash_final(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -584,11 +586,12 @@ static int img_hash_finup(struct ahash_request *req) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); + return crypto_ahash_finup(&rctx->fallback_req); } @@ -600,8 +603,9 @@ static int img_hash_import(struct ahash_request *req, const void *in) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -613,8 +617,9 @@ static int img_hash_export(struct ahash_request *req, void *out) struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); - rctx->fallback_req.base.flags = req->base.flags - & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c index df1b05ac5a57..ac13d90a2b7c 100644 --- 
a/drivers/crypto/inside-secure/eip93/eip93-hash.c +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c @@ -97,12 +97,20 @@ void eip93_hash_handle_result(struct crypto_async_request *async, int err) static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest) { - u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, - SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 }; - u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, - SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 }; - u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }; - u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 }; + static const u32 sha256_init[] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 + }; + static const u32 sha224_init[] = { + SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 + }; + static const u32 sha1_init[] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 + }; + static const u32 md5_init[] = { + MD5_H0, MD5_H1, MD5_H2, MD5_H3 + }; /* Init HASH constant */ switch (hash) { diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index f44c08f5f5ec..d2b632193beb 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -2043,7 +2043,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = { .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = 1, + .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), .cra_init = safexcel_ahash_cra_init, .cra_exit = safexcel_ahash_cra_exit, diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 09d9589f2d68..23f585219fb4 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -725,7 +725,7 @@ static int alloc_wq_table(int max_wqs) for (cpu = 0; cpu < nr_cpus; cpu++) { entry = per_cpu_ptr(wq_table, cpu); - entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL); + entry->wqs = kcalloc(max_wqs, sizeof(*entry->wqs), GFP_KERNEL); if (!entry->wqs) { free_wq_table(); return -ENOMEM; @@ -894,7 +894,7 @@ out: static void rebalance_wq_table(void) { const struct cpumask *node_cpus; - int node, cpu, iaa = -1; + int node_cpu, node, cpu, iaa = 0; if (nr_iaa == 0) return; @@ -905,36 +905,29 @@ static void rebalance_wq_table(void) clear_wq_table(); if (nr_iaa == 1) { - for (cpu = 0; cpu < nr_cpus; cpu++) { - if (WARN_ON(wq_table_add_wqs(0, cpu))) { - pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu); - return; - } + for_each_possible_cpu(cpu) { + if (WARN_ON(wq_table_add_wqs(0, cpu))) + goto err; } return; } for_each_node_with_cpus(node) { + cpu = 0; node_cpus = cpumask_of_node(node); - for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) { - int node_cpu = cpumask_nth(cpu, node_cpus); - - if (WARN_ON(node_cpu >= nr_cpu_ids)) { - pr_debug("node_cpu %d doesn't exist!\n", node_cpu); - return; - } - - if ((cpu % cpus_per_iaa) == 0) - iaa++; - - if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) { - pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu); - return; - } + for_each_cpu(node_cpu, node_cpus) { + iaa = cpu / cpus_per_iaa; + if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) + goto err; + cpu++; } } + + return; +err: + pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu); } static inline int check_completion(struct device *dev, @@ -999,12 +992,9 
@@ out: static int deflate_generic_decompress(struct acomp_req *req) { - ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); + ACOMP_FBREQ_ON_STACK(fbreq, req); int ret; - acomp_request_set_callback(fbreq, 0, NULL, NULL); - acomp_request_set_params(fbreq, req->src, req->dst, req->slen, - req->dlen); ret = crypto_acomp_decompress(fbreq); req->dlen = fbreq->dlen; @@ -1020,8 +1010,7 @@ static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - u32 compression_crc); + dma_addr_t dst_addr, unsigned int *dlen); static void iaa_desc_complete(struct idxd_desc *idxd_desc, enum idxd_complete_type comp_type, @@ -1087,10 +1076,10 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc, } if (ctx->compress && compression_ctx->verify_compress) { + u32 *compression_crc = acomp_request_ctx(ctx->req); dma_addr_t src_addr, dst_addr; - u32 compression_crc; - compression_crc = idxd_desc->iax_completion->crc; + *compression_crc = idxd_desc->iax_completion->crc; ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr); if (ret) { @@ -1100,8 +1089,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc, } ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr, - ctx->req->slen, dst_addr, &ctx->req->dlen, - compression_crc); + ctx->req->slen, dst_addr, &ctx->req->dlen); if (ret) { dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret); err = -EIO; @@ -1130,11 +1118,11 @@ out: static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - u32 *compression_crc) + dma_addr_t dst_addr, unsigned int *dlen) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *compression_crc = acomp_request_ctx(req); struct iaa_device *iaa_device; struct idxd_desc *idxd_desc; struct iax_hw_desc *desc; @@ -1187,8 +1175,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: compression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1282,11 +1269,11 @@ out: static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - u32 compression_crc) + dma_addr_t dst_addr, unsigned int *dlen) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *compression_crc = acomp_request_ctx(req); struct iaa_device *iaa_device; struct idxd_desc *idxd_desc; struct iax_hw_desc *desc; @@ -1346,10 +1333,10 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, goto err; } - if (compression_crc != idxd_desc->iax_completion->crc) { + if (*compression_crc != idxd_desc->iax_completion->crc) { ret = -EINVAL; dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:" - " comp=0x%x, decomp=0x%x\n", compression_crc, + " comp=0x%x, decomp=0x%x\n", *compression_crc, idxd_desc->iax_completion->crc); print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, idxd_desc->iax_completion, 64, 0); @@ -1369,8 +1356,7 @@ err: 
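The hunks above move the compression CRC out of a stack variable and into per-request storage: both the submit and verify paths now fetch the same location through acomp_request_ctx(), backed by the .cra_reqsize = sizeof(u32) added to the algorithm definition later in this patch. A hedged fragment of the intended pattern (editor's sketch only; the helper name is made up and error handling is omitted):

/* Sketch: stash the completion CRC in the acomp request context so the
 * asynchronous verify step can read it back later. acomp_request_ctx()
 * returns the per-request area reserved by .cra_reqsize.
 */
static void stash_compression_crc(struct acomp_req *req, u32 crc)
{
	u32 *compression_crc = acomp_request_ctx(req);

	*compression_crc = crc;
}

The switch to ACOMP_FBREQ_ON_STACK() in deflate_generic_decompress() appears to follow the same direction: the fallback request is derived from the original request, which is presumably why the explicit acomp_request_set_callback()/acomp_request_set_params() calls could be dropped.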
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, - dma_addr_t dst_addr, unsigned int *dlen, - bool disable_async) + dma_addr_t dst_addr, unsigned int *dlen) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1412,7 +1398,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src1_size = slen; desc->completion_addr = idxd_desc->compl_dma; - if (ctx->use_irq && !disable_async) { + if (ctx->use_irq) { desc->flags |= IDXD_OP_FLAG_RCI; idxd_desc->crypto.req = req; @@ -1425,8 +1411,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: decompression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1446,7 +1431,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, update_total_decomp_calls(); update_wq_decomp_calls(wq); - if (ctx->async_mode && !disable_async) { + if (ctx->async_mode) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); goto out; @@ -1474,7 +1459,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, *dlen = req->dlen; - if (!ctx->async_mode || disable_async) + if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); /* Update stats */ @@ -1496,7 +1481,6 @@ static int iaa_comp_acompress(struct acomp_req *req) dma_addr_t src_addr, dst_addr; int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; - u32 compression_crc; struct idxd_wq *wq; struct device *dev; @@ -1557,7 +1541,7 @@ static int iaa_comp_acompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, - &req->dlen, &compression_crc); + &req->dlen); if (ret == -EINPROGRESS) return ret; @@ -1569,7 +1553,7 @@ static int iaa_comp_acompress(struct acomp_req *req) } ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, compression_crc); + dst_addr, &req->dlen); if (ret) dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret); @@ -1655,7 +1639,7 @@ static int iaa_comp_adecompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, false); + dst_addr, &req->dlen); if (ret == -EINPROGRESS) return ret; @@ -1699,6 +1683,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = { .cra_driver_name = "deflate-iaa", .cra_flags = CRYPTO_ALG_ASYNC, .cra_ctxsize = sizeof(struct iaa_compression_ctx), + .cra_reqsize = sizeof(u32), .cra_module = THIS_MODULE, .cra_priority = IAA_ALG_PRIORITY, } diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 02fb8abe4e6e..359c61f0c8a1 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -70,6 +70,18 @@ config CRYPTO_DEV_QAT_420XX To compile this as a module, choose M here: the module will be called qat_420xx. +config CRYPTO_DEV_QAT_6XXX + tristate "Support for Intel(R) QuickAssist Technology QAT_6XXX" + depends on (X86 || COMPILE_TEST) + depends on PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) QuickAssist Technology QAT_6xxx + for accelerating crypto and compression workloads. 
+ + To compile this as a module, choose M here: the module + will be called qat_6xxx. + config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile index 235b69f4f3f7..abef14207afa 100644 --- a/drivers/crypto/intel/qat/Makefile +++ b/drivers/crypto/intel/qat/Makefile @@ -1,10 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 +subdir-ccflags-y := -I$(src)/qat_common obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile index 72b24b1804cf..f6df54d2993e 100644 --- a/drivers/crypto/intel/qat/qat_420xx/Makefile +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o qat_420xx-y := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 4feeef83f7a3..7c3c0f561c95 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -9,15 +9,14 @@ #include <adf_common_drv.h> #include <adf_fw_config.h> #include <adf_gen4_config.h> -#include <adf_gen4_dc.h> #include <adf_gen4_hw_csr_data.h> #include <adf_gen4_hw_data.h> #include <adf_gen4_pfvf.h> #include <adf_gen4_pm.h> #include <adf_gen4_ras.h> -#include <adf_gen4_timer.h> #include <adf_gen4_tl.h> #include <adf_gen4_vf_mig.h> +#include <adf_timer.h> #include "adf_420xx_hw_data.h" #include "icp_qat_hw.h" @@ -93,7 +92,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = { static struct adf_hw_device_class adf_420xx_class = { .name = ADF_420XX_DEVICE_NAME, .type = DEV_420XX, - .instances = 0, }; static u32 get_ae_mask(struct adf_hw_device_data *self) @@ -469,8 +467,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; - hw_data->start_timer = adf_gen4_timer_start; - hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->start_timer = adf_timer_start; + hw_data->stop_timer = adf_timer_stop; hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_420XX_AE_FREQ; diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index 8084aa0f7f41..cfa00daeb4fb 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -14,7 +14,7 @@ #include "adf_420xx_hw_data.h" static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) }, { } }; MODULE_DEVICE_TABLE(pci, adf_pci_tbl); @@ -186,11 +186,19 @@ static void adf_remove(struct 
pci_dev *pdev) adf_cleanup_accel(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, .name = ADF_420XX_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile index e8480bb80dee..188b611445e6 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 4eb6ef99efdd..bd0b1b1015c0 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -9,15 +9,14 @@ #include <adf_common_drv.h> #include <adf_fw_config.h> #include <adf_gen4_config.h> -#include <adf_gen4_dc.h> #include <adf_gen4_hw_csr_data.h> #include <adf_gen4_hw_data.h> #include <adf_gen4_pfvf.h> #include <adf_gen4_pm.h> #include "adf_gen4_ras.h" -#include <adf_gen4_timer.h> #include <adf_gen4_tl.h> #include <adf_gen4_vf_mig.h> +#include <adf_timer.h> #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -96,7 +95,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, - .instances = 0, }; static u32 get_ae_mask(struct adf_hw_device_data *self) @@ -422,13 +420,13 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; hw_data->num_rps = ADF_GEN4_MAX_RPS; switch (dev_id) { - case ADF_402XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_402XX: hw_data->fw_name = ADF_402XX_FW; hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; hw_data->get_ena_thd_mask = get_ena_thd_mask; break; - case ADF_401XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_401XX: hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->uof_get_name = uof_get_name_4xxx; @@ -455,8 +453,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; - hw_data->start_timer = adf_gen4_timer_start; - hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->start_timer = adf_timer_start; + hw_data->stop_timer = adf_timer_stop; hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 5537a9991e4e..c9be5dcddb27 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -14,9 +14,9 @@ #include "adf_4xxx_hw_data.h" static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), }, - { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), }, - { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), }, + { 
PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_4XXX) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_401XX) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_402XX) }, { } }; MODULE_DEVICE_TABLE(pci, adf_pci_tbl); @@ -188,11 +188,19 @@ static void adf_remove(struct pci_dev *pdev) adf_cleanup_accel(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, .name = ADF_4XXX_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, + .shutdown = adf_shutdown, .sriov_configure = adf_sriov_configure, .err_handler = &adf_err_handler, }; diff --git a/drivers/crypto/intel/qat/qat_6xxx/Makefile b/drivers/crypto/intel/qat/qat_6xxx/Makefile new file mode 100644 index 000000000000..4b4de67cb0c2 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx.o +qat_6xxx-y := adf_drv.o adf_6xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c new file mode 100644 index 000000000000..359a6447ccb8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c @@ -0,0 +1,845 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/types.h> + +#include <adf_accel_devices.h> +#include <adf_admin.h> +#include <adf_cfg.h> +#include <adf_cfg_services.h> +#include <adf_clock.h> +#include <adf_common_drv.h> +#include <adf_fw_config.h> +#include <adf_gen6_pm.h> +#include <adf_gen6_ras.h> +#include <adf_gen6_shared.h> +#include <adf_timer.h> +#include "adf_6xxx_hw_data.h" +#include "icp_qat_fw_comp.h" +#include "icp_qat_hw_51_comp.h" + +#define RP_GROUP_0_MASK (BIT(0) | BIT(2)) +#define RP_GROUP_1_MASK (BIT(1) | BIT(3)) +#define RP_GROUP_ALL_MASK (RP_GROUP_0_MASK | RP_GROUP_1_MASK) + +#define ADF_AE_GROUP_0 GENMASK(3, 0) +#define ADF_AE_GROUP_1 GENMASK(7, 4) +#define ADF_AE_GROUP_2 BIT(8) + +struct adf_ring_config { + u32 ring_mask; + enum adf_cfg_service_type ring_type; + const unsigned long *thrd_mask; +}; + +static u32 rmask_two_services[] = { + RP_GROUP_0_MASK, + RP_GROUP_1_MASK, +}; + +enum adf_gen6_rps { + RP0 = 0, + RP1 = 1, + RP2 = 2, + RP3 = 3, + RP_MAX = RP3 +}; + +/* + * thrd_mask_[sym|asym|cpr|dcc]: these static arrays define the thread + * configuration for handling requests of specific services across the + * accelerator engines. Each element in an array corresponds to an + * accelerator engine, with the value being a bitmask that specifies which + * threads within that engine are capable of processing the particular service. + * + * For example, a value of 0x0C means that threads 2 and 3 are enabled for the + * service in the respective accelerator engine. 
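+ *
+ * (Editor's worked decode, not part of the original comment: with bit 0 as
+ * thread 0, 0x0C sets bits 2 and 3, so threads 2 and 3 serve the request;
+ * 0x1C adds bit 4 for threads 2-4; 0x70 sets bits 4-6, matching the asym
+ * masks below where threads 4-6 handle the work.)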
+ */ +static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = { + 0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00 +}; + +static const unsigned long thrd_mask_asym[ADF_6XXX_MAX_ACCELENGINES] = { + 0x70, 0x70, 0x70, 0x70, 0x60, 0x60, 0x60, 0x60, 0x00 +}; + +static const unsigned long thrd_mask_cpr[ADF_6XXX_MAX_ACCELENGINES] = { + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00 +}; + +static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = { + 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00 +}; + +static const char *const adf_6xxx_fw_objs[] = { + [ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ, + [ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ, + [ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ, +}; + +static const struct adf_fw_config adf_default_fw_config[] = { + { ADF_AE_GROUP_1, ADF_FW_DC_OBJ }, + { ADF_AE_GROUP_0, ADF_FW_CY_OBJ }, + { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ }, +}; + +static struct adf_hw_device_class adf_6xxx_class = { + .name = ADF_6XXX_DEVICE_NAME, + .type = DEV_6XXX, +}; + +static bool services_supported(unsigned long mask) +{ + int num_svc; + + if (mask >= BIT(SVC_BASE_COUNT)) + return false; + + num_svc = hweight_long(mask); + switch (num_svc) { + case ADF_ONE_SERVICE: + return true; + case ADF_TWO_SERVICES: + case ADF_THREE_SERVICES: + return !test_bit(SVC_DCC, &mask); + default: + return false; + } +} + +static int get_service(unsigned long *mask) +{ + if (test_and_clear_bit(SVC_ASYM, mask)) + return SVC_ASYM; + + if (test_and_clear_bit(SVC_SYM, mask)) + return SVC_SYM; + + if (test_and_clear_bit(SVC_DC, mask)) + return SVC_DC; + + if (test_and_clear_bit(SVC_DCC, mask)) + return SVC_DCC; + + return -EINVAL; +} + +static enum adf_cfg_service_type get_ring_type(enum adf_services service) +{ + switch (service) { + case SVC_SYM: + return SYM; + case SVC_ASYM: + return ASYM; + case SVC_DC: + case SVC_DCC: + return COMP; + default: + return UNUSED; + } +} + +static const unsigned long *get_thrd_mask(enum adf_services service) +{ + switch (service) { + case SVC_SYM: + return thrd_mask_sym; + case SVC_ASYM: + return thrd_mask_asym; + case SVC_DC: + return thrd_mask_cpr; + case SVC_DCC: + return thrd_mask_dcc; + default: + return NULL; + } +} + +static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config *rp_config, + unsigned int *num_services) +{ + unsigned int i, nservices; + unsigned long mask; + int ret, service; + + ret = adf_get_service_mask(accel_dev, &mask); + if (ret) + return ret; + + nservices = hweight_long(mask); + if (nservices > MAX_NUM_CONCURR_SVC) + return -EINVAL; + + for (i = 0; i < nservices; i++) { + service = get_service(&mask); + if (service < 0) + return service; + + rp_config[i].ring_type = get_ring_type(service); + rp_config[i].thrd_mask = get_thrd_mask(service); + + /* + * If there is only one service enabled, use all ring pairs for + * that service. + * If there are two services enabled, use ring pairs 0 and 2 for + * one service and ring pairs 1 and 3 for the other service. 
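+ *
+ * (Editor's illustration: with two services the groups come from
+ * rmask_two_services[], i.e. 0x5 for ring pairs 0 and 2 and 0xA for ring
+ * pairs 1 and 3. adf_gen6_get_arb_mask() below then copies each group into
+ * one 4-bit nibble per capable thread, so an engine serving dc on thread 0
+ * and sym on threads 2-3 ends up with 0xA | 0x5 << 8 | 0x5 << 12 = 0x550A.)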
+ */ + switch (nservices) { + case ADF_ONE_SERVICE: + rp_config[i].ring_mask = RP_GROUP_ALL_MASK; + break; + case ADF_TWO_SERVICES: + rp_config[i].ring_mask = rmask_two_services[i]; + break; + case ADF_THREE_SERVICES: + rp_config[i].ring_mask = BIT(i); + + /* If ASYM is enabled, use additional ring pair */ + if (service == SVC_ASYM) + rp_config[i].ring_mask |= BIT(RP3); + + break; + default: + return -EINVAL; + } + } + + *num_services = nservices; + + return 0; +} + +static u32 adf_gen6_get_arb_mask(struct adf_accel_dev *accel_dev, unsigned int ae) +{ + struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC]; + unsigned int num_services, i, thrd; + u32 ring_mask, thd2arb_mask = 0; + const unsigned long *p_mask; + + if (get_rp_config(accel_dev, rp_config, &num_services)) + return 0; + + /* + * The thd2arb_mask maps ring pairs to threads within an accelerator engine. + * It ensures that jobs submitted to ring pairs are scheduled on threads capable + * of handling the specified service type. + * + * Each group of 4 bits in the mask corresponds to a thread, with each bit + * indicating whether a job from a ring pair can be scheduled on that thread. + * The use of 4 bits is due to the organization of ring pairs into groups of + * four, where each group shares the same configuration. + */ + for (i = 0; i < num_services; i++) { + p_mask = &rp_config[i].thrd_mask[ae]; + ring_mask = rp_config[i].ring_mask; + + for_each_set_bit(thrd, p_mask, ADF_NUM_THREADS_PER_AE) + thd2arb_mask |= ring_mask << (thrd * 4); + } + + return thd2arb_mask; +} + +static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + enum adf_cfg_service_type rps[ADF_GEN6_NUM_BANKS_PER_VF] = { }; + struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC]; + unsigned int num_services, rp_num, i; + unsigned long cfg_mask; + u16 ring_to_svc_map; + + if (get_rp_config(accel_dev, rp_config, &num_services)) + return 0; + + /* + * Loop through the configured services and populate the `rps` array that + * contains what service that particular ring pair can handle (i.e. symmetric + * crypto, asymmetric crypto, data compression or compression chaining). + */ + for (i = 0; i < num_services; i++) { + cfg_mask = rp_config[i].ring_mask; + for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF) + rps[rp_num] = rp_config[i].ring_type; + } + + /* + * The ring_mask is structured into segments of 3 bits, with each + * segment representing the service configuration for a specific ring pair. + * Since ring pairs are organized into groups of 4, the ring_mask contains 4 + * such 3-bit segments, each corresponding to one ring pair. + * + * The device has 64 ring pairs, which are organized in groups of 4, namely + * 16 groups. Each group has the same configuration, represented here by + * `ring_to_svc_map`. + */ + ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} + +static u32 get_accel_mask(struct adf_hw_device_data *self) +{ + return ADF_GEN6_ACCELERATORS_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_GEN6_MAX_ACCELERATORS; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + return self ? 
hweight32(self->ae_mask) : 0; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN6_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN6_ETR_BAR; +} + +static u32 get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN6_SRAM_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_1; +} + +static void get_arb_info(struct arb_info *arb_info) +{ + arb_info->arb_cfg = ADF_GEN6_ARB_CONFIG; + arb_info->arb_offset = ADF_GEN6_ARB_OFFSET; + arb_info->wt2sam_offset = ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET; +} + +static void get_admin_info(struct admin_info *admin_csrs_info) +{ + admin_csrs_info->mailbox_offset = ADF_GEN6_MAILBOX_BASE_OFFSET; + admin_csrs_info->admin_msg_ur = ADF_GEN6_ADMINMSGUR_OFFSET; + admin_csrs_info->admin_msg_lr = ADF_GEN6_ADMINMSGLR_OFFSET; +} + +static u32 get_heartbeat_clock(struct adf_hw_device_data *self) +{ + return ADF_GEN6_COUNTER_FREQ; +} + +static void enable_error_correction(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + /* + * Enable all error notification bits in errsou3 except VFLR + * notification on host. + */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_VFLNOTIFY); +} + +static void enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr = adf_get_pmisc_base(accel_dev); + + /* Enable bundle interrupts */ + ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET, 0); + ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET, 0); + + /* Enable misc interrupts */ + ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_MASK_OFFSET, 0); +} + +static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr = adf_get_pmisc_base(accel_dev); + u64 val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; + u64 val = ADF_SSM_WDT_DEFAULT_VALUE; + + /* Enable watchdog timer for sym and dc */ + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTATHL_OFFSET, ADF_SSMWDTATHH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val); + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val); + + /* Enable watchdog timer for pke */ + ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke); +} + +/* + * The vector routing table is used to select the MSI-X entry to use for each + * interrupt source. + * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts. + * The final entry corresponds to VF2PF or error interrupts. + * This vector table could be used to configure one MSI-X entry to be shared + * between multiple interrupt sources. + * + * The default routing is set to have a one to one correspondence between the + * interrupt source and the MSI-X entry used. + */ +static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + unsigned int i; + + for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++) + ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i); +} + +static int reset_ring_pair(void __iomem *csr, u32 bank_number) +{ + u32 status; + int ret; + + /* + * Write rpresetctl register BIT(0) as 1. + * Since rpresetctl registers have no RW fields, no need to preserve + * values for other bits. Just write directly. 
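+ *
+ * (Editor's note: completion is then polled below with read_poll_timeout()
+ * every ADF_RPRESET_POLL_DELAY_US (20 us) for up to
+ * ADF_RPRESET_POLL_TIMEOUT_US (5 s), per the constants defined in
+ * adf_6xxx_hw_data.h further down.)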
+ */ + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number), + ADF_WQM_CSR_RPRESETCTL_RESET); + + /* Read rpresetsts register and wait for rp reset to complete */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_WQM_CSR_RPRESETSTS_STATUS, + ADF_RPRESET_POLL_DELAY_US, + ADF_RPRESET_POLL_TIMEOUT_US, true, + csr, ADF_WQM_CSR_RPRESETSTS(bank_number)); + if (ret) + return ret; + + /* When ring pair reset is done, clear rpresetsts */ + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS); + + return 0; +} + +static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + void __iomem *csr = adf_get_etr_base(accel_dev); + int ret; + + if (bank_number >= hw_data->num_banks) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number); + + ret = reset_ring_pair(csr, bank_number); + if (ret) + dev_err(&GET_DEV(accel_dev), "ring pair reset failed (timeout)\n"); + else + dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n"); + + return ret; +} + +static int build_comp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_hw_comp_51_config_csr_lower hw_comp_lower_csr = { }; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + u32 lower_val; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; + break; + default: + return -EINVAL; + } + + hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED; + hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1; + lower_val = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(hw_comp_lower_csr); + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + + return 0; +} + +static int build_decomp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + break; + default: + return -EINVAL; + } + + cd_pars->u.sl.comp_slice_cfg_word[0] = 0; + cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + + return 0; +} + +static void adf_gen6_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_comp_block = build_comp_block; + dc_ops->build_decomp_block = build_decomp_block; +} + +static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u32 *thd2arb_map = hw_data->thd_to_arb_map; + unsigned int i; + + for (i = 0; i < hw_data->num_engines; i++) { + thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i); + dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]); + } + + return 0; +} + +static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number) +{ + u32 value; + + /* + * After each PF FLR, for each of the 64 ring pairs in the PF, the + * driver must program the ringmodectl CSRs. 
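+ *
+ * (Editor's note on the values written below: per the header further down,
+ * FIELD_PREP() places the default traffic class 0x7 into bits 18:16
+ * (ADF_GEN6_RINGMODECTL_TC_MASK) and 0x1 into bits 20:19
+ * (ADF_GEN6_RINGMODECTL_TC_EN_MASK) so the ring actually uses that TC.)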
+ */ + value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number)); + value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT); + value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1); + ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value); +} + +static int set_vc_config(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + u32 value; + int err; + + /* + * After each PF FLR, the driver must program the Port Virtual Channel (VC) + * Control Registers. + * Read PVC0CTL then write the masked values. + */ + pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value); + value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT); + err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value); + if (err) { + dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n"); + return pcibios_err_to_errno(err); + } + + /* Read PVC1CTL then write masked values */ + pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value); + value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT); + value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON); + err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value); + if (err) + dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n"); + + return pcibios_err_to_errno(err); +} + +static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *csr = adf_get_etr_base(accel_dev); + u32 i; + + for (i = 0; i < hw_data->num_banks; i++) { + dev_dbg(&GET_DEV(accel_dev), "set virtual channels for bank:%d\n", i); + set_vc_csr_for_bank(csr, i); + } + + return set_vc_config(accel_dev); +} + +static u32 get_ae_mask(struct adf_hw_device_data *self) +{ + unsigned long fuses = self->fuses[ADF_FUSECTL4]; + u32 mask = ADF_6XXX_ACCELENGINES_MASK; + + /* + * If bit 0 is set in the fuses, the first 4 engines are disabled. + * If bit 4 is set, the second group of 4 engines are disabled. + * If bit 8 is set, the admin engine (bit 8) is disabled. 
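+ *
+ * (Editor's worked example: with ADF_6XXX_ACCELENGINES_MASK = GENMASK(8, 0)
+ * = 0x1ff, a fuse value of BIT(4) clears ADF_AE_GROUP_1 = GENMASK(7, 4),
+ * leaving 0x1ff & ~0xf0 = 0x10f, i.e. engines 0-3 plus the admin engine.)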
+ */ + if (test_bit(0, &fuses)) + mask &= ~ADF_AE_GROUP_0; + + if (test_bit(4, &fuses)) + mask &= ~ADF_AE_GROUP_1; + + if (test_bit(8, &fuses)) + mask &= ~ADF_AE_GROUP_2; + + return mask; +} + +static u32 get_accel_cap(struct adf_accel_dev *accel_dev) +{ + u32 capabilities_sym, capabilities_asym; + u32 capabilities_dc; + unsigned long mask; + u32 caps = 0; + u32 fusectl1; + + fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1]; + + /* Read accelerator capabilities mask */ + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_SHA3 | + ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_AES_V2; + + /* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */ + if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + capabilities_asym = 0; + + capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + + if (fusectl1 & ICP_ACCEL_GEN6_MASK_CPR_SLICE) { + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + } + + if (adf_get_service_mask(accel_dev, &mask)) + return 0; + + if (test_bit(SVC_ASYM, &mask)) + caps |= capabilities_asym; + if (test_bit(SVC_SYM, &mask)) + caps |= capabilities_sym; + if (test_bit(SVC_DC, &mask)) + caps |= capabilities_dc; + if (test_bit(SVC_DCC, &mask)) { + /* + * Sym capabilities are available for chaining operations, + * but sym crypto instances cannot be supported + */ + caps = capabilities_dc | capabilities_sym; + caps &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + } + + return caps; +} + +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) +{ + return ARRAY_SIZE(adf_default_fw_config); +} + +static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs); + int id; + + id = adf_default_fw_config[obj_num].obj; + if (id >= num_fw_objs) + return NULL; + + return adf_6xxx_fw_objs[id]; +} + +static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + return uof_get_name(accel_dev, obj_num); +} + +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + return adf_default_fw_config[obj_num].obj; +} + +static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + return adf_default_fw_config[obj_num].ae_mask; +} + +static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) +{ + if (adf_gen6_init_thd2arb_map(accel_dev)) + 
dev_warn(&GET_DEV(accel_dev), + "Failed to generate thread to arbiter mapping"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; +} + +static int adf_init_device(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr = adf_get_pmisc_base(accel_dev); + u32 status; + u32 csr; + int ret; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_GEN6_ERRMSK2); + csr |= ADF_GEN6_PM_SOU; + ADF_CSR_WR(addr, ADF_GEN6_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_GEN6_PM_INIT_STATE, + ADF_GEN6_PM_POLL_DELAY_US, + ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr, + ADF_GEN6_PM_STATUS); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); + return ret; + } + + dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n", + accel_dev->accel_id); + + ret = adf_gen6_set_vc(accel_dev); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n"); + + return ret; +} + +static int enable_pm(struct adf_accel_dev *accel_dev) +{ + return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER); +} + +static int dev_config(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + return ret; + + ret = adf_cfg_section_add(accel_dev, "Accelerator0"); + if (ret) + return ret; + + switch (adf_get_service_enabled(accel_dev)) { + case SVC_DC: + case SVC_DCC: + ret = adf_gen6_comp_dev_config(accel_dev); + break; + default: + ret = adf_gen6_no_dev_config(accel_dev); + break; + } + if (ret) + return ret; + + __set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; +} + +void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &adf_6xxx_class; + hw_data->instance_id = adf_6xxx_class.instances++; + hw_data->num_banks = ADF_GEN6_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN6_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN6_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN6_MAX_ACCELERATORS; + hw_data->num_engines = ADF_6XXX_MAX_ACCELENGINES; + hw_data->num_logical_accel = 1; + hw_data->tx_rx_gap = ADF_GEN6_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN6_TX_RINGS_MASK; + hw_data->ring_to_svc_map = 0; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = enable_error_correction; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_sram_bar_id = get_sram_bar_id; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_arb_info = get_arb_info; + hw_data->get_admin_info = get_admin_info; + hw_data->get_accel_cap = get_accel_cap; + hw_data->get_sku = get_sku; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = enable_ints; + hw_data->reset_device = adf_reset_flr; + hw_data->admin_ae_mask = ADF_6XXX_ADMIN_AE_MASK; + hw_data->fw_name = ADF_6XXX_FW; + hw_data->fw_mmp_name = ADF_6XXX_MMP; + hw_data->uof_get_name = 
uof_get_name_6xxx; + hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; + hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->set_ssm_wdtimer = set_ssm_wdtimer; + hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->disable_iov = adf_disable_sriov; + hw_data->ring_pair_reset = ring_pair_reset; + hw_data->dev_config = dev_config; + hw_data->get_hb_clock = get_heartbeat_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->start_timer = adf_timer_start; + hw_data->stop_timer = adf_timer_stop; + hw_data->init_device = adf_init_device; + hw_data->enable_pm = enable_pm; + hw_data->services_supported = services_supported; + + adf_gen6_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops); + adf_gen6_init_dc_ops(&hw_data->dc_ops); + adf_gen6_init_ras_ops(&hw_data->ras_ops); +} + +void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data) +{ + if (hw_data->dev_class->instances) + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h new file mode 100644 index 000000000000..78e2e2c5816e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_6XXX_HW_DATA_H_ +#define ADF_6XXX_HW_DATA_H_ + +#include <linux/bits.h> +#include <linux/time.h> +#include <linux/units.h> + +#include "adf_accel_devices.h" +#include "adf_cfg_common.h" +#include "adf_dc.h" + +/* PCIe configuration space */ +#define ADF_GEN6_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) +#define ADF_GEN6_SRAM_BAR 0 +#define ADF_GEN6_PMISC_BAR 1 +#define ADF_GEN6_ETR_BAR 2 +#define ADF_6XXX_MAX_ACCELENGINES 9 + +/* Clocks frequency */ +#define ADF_GEN6_COUNTER_FREQ (100 * HZ_PER_MHZ) + +/* Physical function fuses */ +#define ADF_GEN6_FUSECTL0_OFFSET 0x2C8 +#define ADF_GEN6_FUSECTL1_OFFSET 0x2CC +#define ADF_GEN6_FUSECTL4_OFFSET 0x2D8 + +/* Accelerators */ +#define ADF_GEN6_ACCELERATORS_MASK 0x1 +#define ADF_GEN6_MAX_ACCELERATORS 1 + +/* MSI-X interrupt */ +#define ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 +#define ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET 0x41A044 +#define ADF_GEN6_SMIAPF_MASK_OFFSET 0x41A084 +#define ADF_GEN6_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 4)) + +/* Bank and ring configuration */ +#define ADF_GEN6_NUM_RINGS_PER_BANK 2 +#define ADF_GEN6_NUM_BANKS_PER_VF 4 +#define ADF_GEN6_ETR_MAX_BANKS 64 +#define ADF_GEN6_RX_RINGS_OFFSET 1 +#define ADF_GEN6_TX_RINGS_MASK 0x1 + +/* Arbiter configuration */ +#define ADF_GEN6_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) +#define ADF_GEN6_ARB_OFFSET 0x000 +#define ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET 0x400 + +/* Admin interface configuration */ +#define ADF_GEN6_ADMINMSGUR_OFFSET 0x500574 +#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578 +#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970 + +/* + * Watchdog timers + * Timeout is in cycles. Clock speed may vary across products but this + * value should be a few milli-seconds. 
+ */ +#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL +#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000ULL +#define ADF_SSMWDTATHL_OFFSET 0x5208 +#define ADF_SSMWDTATHH_OFFSET 0x520C +#define ADF_SSMWDTCNVL_OFFSET 0x5408 +#define ADF_SSMWDTCNVH_OFFSET 0x540C +#define ADF_SSMWDTUCSL_OFFSET 0x5808 +#define ADF_SSMWDTUCSH_OFFSET 0x580C +#define ADF_SSMWDTDCPRL_OFFSET 0x5A08 +#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C +#define ADF_SSMWDTPKEL_OFFSET 0x5E08 +#define ADF_SSMWDTPKEH_OFFSET 0x5E0C + +/* Ring reset */ +#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC) +#define ADF_RPRESET_POLL_DELAY_US 20 +#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0) +#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + (bank) * 8) +#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) +#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) + +/* Controls and sets up the corresponding ring mode of operation */ +#define ADF_GEN6_CSR_RINGMODECTL(bank) (0x9000 + (bank) * 4) + +/* Specifies the traffic class to use for the transactions to/from the ring */ +#define ADF_GEN6_RINGMODECTL_TC_MASK GENMASK(18, 16) +#define ADF_GEN6_RINGMODECTL_TC_DEFAULT 0x7 + +/* Specifies usage of tc for the transactions to/from this ring */ +#define ADF_GEN6_RINGMODECTL_TC_EN_MASK GENMASK(20, 19) + +/* + * Use the value programmed in the tc field for request descriptor + * and metadata read transactions + */ +#define ADF_GEN6_RINGMODECTL_TC_EN_OP1 0x1 + +/* VC0 Resource Control Register */ +#define ADF_GEN6_PVC0CTL_OFFSET 0x204 +#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1 +#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1) +#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F + +/* VC1 Resource Control Register */ +#define ADF_GEN6_PVC1CTL_OFFSET 0x210 +#define ADF_GEN6_PVC1CTL_TCVCMAP_OFFSET 1 +#define ADF_GEN6_PVC1CTL_TCVCMAP_MASK GENMASK(7, 1) +#define ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT 0x40 +#define ADF_GEN6_PVC1CTL_VCEN_OFFSET 31 +#define ADF_GEN6_PVC1CTL_VCEN_MASK BIT(31) +/* RW bit: 0x1 - enables a Virtual Channel, 0x0 - disables */ +#define ADF_GEN6_PVC1CTL_VCEN_ON 0x1 + +/* Error source mask registers */ +#define ADF_GEN6_ERRMSK0 0x41A210 +#define ADF_GEN6_ERRMSK1 0x41A214 +#define ADF_GEN6_ERRMSK2 0x41A218 +#define ADF_GEN6_ERRMSK3 0x41A21C + +#define ADF_GEN6_VFLNOTIFY BIT(7) + +/* Number of heartbeat counter pairs */ +#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE + +/* Physical function fuses */ +#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0) +#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8) + +/* Firmware binaries */ +#define ADF_6XXX_FW "qat_6xxx.bin" +#define ADF_6XXX_MMP "qat_6xxx_mmp.bin" +#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin" +#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin" +#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin" + +enum icp_qat_gen6_slice_mask { + ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0), + ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1), + ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2), + ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3), + ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4), + ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6), +}; + +void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data); + +#endif /* ADF_6XXX_HW_DATA_H_ */ diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c new file mode 100644 index 000000000000..c1dc9c56fdf5 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/array_size.h> 
+#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/types.h> + +#include <adf_accel_devices.h> +#include <adf_cfg.h> +#include <adf_common_drv.h> +#include <adf_dbgfs.h> + +#include "adf_gen6_shared.h" +#include "adf_6xxx_hw_data.h" + +static int bar_map[] = { + 0, /* SRAM */ + 2, /* PMISC */ + 4, /* ETR */ +}; + +static void adf_device_down(void *accel_dev) +{ + adf_dev_down(accel_dev); +} + +static void adf_dbgfs_cleanup(void *accel_dev) +{ + adf_dbgfs_exit(accel_dev); +} + +static void adf_cfg_device_remove(void *accel_dev) +{ + adf_cfg_dev_remove(accel_dev); +} + +static void adf_cleanup_hw_data(void *accel_dev) +{ + struct adf_accel_dev *accel_device = accel_dev; + + if (accel_device->hw_device) { + adf_clean_hw_data_6xxx(accel_device->hw_device); + accel_device->hw_device = NULL; + } +} + +static void adf_devmgr_remove(void *accel_dev) +{ + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + struct device *dev = &pdev->dev; + struct adf_accel_dev *accel_dev; + struct adf_bar *bar; + unsigned int i; + int ret; + + if (num_possible_nodes() > 1 && dev_to_node(dev) < 0) { + /* + * If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. + */ + return dev_err_probe(dev, -EINVAL, "Invalid NUMA configuration.\n"); + } + + accel_dev = devm_kzalloc(dev, sizeof(*accel_dev), GFP_KERNEL); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + INIT_LIST_HEAD(&accel_dev->list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + accel_dev->owner = THIS_MODULE; + + hw_data = devm_kzalloc(dev, sizeof(*hw_data), GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_GEN6_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); + pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]); + pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]); + + if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE)) + return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n"); + + /* Enable PCI device */ + ret = pcim_enable_device(pdev); + if (ret) + return dev_err_probe(dev, ret, "Cannot enable PCI device.\n"); + + ret = adf_devmgr_add_dev(accel_dev, NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to add new accelerator device.\n"); + + ret = devm_add_action_or_reset(dev, adf_devmgr_remove, accel_dev); + if (ret) + return ret; + + accel_dev->hw_device = hw_data; + adf_init_hw_data_6xxx(accel_dev->hw_device); + + ret = devm_add_action_or_reset(dev, adf_cleanup_hw_data, accel_dev); + if (ret) + return ret; + + /* Get Accelerators and Accelerator Engine masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + + /* If the device has no acceleration engines then ignore it */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + (~hw_data->ae_mask & ADF_GEN6_ACCELERATORS_MASK)) { + ret = -EFAULT; + return dev_err_probe(dev, ret, "No acceleration units were found.\n"); + } + + /* Create device 
configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, adf_cfg_device_remove, accel_dev); + if (ret) + return ret; + + /* Set DMA identifier */ + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) + return dev_err_probe(dev, ret, "No usable DMA configuration.\n"); + + ret = adf_gen6_cfg_dev_init(accel_dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize configuration.\n"); + + /* Get accelerator capability mask */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) { + ret = -EINVAL; + return dev_err_probe(dev, ret, "Failed to get capabilities mask.\n"); + } + + for (i = 0; i < ARRAY_SIZE(bar_map); i++) { + bar = &accel_pci_dev->pci_bars[i]; + + /* Map 64-bit PCIe BAR */ + bar->virt_addr = pcim_iomap_region(pdev, bar_map[i], pci_name(pdev)); + if (IS_ERR(bar->virt_addr)) { + ret = PTR_ERR(bar->virt_addr); + return dev_err_probe(dev, ret, "Failed to ioremap PCI region.\n"); + } + } + + pci_set_master(pdev); + + /* + * The PCI config space is saved at this point and will be restored + * after a Function Level Reset (FLR) as the FLR does not completely + * restore it. + */ + ret = pci_save_state(pdev); + if (ret) + return dev_err_probe(dev, ret, "Failed to save pci state.\n"); + + accel_dev->ras_errors.enabled = true; + + adf_dbgfs_init(accel_dev); + + ret = devm_add_action_or_reset(dev, adf_dbgfs_cleanup, accel_dev); + if (ret) + return ret; + + ret = adf_dev_up(accel_dev, true); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev); + if (ret) + return ret; + + ret = adf_sysfs_init(accel_dev); + + return ret; +} + +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_6XXX) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_6XXX_DEVICE_NAME, + .probe = adf_probe, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; +module_pci_driver(adf_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_6XXX_FW); +MODULE_FIRMWARE(ADF_6XXX_MMP); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology for GEN6 Devices"); +MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS("CRYPTO_QAT"); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile index d9e568572da8..43604c025f0c 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index e78f7bfd30b8..07f2c42a68f5 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -5,7 +5,6 @@ #include <adf_clock.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -22,7 +21,6 @@ static const u32 
thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = { static struct adf_hw_device_class c3xxx_class = { .name = ADF_C3XXX_DEVICE_NAME, .type = DEV_C3XXX, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index b825b35ab4bf..bceb5dd8b148 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -19,24 +19,6 @@ #include <adf_dbgfs.h> #include "adf_c3xxx_hw_data.h" -static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), }, - { } -}; -MODULE_DEVICE_TABLE(pci, adf_pci_tbl); - -static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); -static void adf_remove(struct pci_dev *dev); - -static struct pci_driver adf_driver = { - .id_table = adf_pci_tbl, - .name = ADF_C3XXX_DEVICE_NAME, - .probe = adf_probe, - .remove = adf_remove, - .sriov_configure = adf_sriov_configure, - .err_handler = &adf_err_handler, -}; - static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) { pci_release_regions(accel_dev->accel_pci_dev.pci_dev); @@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev) kfree(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C3XXX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + static int __init adfdrv_init(void) { request_module("intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile index 31a908a211ac..03f6745b4aa2 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index a512ca4efd3f..db3c33fa1881 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -3,7 +3,6 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -13,7 +12,6 @@ static struct adf_hw_device_class c3xxxiov_class = { .name = ADF_C3XXXVF_DEVICE_NAME, .type = DEV_C3XXXVF, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile index cbdaaa135e84..f3d722bef088 100644 --- a/drivers/crypto/intel/qat/qat_c62x/Makefile +++ b/drivers/crypto/intel/qat/qat_c62x/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o qat_c62x-y := adf_drv.o adf_c62x_hw_data.o diff --git 
a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 32ebe09477a8..0b410b41474d 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -5,7 +5,6 @@ #include <adf_clock.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = { static struct adf_hw_device_class c62x_class = { .name = ADF_C62X_DEVICE_NAME, .type = DEV_C62X, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index 8a7bdec358d6..23ccb72b6ea2 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -19,24 +19,6 @@ #include <adf_dbgfs.h> #include "adf_c62x_hw_data.h" -static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), }, - { } -}; -MODULE_DEVICE_TABLE(pci, adf_pci_tbl); - -static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); -static void adf_remove(struct pci_dev *dev); - -static struct pci_driver adf_driver = { - .id_table = adf_pci_tbl, - .name = ADF_C62X_DEVICE_NAME, - .probe = adf_probe, - .remove = adf_remove, - .sriov_configure = adf_sriov_configure, - .err_handler = &adf_err_handler, -}; - static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) { pci_release_regions(accel_dev->accel_pci_dev.pci_dev); @@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev) kfree(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C62X_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + static int __init adfdrv_init(void) { request_module("intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile index 60e499b041ec..ed7f3f722d99 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 4aaaaf921734..7f00035d3661 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -3,7 +3,6 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -13,7 +12,6 @@ static struct adf_hw_device_class c62xiov_class = { .name = ADF_C62XVF_DEVICE_NAME, .type = DEV_C62XVF, - .instances = 0 }; static u32 get_accel_mask(struct 
adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index af5df29fd2e3..66bb295ace28 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -8,19 +8,19 @@ intel_qat-y := adf_accel_engine.o \ adf_cfg_services.o \ adf_clock.o \ adf_ctl_drv.o \ + adf_dc.o \ adf_dev_mgr.o \ adf_gen2_config.o \ - adf_gen2_dc.o \ adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ adf_gen4_config.o \ - adf_gen4_dc.o \ adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ adf_gen4_ras.o \ - adf_gen4_timer.o \ adf_gen4_vf_mig.o \ + adf_gen6_ras.o \ + adf_gen6_shared.o \ adf_hw_arbiter.o \ adf_init.o \ adf_isr.o \ @@ -30,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \ adf_sysfs.o \ adf_sysfs_ras_counters.o \ adf_sysfs_rl.o \ + adf_timer.o \ adf_transport.o \ qat_algs.o \ qat_algs_send.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index dc21551153cb..2ee526063213 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -12,6 +12,7 @@ #include <linux/qat/qat_mig_dev.h> #include <linux/wordpart.h> #include "adf_cfg_common.h" +#include "adf_dc.h" #include "adf_rl.h" #include "adf_telemetry.h" #include "adf_pfvf_msg.h" @@ -25,14 +26,18 @@ #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" #define ADF_420XX_DEVICE_NAME "420xx" -#define ADF_4XXX_PCI_DEVICE_ID 0x4940 -#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 -#define ADF_401XX_PCI_DEVICE_ID 0x4942 -#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 -#define ADF_402XX_PCI_DEVICE_ID 0x4944 -#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945 -#define ADF_420XX_PCI_DEVICE_ID 0x4946 -#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947 +#define ADF_6XXX_DEVICE_NAME "6xxx" +#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940 +#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941 +#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942 +#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943 +#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944 +#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945 +#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946 +#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947 +#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948 +#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949 + #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 @@ -267,7 +272,8 @@ struct adf_pfvf_ops { }; struct adf_dc_ops { - void (*build_deflate_ctx)(void *ctx); + int (*build_comp_block)(void *ctx, enum adf_dc_algo algo); + int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo); }; struct qat_migdev_ops { diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index acad526eb741..573388c37100 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -449,6 +449,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +EXPORT_SYMBOL_GPL(adf_init_admin_pm); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size) diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h index 89df3888d7ea..15fdf9854b81 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h +++ 
b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h @@ -48,6 +48,7 @@ enum adf_device_type { DEV_C3XXXVF, DEV_4XXX, DEV_420XX, + DEV_6XXX, }; struct adf_dev_status_info { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c index 30abcd9e1283..c39871291da7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c @@ -116,7 +116,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, return adf_service_mask_to_string(mask, out, out_len); } -static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask) +int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; size_t len; @@ -138,6 +138,7 @@ static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long * return ret; } +EXPORT_SYMBOL_GPL(adf_get_service_mask); int adf_get_service_enabled(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h index f6bafc15cbc6..3742c450878f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h @@ -32,5 +32,6 @@ enum { int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, size_t in_len, char *out, size_t out_len); int adf_get_service_enabled(struct adf_accel_dev *accel_dev); +int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_dc.c index 47261b1c1da6..3e8fb4e3ed97 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dc.c @@ -1,22 +1,21 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation */ #include "adf_accel_devices.h" -#include "adf_gen2_dc.h" +#include "adf_dc.h" #include "icp_qat_fw_comp.h" -static void qat_comp_build_deflate_ctx(void *ctx) +int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo) { - struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars; + struct icp_qat_fw_comp_req *req_tmpl = ctx; struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl; + struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + int ret; memset(req_tmpl, 0, sizeof(*req_tmpl)); header->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_SGL); @@ -26,12 +25,14 @@ static void qat_comp_build_deflate_ctx(void *ctx) ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF); - cd_pars->u.sl.comp_slice_cfg_word[0] = - ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS, - ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, - 
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, - ICP_QAT_HW_COMPRESSION_DEPTH_1, - ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); + + /* Build HW config block for compression */ + ret = GET_DC_OPS(accel_dev)->build_comp_block(ctx, algo); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to build compression block\n"); + return ret; + } + req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER; req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC; req_pars->req_par_flags = @@ -45,26 +46,19 @@ static void qat_comp_build_deflate_ctx(void *ctx) ICP_QAT_FW_COMP_NO_XXHASH_ACC, ICP_QAT_FW_COMP_CNV_ERROR_NONE, ICP_QAT_FW_COMP_NO_APPEND_CRC, - ICP_QAT_FW_COMP_NO_DROP_DATA); + ICP_QAT_FW_COMP_NO_DROP_DATA, + ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS); ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP); /* Fill second half of the template for decompression */ memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl)); req_tmpl++; - header = &req_tmpl->comn_hdr; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; - cd_pars = &req_tmpl->cd_pars; - cd_pars->u.sl.comp_slice_cfg_word[0] = - ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS, - ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, - ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, - ICP_QAT_HW_COMPRESSION_DEPTH_1, - ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); -} -void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops) -{ - dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx; + /* Build HW config block for decompression */ + ret = GET_DC_OPS(accel_dev)->build_decomp_block(req_tmpl, algo); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to build decompression block\n"); + + return ret; } -EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dc.h b/drivers/crypto/intel/qat/qat_common/adf_dc.h new file mode 100644 index 000000000000..6cb5e09054a6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_dc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_DC_H +#define ADF_DC_H + +struct adf_accel_dev; + +enum adf_dc_algo { + QAT_DEFLATE, + QAT_LZ4, + QAT_LZ4S, + QAT_ZSTD, +}; + +int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo); + +#endif /* ADF_DC_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h index 4f86696800c9..78957fa900b7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h @@ -8,6 +8,7 @@ enum adf_fw_objs { ADF_FW_ASYM_OBJ, ADF_FW_DC_OBJ, ADF_FW_ADMIN_OBJ, + ADF_FW_CY_OBJ, }; struct adf_fw_config { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h deleted file mode 100644 index 6eae023354d7..000000000000 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright(c) 2022 Intel Corporation */ -#ifndef ADF_GEN2_DC_H -#define ADF_GEN2_DC_H - -#include "adf_accel_devices.h" - -void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops); - -#endif /* ADF_GEN2_DC_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index 2b263442c856..6a505e9a5cf9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ 
b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ #include "adf_common_drv.h" +#include "adf_dc.h" #include "adf_gen2_hw_data.h" +#include "icp_qat_fw_comp.h" #include "icp_qat_hw.h" #include <linux/pci.h> @@ -169,3 +171,58 @@ void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) } } EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer); + +static int adf_gen2_build_comp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; + break; + default: + return -EINVAL; + } + + cd_pars->u.sl.comp_slice_cfg_word[0] = + ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, + ICP_QAT_HW_COMPRESSION_DEPTH_1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); + + return 0; +} + +static int adf_gen2_build_decomp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + break; + default: + return -EINVAL; + } + + cd_pars->u.sl.comp_slice_cfg_word[0] = + ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED, + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE, + ICP_QAT_HW_COMPRESSION_DEPTH_1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0); + + return 0; +} + +void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_comp_block = adf_gen2_build_comp_block; + dc_ops->build_decomp_block = adf_gen2_build_decomp_block; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h index 708e9186127b..59bad368a921 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h @@ -88,5 +88,6 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info); void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev); u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev); void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h index a716545a764c..34a63cf40db2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h @@ -5,6 +5,7 @@ #include <linux/types.h> #include "adf_accel_devices.h" +#include "adf_common_drv.h" #define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C) #define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c index f97e7a880f3a..afcdfdd0a37a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -11,7 +11,7 @@ #include "qat_compression.h" #include "qat_crypto.h" -static int adf_crypto_dev_config(struct adf_accel_dev 
*accel_dev) +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; int banks = GET_MAX_BANKS(accel_dev); @@ -117,7 +117,7 @@ err: return ret; } -static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) +int adf_comp_dev_config(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; int banks = GET_MAX_BANKS(accel_dev); @@ -187,7 +187,7 @@ err: return ret; } -static int adf_no_dev_config(struct adf_accel_dev *accel_dev) +int adf_no_dev_config(struct adf_accel_dev *accel_dev) { unsigned long val; int ret; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h index bb87655f69a8..38a674c27e40 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h @@ -7,5 +7,8 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev); +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev); +int adf_comp_dev_config(struct adf_accel_dev *accel_dev); +int adf_no_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c deleted file mode 100644 index 5859238e37de..000000000000 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright(c) 2022 Intel Corporation */ -#include "adf_accel_devices.h" -#include "icp_qat_fw_comp.h" -#include "icp_qat_hw_20_comp.h" -#include "adf_gen4_dc.h" - -static void qat_comp_build_deflate(void *ctx) -{ - struct icp_qat_fw_comp_req *req_tmpl = - (struct icp_qat_fw_comp_req *)ctx; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars; - struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0}; - struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0}; - struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0}; - u32 upper_val; - u32 lower_val; - - memset(req_tmpl, 0, sizeof(*req_tmpl)); - header->hdr_flags = - ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); - header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC; - header->comn_req_flags = - ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, - QAT_COMN_PTR_TYPE_SGL); - header->serv_specif_flags = - ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION, - ICP_QAT_FW_COMP_AUTO_SELECT_BEST, - ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST, - ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST, - ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF); - hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; - hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; - hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; - hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1; - hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW; - hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED; - hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL; - hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL; - - upper_val = 
ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr); - lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr); - - cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; - cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val; - - req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER; - req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC; - req_pars->req_par_flags = - ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP, - ICP_QAT_FW_COMP_EOP, - ICP_QAT_FW_COMP_BFINAL, - ICP_QAT_FW_COMP_CNV, - ICP_QAT_FW_COMP_CNV_RECOVERY, - ICP_QAT_FW_COMP_NO_CNV_DFX, - ICP_QAT_FW_COMP_CRC_MODE_LEGACY, - ICP_QAT_FW_COMP_NO_XXHASH_ACC, - ICP_QAT_FW_COMP_CNV_ERROR_NONE, - ICP_QAT_FW_COMP_NO_APPEND_CRC, - ICP_QAT_FW_COMP_NO_DROP_DATA); - - /* Fill second half of the template for decompression */ - memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl)); - req_tmpl++; - header = &req_tmpl->comn_hdr; - header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; - cd_pars = &req_tmpl->cd_pars; - - hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; - lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr); - - cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; - cd_pars->u.sl.comp_slice_cfg_word[1] = 0; -} - -void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops) -{ - dc_ops->build_deflate_ctx = qat_comp_build_deflate; -} -EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h deleted file mode 100644 index 0b1a6774412e..000000000000 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright(c) 2022 Intel Corporation */ -#ifndef ADF_GEN4_DC_H -#define ADF_GEN4_DC_H - -#include "adf_accel_devices.h" - -void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops); - -#endif /* ADF_GEN4_DC_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 099949a2421c..0406cb09c5bb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -9,6 +9,8 @@ #include "adf_fw_config.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" +#include "icp_qat_fw_comp.h" +#include "icp_qat_hw_20_comp.h" u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) { @@ -663,3 +665,71 @@ int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number return ret; } EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore); + +static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = { }; + struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = { }; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + u32 upper_val; + u32 lower_val; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC; + break; + default: + return -EINVAL; + } + + hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL; + hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77; + hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED; + hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1; + hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW; + 
hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED; + hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL; + hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL; + + upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr); + lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr); + + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val; + + return 0; +} + +static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo) +{ + struct icp_qat_fw_comp_req *req_tmpl = ctx; + struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = { }; + struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + u32 lower_val; + + switch (algo) { + case QAT_DEFLATE: + header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS; + break; + default: + return -EINVAL; + } + + hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE; + lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr); + + cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val; + cd_pars->u.sl.comp_slice_cfg_word[1] = 0; + + return 0; +} + +void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops) +{ + dc_ops->build_comp_block = adf_gen4_build_comp_block; + dc_ops->build_decomp_block = adf_gen4_build_decomp_block; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 51fc2eaa263e..e4f4d5fa616d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -7,6 +7,7 @@ #include "adf_accel_devices.h" #include "adf_cfg_common.h" +#include "adf_dc.h" /* PCIe configuration space */ #define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) @@ -180,5 +181,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); bool adf_gen4_services_supported(unsigned long service_mask); +void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h index 17d1b774d4a8..2c8708117f70 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h @@ -4,6 +4,7 @@ #define ADF_GEN4_PFVF_H #include "adf_accel_devices.h" +#include "adf_common_drv.h" #ifdef CONFIG_PCI_IOV void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h new file mode 100644 index 000000000000..9a5b995f7ada --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_GEN6_PM_H +#define ADF_GEN6_PM_H + +#include <linux/bits.h> +#include <linux/time.h> + +struct adf_accel_dev; + +/* Power management */ +#define ADF_GEN6_PM_POLL_DELAY_US 20 +#define ADF_GEN6_PM_POLL_TIMEOUT_US USEC_PER_SEC +#define ADF_GEN6_PM_STATUS 0x50A00C +#define ADF_GEN6_PM_INTERRUPT 0x50A028 + +/* Power management source in ERRSOU2 and ERRMSK2 */ +#define ADF_GEN6_PM_SOU BIT(18) + +/* cpm_pm_interrupt bitfields */ +#define 
ADF_GEN6_PM_DRV_ACTIVE BIT(20) + +#define ADF_GEN6_PM_DEFAULT_IDLE_FILTER 0x6 + +/* cpm_pm_status bitfields */ +#define ADF_GEN6_PM_INIT_STATE BIT(21) + +#endif /* ADF_GEN6_PM_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c new file mode 100644 index 000000000000..967253082a98 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c @@ -0,0 +1,818 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/bitfield.h> +#include <linux/types.h> + +#include "adf_common_drv.h" +#include "adf_gen6_ras.h" +#include "adf_sysfs_ras_counters.h" + +static void enable_errsou_reporting(void __iomem *csr) +{ + /* Enable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, 0); + + /* Enable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, 0); + + /* + * Enable uncorrectable error reporting in ERRSOU2 + * but disable PM interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, ADF_GEN6_ERRSOU2_PM_INT_BIT); + + /* Enable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, 0); +} + +static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; + + /* Enable acceleration engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, ae_mask); + + /* Enable acceleration engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, ae_mask); +} + +static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + /* Enable HI CPP agents command parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, + ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK); + + ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_MASK); +} + +static void enable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg, mask; + + /* Enable RI memory error reporting */ + mask = ADF_GEN6_RIMEM_PARERR_FATAL_MASK | ADF_GEN6_RIMEM_PARERR_CERR_MASK; + ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, mask); + + /* Enable IOSF primary command parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, ADF_GEN6_RIMISCSTS_BIT); + + /* Enable TI internal memory parity error reporting */ + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_CI_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_CD_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK); + reg &= ~ADF_GEN6_TI_TRNSB_PAR_STS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, reg); + + /* Enable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, ADF_GEN6_RICPPINTCTL_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, ADF_GEN6_TICPPINTCTL_MASK); + + /* + * Enable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL); + reg &= 
ADF_GEN6_TIMSCCTL_RELAY_MASK; + reg |= ADF_GEN6_TIMISCCTL_BIT; + ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg); +} + +static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + /* Enable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, 0); +} + +static void adf_gen6_enable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + enable_errsou_reporting(csr); + enable_ae_error_reporting(accel_dev, csr); + enable_cpp_error_reporting(accel_dev, csr); + enable_ti_ri_error_reporting(csr); + enable_ssm_error_reporting(accel_dev, csr); +} + +static void disable_errsou_reporting(void __iomem *csr) +{ + u32 val; + + /* Disable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, ADF_GEN6_ERRSOU0_MASK); + + /* Disable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, ADF_GEN6_ERRMSK1_MASK); + + /* Disable uncorrectable error reporting in ERRSOU2 */ + val = ADF_CSR_RD(csr, ADF_GEN6_ERRMSK2); + val |= ADF_GEN6_ERRSOU2_DIS_MASK; + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, val); + + /* Disable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_ERRSOU3_DIS_MASK); +} + +static void disable_ae_error_reporting(void __iomem *csr) +{ + /* Disable acceleration engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, 0); + + /* Disable acceleration engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, 0); +} + +static void disable_cpp_error_reporting(void __iomem *csr) +{ + /* Disable HI CPP agents command parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, 0); + + ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK); +} + +static void disable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg; + + /* Disable RI memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, 0); + + /* Disable IOSF primary command parity error reporting */ + reg = ADF_CSR_RD(csr, ADF_GEN6_RIMISCCTL); + reg &= ~ADF_GEN6_RIMISCSTS_BIT; + ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, reg); + + /* Disable TI internal memory parity error reporting */ + ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, ADF_GEN6_TI_CI_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, ADF_GEN6_TI_CD_PAR_STS_MASK); + ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, ADF_GEN6_TI_TRNSB_PAR_STS_MASK); + + /* Disable error handling in RI, TI CPP interface control registers */ + reg = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTCTL); + reg &= ~ADF_GEN6_RICPPINTCTL_MASK; + ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTCTL); + reg &= ~ADF_GEN6_TICPPINTCTL_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, reg); + + /* + * Disable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL); + reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK; + ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg); +} + +static void disable_ssm_error_reporting(void __iomem *csr) +{ + /* Disable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, ADF_GEN6_INTMASKSSM_MASK); +} + +static void adf_gen6_disable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + disable_errsou_reporting(csr); 
+ disable_ae_error_reporting(csr); + disable_cpp_error_reporting(csr); + disable_ti_ri_error_reporting(csr); + disable_ssm_error_reporting(csr); +} + +static void adf_gen6_process_errsou0(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ae, errsou; + + ae = ADF_CSR_RD(csr, ADF_GEN6_HIAECORERRLOG_CPP0); + ae &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_warn(&GET_DEV(accel_dev), "Correctable error detected: %#x\n", ae); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + + /* Clear interrupt from ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOG_CPP0, ae); + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0); + if (errsou & ADF_GEN6_ERRSOU0_MASK) + dev_warn(&GET_DEV(accel_dev), "errsou0 still set: %#x\n", errsou); +} + +static void adf_handle_cpp_ae_unc(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 ae; + + if (!(errsou & ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT)) + return; + + ae = ADF_CSR_RD(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0); + ae &= GET_HW_DATA(accel_dev)->ae_mask; + if (ae) { + dev_err(&GET_DEV(accel_dev), "Uncorrectable error detected: %#x\n", ae); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0, ae); + } +} + +static void adf_handle_cpp_cmd_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 cmd_par_err; + + if (!(errsou & ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT)) + return; + + cmd_par_err = ADF_CSR_RD(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG); + cmd_par_err &= ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK; + if (cmd_par_err) { + dev_err(&GET_DEV(accel_dev), "HI CPP agent command parity error: %#x\n", + cmd_par_err); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG, cmd_par_err); + } +} + +static void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 rimem_parerr_sts; + + if (!(errsou & ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT)) + return; + + rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN6_RIMEM_PARERR_STS); + rimem_parerr_sts &= ADF_GEN6_RIMEM_PARERR_CERR_MASK | + ADF_GEN6_RIMEM_PARERR_FATAL_MASK; + if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_CERR_MASK) { + dev_err(&GET_DEV(accel_dev), "RI memory parity correctable error: %#x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + + if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_FATAL_MASK) { + dev_err(&GET_DEV(accel_dev), "RI memory parity fatal error: %#x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + ADF_CSR_WR(csr, ADF_GEN6_RIMEM_PARERR_STS, rimem_parerr_sts); +} + +static void adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_ci_par_sts; + + ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_STS); + ti_ci_par_sts &= ADF_GEN6_TI_CI_PAR_STS_MASK; + if (ti_ci_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI memory parity error: %#x\n", ti_ci_par_sts); + ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_STS, ti_ci_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } +} + +static void adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_pullfub_par_sts; + + ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS); + ti_pullfub_par_sts &= ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK; + if (ti_pullfub_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI pull parity error: %#x\n", ti_pullfub_par_sts); + ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts); 
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } +} + +static void adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_pushfub_par_sts; + + ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS); + ti_pushfub_par_sts &= ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK; + if (ti_pushfub_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI push parity error: %#x\n", ti_pushfub_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts); + } +} + +static void adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_cd_par_sts; + + ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_STS); + ti_cd_par_sts &= ADF_GEN6_TI_CD_PAR_STS_MASK; + if (ti_cd_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI CD parity error: %#x\n", ti_cd_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_STS, ti_cd_par_sts); + } +} + +static void adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 ti_trnsb_par_sts; + + ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_STS); + ti_trnsb_par_sts &= ADF_GEN6_TI_TRNSB_PAR_STS_MASK; + if (ti_trnsb_par_sts) { + dev_err(&GET_DEV(accel_dev), "TI TRNSB parity error: %#x\n", ti_trnsb_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); + } +} + +static void adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 rimiscsts; + + rimiscsts = ADF_CSR_RD(csr, ADF_GEN6_RIMISCSTS); + rimiscsts &= ADF_GEN6_RIMISCSTS_BIT; + if (rimiscsts) { + dev_err(&GET_DEV(accel_dev), "Command parity error detected on IOSFP: %#x\n", + rimiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_RIMISCSTS, rimiscsts); + } +} + +static void adf_handle_ti_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return; + + adf_handle_ti_ci_par_sts(accel_dev, csr); + adf_handle_ti_pullfub_par_sts(accel_dev, csr); + adf_handle_ti_pushfub_par_sts(accel_dev, csr); + adf_handle_ti_cd_par_sts(accel_dev, csr); + adf_handle_ti_trnsb_par_sts(accel_dev, csr); + adf_handle_iosfp_cmd_parerr(accel_dev, csr); +} + +static void adf_handle_sfi_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT)) + return; + + dev_err(&GET_DEV(accel_dev), + "Command parity error detected on streaming fabric interface\n"); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); +} + +static void adf_gen6_process_errsou1(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + adf_handle_cpp_ae_unc(accel_dev, csr, errsou); + adf_handle_cpp_cmd_par_err(accel_dev, csr, errsou); + adf_handle_ri_mem_par_err(accel_dev, csr, errsou); + adf_handle_ti_err(accel_dev, csr, errsou); + adf_handle_sfi_cmd_parerr(accel_dev, csr, errsou); + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1); + if (errsou & ADF_GEN6_ERRSOU1_MASK) + dev_warn(&GET_DEV(accel_dev), "errsou1 still set: %#x\n", errsou); +} + +static void adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN6_CERRSSMSH); + reg &= ADF_GEN6_CERRSSMSH_ERROR_BIT; + if (reg) { + dev_warn(&GET_DEV(accel_dev), + "Correctable error on ssm shared memory: %#x\n", reg); + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + ADF_CSR_WR(csr, ADF_GEN6_CERRSSMSH, reg); + } +} + +static void adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_UERRSSMSH); + reg &= ADF_GEN6_UERRSSMSH_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "Fatal error on ssm shared memory: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_UERRSSMSH, reg); + } +} + +static void adf_handle_pperr_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_PPERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_PPERR); + reg &= ADF_GEN6_PPERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "Fatal push or pull data error: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_PPERR, reg); + } +} + +static void adf_handle_scmpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_SCM_PAR_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), "Fatal error on SCM: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_cpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_CPP_PAR_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), "Fatal error on CPP: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_rfpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_RF_PAR_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), "Fatal error on RF Parity: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_unexp_cpl_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS); + reg &= ADF_GEN6_UNEXP_CPL_ERR_MASK; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "Fatal error for AXI unexpected tag/length: %#x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg); + } +} + +static void adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, void __iomem *csr) +{ + u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN6_IAINTSTATSSM); + + iastatssm &= ADF_GEN6_IAINTSTATSSM_MASK; + if (!iastatssm) + return; + + adf_handle_uerrssmsh(accel_dev, csr, iastatssm); + adf_handle_pperr_err(accel_dev, csr, iastatssm); + adf_handle_scmpar_err(accel_dev, csr, iastatssm); + adf_handle_cpppar_err(accel_dev, csr, iastatssm); + adf_handle_rfpar_err(accel_dev, csr, iastatssm); + adf_handle_unexp_cpl_err(accel_dev, csr, iastatssm); + + ADF_CSR_WR(csr, 
ADF_GEN6_IAINTSTATSSM, iastatssm); +} + +static void adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou) +{ + if (!(errsou & ADF_GEN6_ERRSOU2_SSM_ERR_BIT)) + return; + + adf_handle_cerrssmsh(accel_dev, csr); + adf_handle_iaintstatssm(accel_dev, csr); +} + +static void adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 reg; + + if (!(errsou & ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)) + return; + + reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS); + if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { + dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: %#x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { + dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: %#x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + if (reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT) { + dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: errors: %#x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR, + ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK); +} + +static void adf_gen6_process_errsou2(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + adf_handle_ssm(accel_dev, csr, errsou); + adf_handle_cpp_cfc_err(accel_dev, csr, errsou); + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2); + if (errsou & ADF_GEN6_ERRSOU2_MASK) + dev_warn(&GET_DEV(accel_dev), "errsou2 still set: %#x\n", errsou); +} + +static void adf_handle_timiscsts(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 timiscsts; + + if (!(errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT)) + return; + + timiscsts = ADF_CSR_RD(csr, ADF_GEN6_TIMISCSTS); + if (timiscsts) { + dev_err(&GET_DEV(accel_dev), "Fatal error in transmit interface: %#x\n", + timiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } +} + +static void adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 ricppintsts; + + if (!(errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK)) + return; + + ricppintsts = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTSTS); + ricppintsts &= ADF_GEN6_RICPPINTSTS_MASK; + if (ricppintsts) { + dev_err(&GET_DEV(accel_dev), "RI push pull error: %#x\n", ricppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN6_RICPPINTSTS, ricppintsts); + } +} + +static void adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 ticppintsts; + + if (!(errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK)) + return; + + ticppintsts = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTSTS); + ticppintsts &= ADF_GEN6_TICPPINTSTS_MASK; + if (ticppintsts) { + dev_err(&GET_DEV(accel_dev), "TI push pull error: %#x\n", ticppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN6_TICPPINTSTS, ticppintsts); + } +} + +static void adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks; + u32 atufaultstatus; + u32 i; + + if (!(errsou & ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT)) + return; + + for (i = 0; i < max_rp_num; i++) { + atufaultstatus = ADF_CSR_RD(csr, ADF_GEN6_ATUFAULTSTATUS(i)); + + atufaultstatus &= ADF_GEN6_ATUFAULTSTATUS_BIT; + if (atufaultstatus) { + dev_err(&GET_DEV(accel_dev), "Ring pair (%u) ATU detected fault: %#x\n", i, + atufaultstatus); + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_ATUFAULTSTATUS(i), atufaultstatus);
+ }
+ }
+}
+
+static void adf_handle_rlterror(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rlterror;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT))
+ return;
+
+ rlterror = ADF_CSR_RD(csr, ADF_GEN6_RLT_ERRLOG);
+ rlterror &= ADF_GEN6_RLT_ERRLOG_MASK;
+ if (rlterror) {
+ dev_err(&GET_DEV(accel_dev), "Error in rate limiting block: %#x\n", rlterror);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RLT_ERRLOG, rlterror);
+ }
+}
+
+static void adf_handle_vflr(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error in VF\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+}
+
+static void adf_handle_tc_vc_map_error(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Violation of PCIe TC VC mapping\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pcie_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "DEVHALT due to an error in an incoming transaction\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pg_req_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Error due to response failure in response to a page request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_xlt_cpl_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Error status for an address translation request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_ti_int_err_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "DEVHALT due to a TI internal memory error\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_timiscsts(accel_dev, csr, errsou);
+ adf_handle_ricppintsts(accel_dev, csr, errsou);
+ adf_handle_ticppintsts(accel_dev, csr, errsou);
+ adf_handle_atufaultstatus(accel_dev, csr, errsou);
+ adf_handle_rlterror(accel_dev, csr, errsou);
+ adf_handle_vflr(accel_dev, csr, errsou);
+ adf_handle_tc_vc_map_error(accel_dev, csr, errsou);
+ adf_handle_pcie_devhalt(accel_dev, csr, errsou);
+ adf_handle_pg_req_devhalt(accel_dev, csr, errsou);
+ adf_handle_xlt_cpl_devhalt(accel_dev, csr, errsou);
+ adf_handle_ti_int_err_devhalt(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou3 still set: %#x\n", errsou);
+}
+
+static void adf_gen6_is_reset_required(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ bool *reset_required)
+{
+ u8 reset, dev_state;
+ u32 gensts;
+
+ gensts =
ADF_CSR_RD(csr, ADF_GEN6_GENSTS); + dev_state = FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts); + reset = FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts); + if (dev_state == ADF_GEN6_GENSTS_DEVHALT && reset == ADF_GEN6_GENSTS_PFLR) { + *reset_required = true; + return; + } + + if (reset == ADF_GEN6_GENSTS_COLD_RESET) + dev_err(&GET_DEV(accel_dev), "Fatal error, cold reset required\n"); + + *reset_required = false; +} + +static bool adf_gen6_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) +{ + void __iomem *csr = adf_get_pmisc_base(accel_dev); + bool handled = false; + u32 errsou; + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0); + if (errsou & ADF_GEN6_ERRSOU0_MASK) { + adf_gen6_process_errsou0(accel_dev, csr); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1); + if (errsou & ADF_GEN6_ERRSOU1_MASK) { + adf_gen6_process_errsou1(accel_dev, csr, errsou); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2); + if (errsou & ADF_GEN6_ERRSOU2_MASK) { + adf_gen6_process_errsou2(accel_dev, csr, errsou); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3); + if (errsou & ADF_GEN6_ERRSOU3_MASK) { + adf_gen6_process_errsou3(accel_dev, csr, errsou); + handled = true; + } + + adf_gen6_is_reset_required(accel_dev, csr, reset_required); + + return handled; +} + +void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops) +{ + ras_ops->enable_ras_errors = adf_gen6_enable_ras; + ras_ops->disable_ras_errors = adf_gen6_disable_ras; + ras_ops->handle_interrupt = adf_gen6_handle_interrupt; +} +EXPORT_SYMBOL_GPL(adf_gen6_init_ras_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h new file mode 100644 index 000000000000..66ced271d173 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_GEN6_RAS_H_ +#define ADF_GEN6_RAS_H_ + +#include <linux/bits.h> + +struct adf_ras_ops; + +/* Error source registers */ +#define ADF_GEN6_ERRSOU0 0x41A200 +#define ADF_GEN6_ERRSOU1 0x41A204 +#define ADF_GEN6_ERRSOU2 0x41A208 +#define ADF_GEN6_ERRSOU3 0x41A20C + +/* Error source mask registers */ +#define ADF_GEN6_ERRMSK0 0x41A210 +#define ADF_GEN6_ERRMSK1 0x41A214 +#define ADF_GEN6_ERRMSK2 0x41A218 +#define ADF_GEN6_ERRMSK3 0x41A21C + +/* ERRSOU0 Correctable error mask */ +#define ADF_GEN6_ERRSOU0_MASK BIT(0) + +#define ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT BIT(0) +#define ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT BIT(1) +#define ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT BIT(4) + +#define ADF_GEN6_ERRSOU1_MASK ( \ + (ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT) | \ + (ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT) | \ + (ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT)) + +#define ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT BIT(0) +#define ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT BIT(1) +#define ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT BIT(4) + +#define ADF_GEN6_ERRMSK1_MASK ( \ + (ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT) | \ + (ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT) | \ + (ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT)) + +/* HI AE Uncorrectable error log 
*/
+#define ADF_GEN6_HIAEUNCERRLOG_CPP0 0x41A300
+
+/* HI AE Uncorrectable error log enable */
+#define ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0 0x41A320
+
+/* HI AE Correctable error log */
+#define ADF_GEN6_HIAECORERRLOG_CPP0 0x41A308
+
+/* HI AE Correctable error log enable */
+#define ADF_GEN6_HIAECORERRLOGENABLE_CPP0 0x41A318
+
+/* HI CPP Agent Command parity error log */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOG 0x41A310
+
+/* HI CPP Agent command parity error logging enable */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE 0x41A314
+
+#define ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1B
+
+/* RI Memory parity error status register */
+#define ADF_GEN6_RIMEM_PARERR_STS 0x41B128
+
+/* RI Memory parity error reporting enable */
+#define ADF_GEN6_RI_MEM_PAR_ERR_EN0 0x41B12C
+
+/*
+ * RI Memory parity error mask
+ * BIT(4) - ri_tlq_phdr parity error
+ * BIT(5) - ri_tlq_pdata parity error
+ * BIT(6) - ri_tlq_nphdr parity error
+ * BIT(7) - ri_tlq_npdata parity error
+ * BIT(8) - ri_tlq_cplhdr parity error
+ * BIT(10) - BIT(13) - ri_tlq_cpldata[0:3] parity error
+ * BIT(19) - ri_cds_cmd_fifo parity error
+ * BIT(20) - ri_obc_ricpl_fifo parity error
+ * BIT(21) - ri_obc_tiricpl_fifo parity error
+ * BIT(22) - ri_obc_cppcpl_fifo parity error
+ * BIT(23) - ri_obc_pendcpl_fifo parity error
+ * BIT(24) - ri_cpp_cmd_fifo parity error
+ * BIT(25) - ri_cds_ticmd_fifo parity error
+ * BIT(26) - riti_cmd_fifo parity error
+ * BIT(27) - ri_int_msixtbl parity error
+ * BIT(28) - ri_int_imstbl parity error
+ * BIT(30) - ri_kpt_fuses parity error
+ */
+#define ADF_GEN6_RIMEM_PARERR_FATAL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
+ BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27) | \
+ BIT(28) | BIT(30))
+
+#define ADF_GEN6_RIMEM_PARERR_CERR_MASK \
+ (BIT(10) | BIT(11) | BIT(12) | BIT(13))
+
+/* TI CI parity status */
+#define ADF_GEN6_TI_CI_PAR_STS 0x50060C
+
+/* TI CI parity reporting mask */
+#define ADF_GEN6_TI_CI_PAR_ERR_MASK 0x500608
+
+/*
+ * TI CI parity status mask
+ * BIT(0) - CdCmdQ_sts parity error status
+ * BIT(1) - CdDataQ_sts parity error status
+ * BIT(3) - CPP_SkidQ_sts parity error status
+ */
+#define ADF_GEN6_TI_CI_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(3))
+
+/* TI PULLFUB parity status */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS 0x500618
+
+/* TI PULLFUB parity error reporting mask */
+#define ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK 0x500614
+
+/*
+ * TI PULLFUB parity status mask
+ * BIT(0) - TrnPullReqQ_sts parity status
+ * BIT(1) - TrnSharedDataQ_sts parity status
+ * BIT(2) - TrnPullReqDataQ_sts parity status
+ * BIT(4) - CPP_CiPullReqQ_sts parity status
+ * BIT(5) - CPP_TrnPullReqQ_sts parity status
+ * BIT(6) - CPP_PullidQ_sts parity status
+ * BIT(7) - CPP_WaitDataQ_sts parity status
+ * BIT(8) - CPP_CdDataQ_sts parity status
+ * BIT(9) - CPP_TrnDataQP0_sts parity status
+ * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status
+ * BIT(12) - CPP_TrnDataQP1_sts parity status
+ * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status
+ */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+
+/* TI PUSHUB parity status */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS 0x500630
+
+/* TI PUSHFUB parity error reporting mask */
+#define ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK 0x50062C
+
+/*
+ * TI PUSHUB parity status mask
+ * BIT(0) - SbPushReqQ_sts parity status
+ * BIT(1) - BIT(2)
- SbPushDataQ[0:1]_sts parity status + * BIT(4) - CPP_CdPushReqQ_sts parity status + * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status + * BIT(7) - CPP_SbPushReqQ_sts parity status + * BIT(8) - CPP_SbPushDataQP_sts parity status + * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status + */ +#define ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \ + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10)) + +/* TI CD parity status */ +#define ADF_GEN6_TI_CD_PAR_STS 0x50063C + +/* TI CD parity error mask */ +#define ADF_GEN6_TI_CD_PAR_ERR_MASK 0x500638 + +/* + * TI CD parity status mask + * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status + * BIT(16) - Leaf2ClusterRam_sts parity status + * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status + * BIT(19) - VirtualQ_sts parity status + * BIT(20) - DtRdQ_sts parity status + * BIT(21) - DtWrQ_sts parity status + * BIT(22) - RiCmdQ_sts parity status + * BIT(23) - BypassQ_sts parity status + * BIT(24) - DtRdQ_sc_sts parity status + * BIT(25) - DtWrQ_sc_sts parity status + */ +#define ADF_GEN6_TI_CD_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25)) + +/* TI TRNSB parity status */ +#define ADF_GEN6_TI_TRNSB_PAR_STS 0x500648 + +/* TI TRNSB parity error reporting mask */ +#define ADF_GEN6_TI_TRNSB_PAR_ERR_MASK 0x500644 + +/* + * TI TRNSB parity status mask + * BIT(0) - TrnPHdrQP_sts parity status + * BIT(1) - TrnPHdrQRF_sts parity status + * BIT(2) - TrnPDataQP_sts parity status + * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status + * BIT(7) - TrnNpHdrQP_sts parity status + * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status + * BIT(10) - TrnCplHdrQ_sts parity status + * BIT(11) - TrnPutObsReqQ_sts parity status + * BIT(12) - TrnPushReqQ_sts parity status + * BIT(13) - SbSplitIdRam_sts parity status + * BIT(14) - SbReqCountQ_sts parity status + * BIT(15) - SbCplTrkRam_sts parity status + * BIT(16) - SbGetObsReqQ_sts parity status + * BIT(17) - SbEpochIdQ_sts parity status + * BIT(18) - SbAtCplHdrQ_sts parity status + * BIT(19) - SbAtCplDataQ_sts parity status + * BIT(20) - SbReqCountRam_sts parity status + * BIT(21) - SbAtCplHdrQ_sc_sts parity status + */ +#define ADF_GEN6_TI_TRNSB_PAR_STS_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \ + BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21)) + +/* Status register to log misc error on RI */ +#define ADF_GEN6_RIMISCSTS 0x41B1B8 + +/* Status control register to log misc RI error */ +#define ADF_GEN6_RIMISCCTL 0x41B1BC + +/* + * ERRSOU2 bit mask + * BIT(0) - SSM Interrupt Mask + * BIT(1) - CFC on CPP. 
ORed of CFC Push error and Pull error + * BIT(2) - BIT(4) - CPP attention interrupts + * BIT(18) - PM interrupt + */ +#define ADF_GEN6_ERRSOU2_SSM_ERR_BIT BIT(0) +#define ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1) +#define ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK \ + (BIT(2) | BIT(3) | BIT(4)) + +#define ADF_GEN6_ERRSOU2_PM_INT_BIT BIT(18) + +#define ADF_GEN6_ERRSOU2_MASK \ + (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT) + +#define ADF_GEN6_ERRSOU2_DIS_MASK \ + (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \ + ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK) + +#define ADF_GEN6_IAINTSTATSSM 0x28 + +/* IAINTSTATSSM error bit mask definitions */ +#define ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT BIT(0) +#define ADF_GEN6_IAINTSTATSSM_PPERR_BIT BIT(2) +#define ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT BIT(4) +#define ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT BIT(5) +#define ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT BIT(6) +#define ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT BIT(7) + +#define ADF_GEN6_IAINTSTATSSM_MASK \ + (ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_PPERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT | \ + ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT) + +#define ADF_GEN6_UERRSSMSH 0x18 + +/* + * UERRSSMSH error bit mask definitions + * + * BIT(0) - Indicates one uncorrectable error + * BIT(15) - Indicates multiple uncorrectable errors + * in device shared memory + */ +#define ADF_GEN6_UERRSSMSH_MASK (BIT(0) | BIT(15)) + +/* + * CERRSSMSH error bit + * BIT(0) - Indicates one correctable error + */ +#define ADF_GEN6_CERRSSMSH_ERROR_BIT (BIT(0) | BIT(15) | BIT(24)) +#define ADF_GEN6_CERRSSMSH 0x10 + +#define ADF_GEN6_INTMASKSSM 0x0 + +/* + * Error reporting mask in INTMASKSSM + * BIT(0) - Shared memory uncorrectable interrupt mask + * BIT(2) - PPERR interrupt mask + * BIT(4) - SCM parity error interrupt mask + * BIT(5) - CPP parity error interrupt mask + * BIT(6) - SHRAM RF parity error interrupt mask + * BIT(7) - AXI unexpected completion error mask + */ +#define ADF_GEN6_INTMASKSSM_MASK \ + (BIT(0) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7)) + +/* CPP push or pull error */ +#define ADF_GEN6_PPERR 0x8 + +#define ADF_GEN6_PPERR_MASK (BIT(0) | BIT(1)) + +/* + * SSM_FERR_STATUS error bit mask definitions + */ +#define ADF_GEN6_SCM_PAR_ERR_MASK BIT(5) +#define ADF_GEN6_CPP_PAR_ERR_MASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN6_UNEXP_CPL_ERR_MASK (BIT(3) | BIT(4) | BIT(10) | BIT(11)) +#define ADF_GEN6_RF_PAR_ERR_MASK BIT(16) + +#define ADF_GEN6_SSM_FERR_STATUS 0x9C + +#define ADF_GEN6_CPP_CFC_ERR_STATUS 0x640C04 + +/* + * BIT(0) - Indicates one or more CPP CFC errors + * BIT(1) - Indicates multiple CPP CFC errors + * BIT(7) - Indicates CPP CFC command parity error type + * BIT(8) - Indicates CPP CFC data parity error type + */ +#define ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT BIT(0) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8) +#define ADF_GEN6_CPP_CFC_FATAL_ERR_BIT \ + (ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT | \ + ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT) + +/* + * BIT(0) - Enables CFC to detect and log a push/pull data error + * BIT(1) - Enables CFC to generate interrupt to PCIEP for a CPP error + * BIT(4) - When 1 parity detection is disabled + * BIT(5) - When 1 parity detection is disabled on CPP command bus + * BIT(6) - When 1 parity detection is 
disabled on CPP push/pull bus + * BIT(9) - When 1 RF parity error detection is disabled + */ +#define ADF_GEN6_CPP_CFC_ERR_CTRL_MASK (BIT(0) | BIT(1)) + +#define ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK \ + (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10)) + +#define ADF_GEN6_CPP_CFC_ERR_CTRL 0x640C00 + +/* + * BIT(0) - Clears bit(0) of ADF_GEN6_CPP_CFC_ERR_STATUS + * when an error is reported on CPP + * BIT(1) - Clears bit(1) of ADF_GEN6_CPP_CFC_ERR_STATUS + * when multiple errors are reported on CPP + * BIT(2) - Clears bit(2) of ADF_GEN6_CPP_CFC_ERR_STATUS + * when attention interrupt is reported + */ +#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR 0x640C08 + +/* + * ERRSOU3 bit masks + * BIT(0) - indicates error response order overflow and/or BME error + * BIT(1) - indicates RI push/pull error + * BIT(2) - indicates TI push/pull error + * BIT(5) - indicates TI pull parity error + * BIT(6) - indicates RI push parity error + * BIT(7) - indicates VFLR interrupt + * BIT(8) - indicates ring pair interrupts for ATU detected fault + * BIT(9) - indicates rate limiting error + */ +#define ADF_GEN6_ERRSOU3_TIMISCSTS_BIT BIT(0) +#define ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK (BIT(1) | BIT(6)) +#define ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK (BIT(2) | BIT(5)) +#define ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT BIT(7) +#define ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8) +#define ADF_GEN6_ERRSOU3_RLTERROR_BIT BIT(9) +#define ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT BIT(16) +#define ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT BIT(17) +#define ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT BIT(18) +#define ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT BIT(19) +#define ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT BIT(20) + +#define ADF_GEN6_ERRSOU3_MASK ( \ + (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \ + (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT) | \ + (ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT) | \ + (ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT) | \ + (ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT) | \ + (ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT)) + +#define ADF_GEN6_ERRSOU3_DIS_MASK ( \ + (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \ + (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \ + (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT)) + +/* Rate limiting error log register */ +#define ADF_GEN6_RLT_ERRLOG 0x508814 + +#define ADF_GEN6_RLT_ERRLOG_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* TI misc status register */ +#define ADF_GEN6_TIMISCSTS 0x50054C + +/* TI misc error reporting mask */ +#define ADF_GEN6_TIMISCCTL 0x500548 + +/* + * TI Misc error reporting control mask + * BIT(0) - Enables error detection and logging in TIMISCSTS register + * BIT(1) - It has effect only when SRIOV enabled, this bit is 0 by default + * BIT(2) - Enables the D-F-x counter within the dispatch arbiter + * to start based on the command triggered from + * BIT(30) - Disables VFLR functionality + * bits 1, 2 and 30 value should be preserved and not meant to be changed + * within RAS. 
+ */ +#define ADF_GEN6_TIMISCCTL_BIT BIT(0) +#define ADF_GEN6_TIMSCCTL_RELAY_MASK (BIT(1) | BIT(2) | BIT(30)) + +/* RI CPP interface status register */ +#define ADF_GEN6_RICPPINTSTS 0x41A330 + +/* + * Uncorrectable error mask in RICPPINTSTS register + * BIT(0) - RI asserted the CPP error signal during a push + * BIT(1) - RI detected the CPP error signal asserted during a pull + * BIT(2) - RI detected a push data parity error + * BIT(3) - RI detected a push valid parity error + */ +#define ADF_GEN6_RICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* RI CPP interface register control */ +#define ADF_GEN6_RICPPINTCTL 0x41A32C + +/* + * Control bit mask for RICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting + * on the RI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting + * on the RI CPP Pull interface + * BIT(2) - value of 1 enables error detection and reporting + * on the RI Parity + * BIT(3) - value of 1 enables checking parity on CPP + */ +#define ADF_GEN6_RICPPINTCTL_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* TI CPP interface status register */ +#define ADF_GEN6_TICPPINTSTS 0x50053C + +/* + * Uncorrectable error mask in TICPPINTSTS register + * BIT(0) - value of 1 indicates that the TI asserted + * the CPP error signal during a push + * BIT(1) - value of 1 indicates that the TI detected + * the CPP error signal asserted during a pull + * BIT(2) - value of 1 indicates that the TI detected + * a pull data parity error + */ +#define ADF_GEN6_TICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2)) + +/* TI CPP interface status register control */ +#define ADF_GEN6_TICPPINTCTL 0x500538 + +/* + * Control bit mask for TICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting on + * the TI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting on + * the TI CPP Pull interface + * BIT(2) - value of 1 enables parity error detection and logging on + * the TI CPP Pull interface + * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking + */ +#define ADF_GEN6_TICPPINTCTL_MASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* ATU fault status register */ +#define ADF_GEN6_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4)) + +#define ADF_GEN6_ATUFAULTSTATUS_BIT BIT(0) + +/* Command parity error detected on IOSFP command to QAT */ +#define ADF_GEN6_RIMISCSTS_BIT BIT(0) + +#define ADF_GEN6_GENSTS 0x41A220 +#define ADF_GEN6_GENSTS_DEVICE_STATE_MASK GENMASK(1, 0) +#define ADF_GEN6_GENSTS_RESET_TYPE_MASK GENMASK(3, 2) +#define ADF_GEN6_GENSTS_PFLR 0x1 +#define ADF_GEN6_GENSTS_COLD_RESET 0x3 +#define ADF_GEN6_GENSTS_DEVHALT 0x1 + +void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops); + +#endif /* ADF_GEN6_RAS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c new file mode 100644 index 000000000000..58a072e2f936 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 Intel Corporation */ +#include <linux/export.h> + +#include "adf_gen4_config.h" +#include "adf_gen4_hw_csr_data.h" +#include "adf_gen4_pfvf.h" +#include "adf_gen6_shared.h" + +struct adf_accel_dev; +struct adf_pfvf_ops; +struct adf_hw_csr_ops; + +/* + * QAT GEN4 and GEN6 devices often differ in terms of supported features, + * options and internal logic.
However, some of the mechanisms and register + * layout are shared between those two GENs. This file serves as an abstraction + * layer that allows to use existing GEN4 implementation that is also + * applicable to GEN6 without additional overhead and complexity. + */ +void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops) +{ + adf_gen4_init_pf_pfvf_ops(pfvf_ops); +} +EXPORT_SYMBOL_GPL(adf_gen6_init_pf_pfvf_ops); + +void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + return adf_gen4_init_hw_csr_ops(csr_ops); +} +EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops); + +int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev) +{ + return adf_gen4_cfg_dev_init(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init); + +int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev) +{ + return adf_comp_dev_config(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_gen6_comp_dev_config); + +int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev) +{ + return adf_no_dev_config(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h new file mode 100644 index 000000000000..bc8e71e984fc --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ADF_GEN6_SHARED_H_ +#define ADF_GEN6_SHARED_H_ + +struct adf_hw_csr_ops; +struct adf_accel_dev; +struct adf_pfvf_ops; + +void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops); +void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); +int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev); +int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev); +int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev); +#endif/* ADF_GEN6_SHARED_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_timer.c index 35ccb91d6ec1..8962a49f145a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_timer.c @@ -12,9 +12,9 @@ #include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" -#include "adf_gen4_timer.h" +#include "adf_timer.h" -#define ADF_GEN4_TIMER_PERIOD_MS 200 +#define ADF_DEFAULT_TIMER_PERIOD_MS 200 /* This periodic update is used to trigger HB, RL & TL fw events */ static void work_handler(struct work_struct *work) @@ -27,16 +27,16 @@ static void work_handler(struct work_struct *work) accel_dev = timer_ctx->accel_dev; adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx, - msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS)); + msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS)); time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime), - ADF_GEN4_TIMER_PERIOD_MS); + ADF_DEFAULT_TIMER_PERIOD_MS); if (adf_send_admin_tim_sync(accel_dev, time_periods)) dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n"); } -int adf_gen4_timer_start(struct adf_accel_dev *accel_dev) +int adf_timer_start(struct adf_accel_dev *accel_dev) { struct adf_timer *timer_ctx; @@ -50,13 +50,13 @@ int adf_gen4_timer_start(struct adf_accel_dev *accel_dev) INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler); adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx, - msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS)); + msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS)); return 0; } -EXPORT_SYMBOL_GPL(adf_gen4_timer_start); +EXPORT_SYMBOL_GPL(adf_timer_start); -void 
adf_gen4_timer_stop(struct adf_accel_dev *accel_dev) +void adf_timer_stop(struct adf_accel_dev *accel_dev) { struct adf_timer *timer_ctx = accel_dev->timer; @@ -68,4 +68,4 @@ void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev) kfree(timer_ctx); accel_dev->timer = NULL; } -EXPORT_SYMBOL_GPL(adf_gen4_timer_stop); +EXPORT_SYMBOL_GPL(adf_timer_stop); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_timer.h index 66a709e7b358..68e5136d6ba1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h +++ b/drivers/crypto/intel/qat/qat_common/adf_timer.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright(c) 2023 Intel Corporation */ -#ifndef ADF_GEN4_TIMER_H_ -#define ADF_GEN4_TIMER_H_ +#ifndef ADF_TIMER_H_ +#define ADF_TIMER_H_ #include <linux/ktime.h> #include <linux/workqueue.h> @@ -15,7 +15,7 @@ struct adf_timer { ktime_t initial_ktime; }; -int adf_gen4_timer_start(struct adf_accel_dev *accel_dev); -void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev); +int adf_timer_start(struct adf_accel_dev *accel_dev); +void adf_timer_stop(struct adf_accel_dev *accel_dev); -#endif /* ADF_GEN4_TIMER_H_ */ +#endif /* ADF_TIMER_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h index 04f645957e28..81969c515a17 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h @@ -44,6 +44,7 @@ enum icp_qat_fw_comp_20_cmd_id { #define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1 #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7 #define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1 +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MAX_VALUE 0xFFFFFFFF #define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \ ret_uncomp, secure_ram) \ @@ -117,7 +118,7 @@ struct icp_qat_fw_comp_req_params { #define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \ cnvdfx, crc, xxhash_acc, \ cnv_error_type, append_crc, \ - drop_data) \ + drop_data, partial_decomp) \ ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \ ICP_QAT_FW_COMP_SOP_BITPOS) | \ (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \ @@ -139,7 +140,9 @@ struct icp_qat_fw_comp_req_params { (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \ << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \ (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \ - << ICP_QAT_FW_COMP_DROP_DATA_BITPOS)) + << ICP_QAT_FW_COMP_DROP_DATA_BITPOS) | \ + (((partial_decomp) & ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK) \ + << ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS)) #define ICP_QAT_FW_COMP_NOT_SOP 0 #define ICP_QAT_FW_COMP_SOP 1 @@ -161,6 +164,8 @@ struct icp_qat_fw_comp_req_params { #define ICP_QAT_FW_COMP_NO_APPEND_CRC 0 #define ICP_QAT_FW_COMP_DROP_DATA 1 #define ICP_QAT_FW_COMP_NO_DROP_DATA 0 +#define ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS 1 +#define ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS 0 #define ICP_QAT_FW_COMP_SOP_BITPOS 0 #define ICP_QAT_FW_COMP_SOP_MASK 0x1 #define ICP_QAT_FW_COMP_EOP_BITPOS 1 @@ -189,6 +194,8 @@ struct icp_qat_fw_comp_req_params { #define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1 #define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25 #define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1 +#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS 27 +#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK 0x1 #define ICP_QAT_FW_COMP_SOP_GET(flags) \ QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \ @@ -281,8 +288,18 @@ struct icp_qat_fw_comp_req { union { struct 
icp_qat_fw_xlt_req_params xlt_pars; __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; + struct { + __u32 partial_decompress_length; + __u32 partial_decompress_offset; + } partial_decompress; } u1; - __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + union { + __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + struct { + __u32 asb_value; + __u32 reserved; + } asb_threshold; + } u3; struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl; union { struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl; diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h index 7eb5daef4f88..6887930c7995 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h @@ -35,6 +35,7 @@ struct icp_qat_fw_loader_chip_info { u32 wakeup_event_val; bool fw_auth; bool css_3k; + bool dual_sign; bool tgroup_share_ustore; u32 fcu_ctl_csr; u32 fcu_sts_csr; diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h new file mode 100644 index 000000000000..dce639152345 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ICP_QAT_HW_51_COMP_H_ +#define ICP_QAT_HW_51_COMP_H_ + +#include <linux/types.h> + +#include "icp_qat_fw.h" +#include "icp_qat_hw_51_comp_defs.h" + +struct icp_qat_hw_comp_51_config_csr_lower { + enum icp_qat_hw_comp_51_abd abd; + enum icp_qat_hw_comp_51_lllbd_ctrl lllbd; + enum icp_qat_hw_comp_51_search_depth sd; + enum icp_qat_hw_comp_51_min_match_control mmctrl; + enum icp_qat_hw_comp_51_lz4_block_checksum lbc; +}; + +static inline u32 +ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_51_config_csr_lower csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.abd, + ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK); + QAT_FIELD_SET(val32, csr.lllbd, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK); + QAT_FIELD_SET(val32, csr.sd, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK); + QAT_FIELD_SET(val32, csr.mmctrl, + ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK); + QAT_FIELD_SET(val32, csr.lbc, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK); + + return val32; +} + +struct icp_qat_hw_comp_51_config_csr_upper { + enum icp_qat_hw_comp_51_dmm_algorithm edmm; + enum icp_qat_hw_comp_51_bms bms; + enum icp_qat_hw_comp_51_scb_mode_reset_mask scb_mode_reset; +}; + +static inline u32 +ICP_QAT_FW_COMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_51_config_csr_upper csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.edmm, + ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK); + QAT_FIELD_SET(val32, csr.bms, + ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK); + QAT_FIELD_SET(val32, csr.scb_mode_reset, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS, + ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK); + + return val32; +} + +struct icp_qat_hw_decomp_51_config_csr_lower { + enum icp_qat_hw_decomp_51_lz4_block_checksum lbc; +}; + +static inline u32 +ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_LOWER(struct 
icp_qat_hw_decomp_51_config_csr_lower csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.lbc, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK); + + return val32; +} + +struct icp_qat_hw_decomp_51_config_csr_upper { + enum icp_qat_hw_decomp_51_bms bms; +}; + +static inline u32 +ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_51_config_csr_upper csr) +{ + u32 val32 = 0; + + QAT_FIELD_SET(val32, csr.bms, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS, + ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK); + + return val32; +} + +#endif /* ICP_QAT_HW_51_COMP_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h new file mode 100644 index 000000000000..e745688c5da4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2025 Intel Corporation */ +#ifndef ICP_QAT_HW_51_COMP_DEFS_H_ +#define ICP_QAT_HW_51_COMP_DEFS_H_ + +#include <linux/bits.h> + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_BITPOS 28 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_MASK GENMASK(1, 0) +enum icp_qat_hw_comp_51_som_control { + ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE = 0x0, + ICP_QAT_HW_COMP_51_SOM_CONTROL_DICTIONARY_MODE = 0x1, + ICP_QAT_HW_COMP_51_SOM_CONTROL_INPUT_CRC = 0x2, + ICP_QAT_HW_COMP_51_SOM_CONTROL_RESERVED_MODE = 0x3, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_rd_control { + ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_BITPOS 25 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_bypass_compression { + ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED = 0x0, + ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_ENABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS 22 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_dmm_algorithm { + ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_DMM_ALGORITHM_ZSTD_DMM_LITE = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_BITPOS 21 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_token_fusion_internal_only { + ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS 19 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0) +enum icp_qat_hw_comp_51_bms { + 
ICP_QAT_HW_COMP_51_BMS_BMS_64KB = 0x0, + ICP_QAT_HW_COMP_51_BMS_BMS_256KB = 0x1, + ICP_QAT_HW_COMP_51_BMS_BMS_1MB = 0x2, + ICP_QAT_HW_COMP_51_BMS_BMS_4MB = 0x3, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_BMS_BMS_64KB +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_scb_mode_reset_mask { + ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT = 0x0, + ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_RESET_HB_HT = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_zstd_frame_gen_dec_en { + ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0, + ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_BITPOS 1 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_cnv_disable { + ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_BITPOS 0 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_asb_disable { + ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_spec_decoder_internal_only { + ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0, + ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_mini_xcam_internal_only { + ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0, + ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_BITPOS 19 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_rep_off_enc_internal_only { + ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_BITPOS 18 +#define 
ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_prog_block_drop_internal_only { + ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE = 0x0, + ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_ENABLE = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_BITPOS 17 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_override_internal_only { + ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_OVERRIDE_HASH_PARAMS = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_BITPOS 14 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0) +enum icp_qat_hw_comp_51_hbs { + ICP_QAT_HW_COMP_51_HBS_32KB = 0x0, + ICP_QAT_HW_COMP_51_HBS_64KB = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_HBS_32KB +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS 13 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_abd { + ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_ABD_ABD_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS 12 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_lllbd_ctrl { + ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED = 0x0, + ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK GENMASK(3, 0) +enum icp_qat_hw_comp_51_search_depth { + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 = 0x1, + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_6 = 0x3, + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_9 = 0x4, + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_10 = 0x4, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_BITPOS 5 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0) +enum icp_qat_hw_comp_51_format { + ICP_QAT_HW_COMP_51_FORMAT_ILZ77 = 0x1, + ICP_QAT_HW_COMP_51_FORMAT_LZ4 = 0x2, + ICP_QAT_HW_COMP_51_FORMAT_LZ4s = 0x3, + ICP_QAT_HW_COMP_51_FORMAT_ZSTD = 0x4, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_FORMAT_ILZ77 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_min_match_control { + ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B = 0x0, + ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_4B = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_collision { + 
ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_skip_hash_update { + ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW = 0x0, + ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_BITPOS 1 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_byte_skip { + ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN = 0x0, + ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_LITERAL = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0 +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0) +enum icp_qat_hw_comp_51_lz4_block_checksum { + ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0, + ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1, +}; + +#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \ + ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_BITPOS 26 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_discard_data { + ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED = 0x0, + ICP_QAT_HW_DECOMP_51_DISCARD_DATA_ENABLED = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS 19 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0) +enum icp_qat_hw_decomp_51_bms { + ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB = 0x0, + ICP_QAT_HW_DECOMP_51_BMS_BMS_256KB = 0x1, + ICP_QAT_HW_DECOMP_51_BMS_BMS_1MB = 0x2, + ICP_QAT_HW_DECOMP_51_BMS_BMS_4MB = 0x3, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_zstd_frame_gen_dec_en { + ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0, + ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_spec_decoder_internal_only { + ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0, + ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_mini_xcam_internal_only { + ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0, + 
ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_BITPOS 14 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0) +enum icp_qat_hw_decomp_51_hbs { + ICP_QAT_HW_DECOMP_51_HBS_32KB = 0x0, + ICP_QAT_HW_DECOMP_51_HBS_64KB = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_HBS_32KB +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_BITPOS 5 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0) +enum icp_qat_hw_decomp_51_format { + ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 = 0x1, + ICP_QAT_HW_DECOMP_51_FORMAT_LZ4 = 0x2, + ICP_QAT_HW_DECOMP_51_FORMAT_RESERVED = 0x3, + ICP_QAT_HW_DECOMP_51_FORMAT_ZSTD = 0x4, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0 +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0) +enum icp_qat_hw_decomp_51_lz4_block_checksum { + ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0, + ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1, +}; + +#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \ + ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT + +#endif /* ICP_QAT_HW_51_COMP_DEFS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index 1c7bcd8e4055..6313c35eff0c 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -7,6 +7,7 @@ #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 +#define ICP_QAT_AC_6XXX_DEV_TYPE 0x80000000 #define ICP_QAT_UCLO_MAX_AE 17 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) @@ -81,6 +82,21 @@ #define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000 #define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000 +/* All lengths below are in bytes */ +#define ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN 12 +#define ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN 16 +#define ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN 3540 +#define ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN 64 +#define ICP_QAT_DUALSIGN_XMSS_SIG_LEN 2692 +#define ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN 2696 +#define ICP_QAT_DUALSIGN_MISC_INFO_LEN 16 +#define ICP_QAT_DUALSIGN_FW_TYPE_LEN 7 +#define ICP_QAT_DUALSIGN_MODULE_TYPE 0x14 +#define ICP_QAT_DUALSIGN_HDR_LEN 0x375 +#define ICP_QAT_DUALSIGN_HDR_VER 0x40001 +#define ICP_QAT_DUALSIGN_HDR_LEN_OFFSET 4 +#define ICP_QAT_DUALSIGN_HDR_VER_OFFSET 8 + #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) #define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1) @@ -440,6 +456,13 @@ struct icp_qat_fw_auth_desc { unsigned int img_ae_init_data_low; unsigned int img_ae_insts_high; unsigned int img_ae_insts_low; + unsigned int cpp_mask; + unsigned int reserved; + unsigned int xmss_pubkey_high; + unsigned int xmss_pubkey_low; + unsigned int xmss_sig_high; + unsigned int xmss_sig_low; + unsigned int reserved2[2]; }; struct icp_qat_auth_chunk { diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index a6e02405d402..8b123472b71c 100644 --- 
a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -8,6 +8,7 @@ #include <linux/workqueue.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_dc.h" #include "qat_bl.h" #include "qat_comp_req.h" #include "qat_compression.h" @@ -145,9 +146,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm) return -EINVAL; ctx->inst = inst; - ctx->inst->build_deflate_ctx(ctx->comp_ctx); - - return 0; + return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE); } static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) @@ -241,13 +240,13 @@ static struct acomp_alg qat_acomp[] = { { .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_ctxsize = sizeof(struct qat_compression_ctx), + .cra_reqsize = sizeof(struct qat_compression_req), .cra_module = THIS_MODULE, }, .init = qat_comp_alg_init_tfm, .exit = qat_comp_alg_exit_tfm, .compress = qat_comp_alg_compress, .decompress = qat_comp_alg_decompress, - .reqsize = sizeof(struct qat_compression_req), }}; int qat_comp_algs_register(void) diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c index 7842a9f22178..c285b45b8679 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_compression.c +++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c @@ -144,7 +144,6 @@ static int qat_compression_create_instances(struct adf_accel_dev *accel_dev) inst->id = i; atomic_set(&inst->refctr, 0); inst->accel_dev = accel_dev; - inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx; snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); ret = adf_cfg_get_param_value(accel_dev, SEC, key, val); diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h index aebac2302dcf..5ced3ed0e5ea 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_compression.h +++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h @@ -20,7 +20,6 @@ struct qat_compression_instance { atomic_t refctr; struct qat_instance_backlog backlog; struct adf_dc_data *dc_data; - void (*build_deflate_ctx)(void *ctx); }; static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev) diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index ef8a9cf74f0c..da4eca6e1633 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -694,16 +694,17 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, handle->pci_dev = pci_info->pci_dev; switch (handle->pci_dev->device) { - case ADF_4XXX_PCI_DEVICE_ID: - case ADF_401XX_PCI_DEVICE_ID: - case ADF_402XX_PCI_DEVICE_ID: - case ADF_420XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_4XXX: + case PCI_DEVICE_ID_INTEL_QAT_401XX: + case PCI_DEVICE_ID_INTEL_QAT_402XX: + case PCI_DEVICE_ID_INTEL_QAT_420XX: + case PCI_DEVICE_ID_INTEL_QAT_6XXX: handle->chip_info->mmp_sram_size = 0; handle->chip_info->nn = false; handle->chip_info->lm2lm3 = true; handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X; handle->chip_info->icp_rst_csr = ICP_RESET_CPP0; - if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID) + if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX) handle->chip_info->icp_rst_mask = 0x100155; else handle->chip_info->icp_rst_mask = 0x100015; @@ -712,6 +713,8 @@ static int qat_hal_chip_init(struct 
icp_qat_fw_loader_handle *handle, handle->chip_info->wakeup_event_val = 0x80000000; handle->chip_info->fw_auth = true; handle->chip_info->css_3k = true; + if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) + handle->chip_info->dual_sign = true; handle->chip_info->tgroup_share_ustore = true; handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX; handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 7678a93c6853..21d652a1c8ef 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -1,11 +1,16 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ + +#define pr_fmt(fmt) "QAT: " fmt + #include <linux/align.h> +#include <linux/bitops.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/pci_ids.h> +#include <linux/wordpart.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_uclo.h" @@ -59,7 +64,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) unsigned int i; if (!ae_data) { - pr_err("QAT: bad argument, ae_data is NULL\n"); + pr_err("bad argument, ae_data is NULL\n"); return -EINVAL; } @@ -86,12 +91,11 @@ static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) int min = hdr->min_ver & 0xff; if (hdr->file_id != ICP_QAT_UOF_FID) { - pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); + pr_err("Invalid header 0x%x\n", hdr->file_id); return -EINVAL; } if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { - pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", - maj, min); + pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min); return -EINVAL; } return 0; @@ -103,20 +107,19 @@ static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr) int min = suof_hdr->min_ver & 0xff; if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { - pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); + pr_err("invalid header 0x%x\n", suof_hdr->file_id); return -EINVAL; } if (suof_hdr->fw_type != 0) { - pr_err("QAT: unsupported firmware type\n"); + pr_err("unsupported firmware type\n"); return -EINVAL; } if (suof_hdr->num_chunks <= 0x1) { - pr_err("QAT: SUOF chunk amount is incorrect\n"); + pr_err("SUOF chunk amount is incorrect\n"); return -EINVAL; } if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { - pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", - maj, min); + pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min); return -EINVAL; } return 0; @@ -223,24 +226,24 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, char *str; if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { - pr_err("QAT: initmem is out of range"); + pr_err("initmem is out of range"); return -EINVAL; } if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { - pr_err("QAT: Memory scope for init_mem error\n"); + pr_err("Memory scope for init_mem error\n"); return -EINVAL; } str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); if (!str) { - pr_err("QAT: AE name assigned in UOF init table is NULL\n"); + pr_err("AE name assigned in UOF init table is NULL\n"); return -EINVAL; } if (qat_uclo_parse_num(str, ae)) { - pr_err("QAT: Parse num for AE number failed\n"); + pr_err("Parse num for AE number failed\n"); return -EINVAL; } if (*ae >= ICP_QAT_UCLO_MAX_AE) { - pr_err("QAT: ae %d 
out of range\n", *ae); + pr_err("ae %d out of range\n", *ae); return -EINVAL; } return 0; @@ -356,8 +359,7 @@ static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, return -EINVAL; break; default: - pr_err("QAT: initmem region error. region type=0x%x\n", - init_mem->region); + pr_err("initmem region error. region type=0x%x\n", init_mem->region); return -EINVAL; } return 0; @@ -431,7 +433,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { if (qat_hal_batch_wr_lm(handle, ae, obj_handle->lm_init_tab[ae])) { - pr_err("QAT: fail to batch init lmem for AE %d\n", ae); + pr_err("fail to batch init lmem for AE %d\n", ae); return -EINVAL; } qat_uclo_cleanup_batch_init_list(handle, @@ -539,26 +541,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, code_page->imp_expr_tab_offset); if (uc_var_tab->entry_num || imp_var_tab->entry_num || imp_expr_tab->entry_num) { - pr_err("QAT: UOF can't contain imported variable to be parsed\n"); + pr_err("UOF can't contain imported variable to be parsed\n"); return -EINVAL; } neigh_reg_tab = (struct icp_qat_uof_objtable *) (encap_uof_obj->beg_uof + code_page->neigh_reg_tab_offset); if (neigh_reg_tab->entry_num) { - pr_err("QAT: UOF can't contain neighbor register table\n"); + pr_err("UOF can't contain neighbor register table\n"); return -EINVAL; } if (image->numpages > 1) { - pr_err("QAT: UOF can't contain multiple pages\n"); + pr_err("UOF can't contain multiple pages\n"); return -EINVAL; } if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { - pr_err("QAT: UOF can't use shared control store feature\n"); + pr_err("UOF can't use shared control store feature\n"); return -EFAULT; } if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { - pr_err("QAT: UOF can't use reloadable feature\n"); + pr_err("UOF can't use reloadable feature\n"); return -EFAULT; } return 0; @@ -677,7 +679,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) } } if (!mflag) { - pr_err("QAT: uimage uses AE not set\n"); + pr_err("uimage uses AE not set\n"); return -EINVAL; } return 0; @@ -731,14 +733,15 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) return ICP_QAT_AC_C62X_DEV_TYPE; case PCI_DEVICE_ID_INTEL_QAT_C3XXX: return ICP_QAT_AC_C3XXX_DEV_TYPE; - case ADF_4XXX_PCI_DEVICE_ID: - case ADF_401XX_PCI_DEVICE_ID: - case ADF_402XX_PCI_DEVICE_ID: - case ADF_420XX_PCI_DEVICE_ID: + case PCI_DEVICE_ID_INTEL_QAT_4XXX: + case PCI_DEVICE_ID_INTEL_QAT_401XX: + case PCI_DEVICE_ID_INTEL_QAT_402XX: + case PCI_DEVICE_ID_INTEL_QAT_420XX: return ICP_QAT_AC_4XXX_A_DEV_TYPE; + case PCI_DEVICE_ID_INTEL_QAT_6XXX: + return ICP_QAT_AC_6XXX_DEV_TYPE; default: - pr_err("QAT: unsupported device 0x%x\n", - handle->pci_dev->device); + pr_err("unsupported device 0x%x\n", handle->pci_dev->device); return 0; } } @@ -748,7 +751,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) unsigned int maj_ver, prod_type = obj_handle->prod_type; if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { - pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", + pr_err("UOF type 0x%x doesn't match with platform 0x%x\n", obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, prod_type); return -EINVAL; @@ -756,7 +759,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) maj_ver = obj_handle->prod_rev & 0xff; if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver || 
obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) { - pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver); + pr_err("UOF majVer 0x%x out of range\n", maj_ver); return -EINVAL; } return 0; @@ -799,7 +802,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_NEIGH_REL: return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); default: - pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type); + pr_err("UOF uses not supported reg type 0x%x\n", reg_type); return -EFAULT; } return 0; @@ -835,8 +838,7 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, case ICP_QAT_UOF_INIT_REG_CTX: /* check if ctx is appropriate for the ctxMode */ if (!((1 << init_regsym->ctx) & ctx_mask)) { - pr_err("QAT: invalid ctx num = 0x%x\n", - init_regsym->ctx); + pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx); return -EINVAL; } qat_uclo_init_reg(handle, ae, @@ -848,10 +850,10 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, exp_res); break; case ICP_QAT_UOF_INIT_EXPR: - pr_err("QAT: INIT_EXPR feature not supported\n"); + pr_err("INIT_EXPR feature not supported\n"); return -EINVAL; case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: - pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n"); + pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n"); return -EINVAL; default: break; @@ -871,7 +873,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) return 0; if (obj_handle->init_mem_tab.entry_num) { if (qat_uclo_init_memory(handle)) { - pr_err("QAT: initialize memory failed\n"); + pr_err("initialize memory failed\n"); return -EINVAL; } } @@ -900,40 +902,40 @@ static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle, mode = ICP_QAT_CTX_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_ctx_mode(handle, ae, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); + pr_err("qat_hal_set_ae_ctx_mode error\n"); return ret; } if (handle->chip_info->nn) { mode = ICP_QAT_NN_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_nn_mode(handle, ae, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); + pr_err("qat_hal_set_ae_nn_mode error\n"); return ret; } } mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n"); return ret; } mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n"); return ret; } if (handle->chip_info->lm2lm3) { mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n"); return ret; } mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode); ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode); if (ret) { - pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n"); + pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n"); return ret; } mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode); @@ -997,7 +999,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { - pr_err("QAT: UOF incompatible\n"); + pr_err("UOF incompatible\n"); 
return -EINVAL; } obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64), @@ -1008,7 +1010,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) if (!obj_handle->obj_hdr->file_buff || !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, &obj_handle->str_table)) { - pr_err("QAT: UOF doesn't have effective images\n"); + pr_err("UOF doesn't have effective images\n"); goto out_err; } obj_handle->uimage_num = @@ -1017,7 +1019,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) if (!obj_handle->uimage_num) goto out_err; if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { - pr_err("QAT: Bad object\n"); + pr_err("Bad object\n"); goto out_check_uof_aemask_err; } qat_uclo_init_uword_num(handle); @@ -1034,6 +1036,36 @@ out_err: return -EFAULT; } +static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle) +{ + if (handle->chip_info->dual_sign) + return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN; + + return ICP_QAT_AE_IMG_OFFSET(handle); +} + +static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle) +{ + if (handle->chip_info->dual_sign) + return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN; + + return ICP_QAT_AE_IMG_OFFSET(handle); +} + +static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr) +{ + struct icp_qat_css_hdr *hdr = img_ptr; + char *fw_hdr = img_ptr; + unsigned int offset; + + if (handle->chip_info->dual_sign) { + offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN; + return *(fw_hdr + offset); + } + + return hdr->fw_type; +} + static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_filehdr *suof_ptr, int suof_size) @@ -1050,7 +1082,7 @@ static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver, min_ver_offset); if (check_sum != suof_ptr->check_sum) { - pr_err("QAT: incorrect SUOF checksum\n"); + pr_err("incorrect SUOF checksum\n"); return -EINVAL; } suof_handle->check_sum = suof_ptr->check_sum; @@ -1065,9 +1097,9 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; - unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle); - struct icp_qat_simg_ae_mode *ae_mode; + unsigned int offset = qat_uclo_simg_hdr2cont_len(handle); struct icp_qat_suof_objhdr *suof_objhdr; + struct icp_qat_simg_ae_mode *ae_mode; suof_img_hdr->simg_buf = (suof_handle->suof_buf + suof_chunk_hdr->offset + @@ -1112,14 +1144,13 @@ static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (img_ae_mode->dev_type != prod_type) { - pr_err("QAT: incompatible product type %x\n", - img_ae_mode->dev_type); + pr_err("incompatible product type %x\n", img_ae_mode->dev_type); return -EINVAL; } maj_ver = prod_rev & 0xff; if (maj_ver > img_ae_mode->devmax_ver || maj_ver < img_ae_mode->devmin_ver) { - pr_err("QAT: incompatible device majver 0x%x\n", maj_ver); + pr_err("incompatible device majver 0x%x\n", maj_ver); return -EINVAL; } return 0; @@ -1162,7 +1193,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_img_hdr img_header; if (!suof_ptr || suof_size == 0) { - pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); + 
pr_err("input parameter SUOF pointer/size is NULL\n"); return -EINVAL; } if (qat_uclo_check_suof_format(suof_ptr)) @@ -1205,7 +1236,6 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, } #define ADD_ADDR(high, low) ((((u64)high) << 32) + low) -#define BITS_IN_DWORD 32 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) @@ -1223,7 +1253,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi; fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo; - SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD)); + SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32)); SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr); SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); @@ -1237,7 +1267,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, return 0; } while (retry++ < FW_AUTH_MAX_RETRY); auth_fail: - pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", + pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n", fcu_sts & FCU_AUTH_STS_MASK, retry); return -EINVAL; } @@ -1273,14 +1303,13 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, fcu_sts_csr = handle->chip_info->fcu_sts_csr; fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr; } else { - pr_err("Chip 0x%x doesn't support broadcast load\n", - handle->pci_dev->device); + pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device); return -EINVAL; } for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { if (qat_hal_check_ae_active(handle, (unsigned char)ae)) { - pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n"); + pr_err("Broadcast load failed. AE is not enabled or active.\n"); return -EINVAL; } @@ -1312,7 +1341,7 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, } while (retry++ < FW_AUTH_MAX_RETRY); if (retry > FW_AUTH_MAX_RETRY) { - pr_err("QAT: broadcast load failed timeout %d\n", retry); + pr_err("broadcast load failed timeout %d\n", retry); return -EINVAL; } } @@ -1366,24 +1395,38 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, } static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, - char *image, unsigned int size, + void *image, unsigned int size, unsigned int fw_type) { char *fw_type_name = fw_type ? 
"MMP" : "AE"; unsigned int css_dword_size = sizeof(u32); + unsigned int header_len, simg_type; + struct icp_qat_css_hdr *css_hdr; if (handle->chip_info->fw_auth) { - struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; - unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle); + header_len = qat_uclo_simg_hdr2sign_len(handle); + simg_type = qat_uclo_simg_fw_type(handle, image); + css_hdr = image; + + if (handle->chip_info->dual_sign) { + if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE) + goto err; + if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN) + goto err; + if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER) + goto err; + } else { + if (css_hdr->header_len * css_dword_size != header_len) + goto err; + if (css_hdr->size * css_dword_size != size) + goto err; + if (size <= header_len) + goto err; + } - if ((css_hdr->header_len * css_dword_size) != header_len) - goto err; - if ((css_hdr->size * css_dword_size) != size) - goto err; - if (fw_type != css_hdr->fw_type) - goto err; - if (size <= header_len) + if (fw_type != simg_type) goto err; + size -= header_len; } @@ -1397,123 +1440,95 @@ static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN) goto err; } else { - pr_err("QAT: Unsupported firmware type\n"); + pr_err("Unsupported firmware type\n"); return -EINVAL; } return 0; err: - pr_err("QAT: Invalid %s firmware image\n", fw_type_name); + pr_err("Invalid %s firmware image\n", fw_type_name); return -EINVAL; } -static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, - char *image, unsigned int size, - struct icp_qat_fw_auth_desc **desc) +static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_firml_dram_desc *dram_desc, + unsigned int fw_type, struct icp_qat_fw_auth_desc **desc) { struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; - struct icp_qat_fw_auth_desc *auth_desc; - struct icp_qat_auth_chunk *auth_chunk; - u64 virt_addr, bus_addr, virt_base; - unsigned int simg_offset = sizeof(*auth_chunk); struct icp_qat_simg_ae_mode *simg_ae_mode; - struct icp_firml_dram_desc img_desc; - int ret; - - ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); - if (ret) { - pr_err("QAT: error, allocate continuous dram fail\n"); - return ret; - } - - if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) { - pr_debug("QAT: invalid address\n"); - qat_uclo_simg_free(handle, &img_desc); - return -EINVAL; - } + struct icp_qat_fw_auth_desc *auth_desc; + char *virt_addr, *virt_base; + u64 bus_addr; - auth_chunk = img_desc.dram_base_addr_v; - auth_chunk->chunk_size = img_desc.dram_size; - auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; - virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset; - bus_addr = img_desc.dram_bus_addr + simg_offset; - auth_desc = img_desc.dram_base_addr_v; - auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->css_hdr_low = (unsigned int)bus_addr; + virt_base = dram_desc->dram_base_addr_v; + virt_base += sizeof(struct icp_qat_auth_chunk); + bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk); + auth_desc = dram_desc->dram_base_addr_v; + auth_desc->css_hdr_high = upper_32_bits(bus_addr); + auth_desc->css_hdr_low = lower_32_bits(bus_addr); virt_addr = virt_base; - memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); + memcpy(virt_addr, image, sizeof(*css_hdr)); /* pub key */ bus_addr = 
ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + sizeof(*css_hdr); virt_addr = virt_addr + sizeof(*css_hdr); - auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->fwsk_pub_low = (unsigned int)bus_addr; + auth_desc->fwsk_pub_high = upper_32_bits(bus_addr); + auth_desc->fwsk_pub_low = lower_32_bits(bus_addr); - memcpy((void *)(uintptr_t)virt_addr, - (void *)(image + sizeof(*css_hdr)), - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); + memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); /* padding */ memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)), 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle)); /* exponent */ - memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + - ICP_QAT_CSS_FWSK_PAD_LEN(handle)), - (void *)(image + sizeof(*css_hdr) + - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)), - sizeof(unsigned int)); + memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + + ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) + + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int)); /* signature */ bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) + ICP_QAT_CSS_FWSK_PUB_LEN(handle); virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle); - auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->signature_low = (unsigned int)bus_addr; + auth_desc->signature_high = upper_32_bits(bus_addr); + auth_desc->signature_low = lower_32_bits(bus_addr); - memcpy((void *)(uintptr_t)virt_addr, - (void *)(image + sizeof(*css_hdr) + - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + - ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)), - ICP_QAT_CSS_SIGNATURE_LEN(handle)); + memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + + ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle)); bus_addr = ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) + ICP_QAT_CSS_SIGNATURE_LEN(handle); virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle); - auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); - auth_desc->img_low = (unsigned int)bus_addr; - auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle); - if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr + - ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { - pr_err("QAT: insufficient memory size for authentication data\n"); - qat_uclo_simg_free(handle, &img_desc); + auth_desc->img_high = upper_32_bits(bus_addr); + auth_desc->img_low = lower_32_bits(bus_addr); + auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle); + if (bus_addr + auth_desc->img_len > + dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { + pr_err("insufficient memory size for authentication data\n"); + qat_uclo_simg_free(handle, dram_desc); return -ENOMEM; } - memcpy((void *)(uintptr_t)virt_addr, - (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)), - auth_desc->img_len); + memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len); virt_addr = virt_base; /* AE firmware */ - if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == - CSS_AE_FIRMWARE) { + if (fw_type == CSS_AE_FIRMWARE) { auth_desc->img_ae_mode_data_high = auth_desc->img_high; auth_desc->img_ae_mode_data_low = auth_desc->img_low; bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, auth_desc->img_ae_mode_data_low) + sizeof(struct icp_qat_simg_ae_mode); - auth_desc->img_ae_init_data_high = (unsigned int) - (bus_addr >> BITS_IN_DWORD); - auth_desc->img_ae_init_data_low = (unsigned 
int)bus_addr; + auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr); + auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr); bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; - auth_desc->img_ae_insts_high = (unsigned int) - (bus_addr >> BITS_IN_DWORD); - auth_desc->img_ae_insts_low = (unsigned int)bus_addr; + auth_desc->img_ae_insts_high = upper_32_bits(bus_addr); + auth_desc->img_ae_insts_low = lower_32_bits(bus_addr); virt_addr += sizeof(struct icp_qat_css_hdr); virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle); virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle); @@ -1527,6 +1542,141 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, return 0; } +static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_firml_dram_desc *dram_desc, + unsigned int fw_type, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_qat_simg_ae_mode *simg_ae_mode; + struct icp_qat_fw_auth_desc *auth_desc; + unsigned int chunk_offset, img_offset; + u64 bus_addr, addr; + char *virt_addr; + + virt_addr = dram_desc->dram_base_addr_v; + virt_addr += sizeof(struct icp_qat_auth_chunk); + bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk); + + auth_desc = dram_desc->dram_base_addr_v; + auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle); + auth_desc->css_hdr_high = upper_32_bits(bus_addr); + auth_desc->css_hdr_low = lower_32_bits(bus_addr); + memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN); + + img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN; + chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN; + + /* RSA pub key */ + addr = bus_addr + chunk_offset; + auth_desc->fwsk_pub_high = upper_32_bits(addr); + auth_desc->fwsk_pub_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); + + img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle); + chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle); + /* RSA padding */ + memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle)); + + chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle); + /* RSA exponent */ + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)); + + img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); + chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); + /* RSA signature */ + addr = bus_addr + chunk_offset; + auth_desc->signature_high = upper_32_bits(addr); + auth_desc->signature_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle)); + + img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle); + chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle); + /* XMSS pubkey */ + addr = bus_addr + chunk_offset; + auth_desc->xmss_pubkey_high = upper_32_bits(addr); + auth_desc->xmss_pubkey_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN); + + img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN; + chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN; + /* XMSS signature */ + addr = bus_addr + chunk_offset; + auth_desc->xmss_sig_high = upper_32_bits(addr); + auth_desc->xmss_sig_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN); + + img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN; + chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN; + + if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) { + pr_err("auth chunk memory size is not enough to store data\n"); 
+ return -ENOMEM; + } + + /* Signed data */ + addr = bus_addr + chunk_offset; + auth_desc->img_high = upper_32_bits(addr); + auth_desc->img_low = lower_32_bits(addr); + memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len); + + chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN; + /* AE firmware */ + if (fw_type == CSS_AE_FIRMWARE) { + /* AE mode data */ + addr = bus_addr + chunk_offset; + auth_desc->img_ae_mode_data_high = upper_32_bits(addr); + auth_desc->img_ae_mode_data_low = lower_32_bits(addr); + simg_ae_mode = + (struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset); + auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask; + + chunk_offset += sizeof(struct icp_qat_simg_ae_mode); + /* AE init seq */ + addr = bus_addr + chunk_offset; + auth_desc->img_ae_init_data_high = upper_32_bits(addr); + auth_desc->img_ae_init_data_low = lower_32_bits(addr); + + chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; + /* AE instructions */ + addr = bus_addr + chunk_offset; + auth_desc->img_ae_insts_high = upper_32_bits(addr); + auth_desc->img_ae_insts_low = lower_32_bits(addr); + } else { + addr = bus_addr + chunk_offset; + auth_desc->img_ae_insts_high = upper_32_bits(addr); + auth_desc->img_ae_insts_low = lower_32_bits(addr); + } + *desc = auth_desc; + return 0; +} + +static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_qat_auth_chunk *auth_chunk; + struct icp_firml_dram_desc img_desc; + unsigned int simg_fw_type; + int ret; + + ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); + if (ret) + return ret; + + simg_fw_type = qat_uclo_simg_fw_type(handle, image); + auth_chunk = img_desc.dram_base_addr_v; + auth_chunk->chunk_size = img_desc.dram_size; + auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; + + if (handle->chip_info->dual_sign) + return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc, + simg_fw_type, desc); + + return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc, + simg_fw_type, desc); +} + static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *desc) { @@ -1546,7 +1696,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, if (!((desc->ae_mask >> i) & 0x1)) continue; if (qat_hal_check_ae_active(handle, i)) { - pr_err("QAT: AE %d is active\n", i); + pr_err("AE %d is active\n", i); return -EINVAL; } SET_CAP_CSR(handle, fcu_ctl_csr, @@ -1566,7 +1716,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, } } while (retry++ < FW_AUTH_MAX_RETRY); if (retry > FW_AUTH_MAX_RETRY) { - pr_err("QAT: firmware load failed timeout %x\n", retry); + pr_err("firmware load failed timeout %x\n", retry); return -EINVAL; } } @@ -1584,7 +1734,7 @@ static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, handle->sobj_handle = suof_handle; if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { qat_uclo_del_suof(handle); - pr_err("QAT: map SUOF failed\n"); + pr_err("map SUOF failed\n"); return -EINVAL; } return 0; @@ -1608,7 +1758,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, qat_uclo_ummap_auth_fw(handle, &desc); } else { if (handle->chip_info->mmp_sram_size < mem_size) { - pr_err("QAT: MMP size is too large: 0x%x\n", mem_size); + pr_err("MMP size is too large: 0x%x\n", mem_size); return -EFBIG; } qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size); @@ -1634,7 +1784,7 @@ static int 
qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, ICP_QAT_UOF_OBJS); if (!objhdl->obj_hdr) { - pr_err("QAT: object file chunk is null\n"); + pr_err("object file chunk is null\n"); goto out_objhdr_err; } handle->obj_handle = objhdl; @@ -1669,7 +1819,7 @@ static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle, checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver, min_ver_offset); if (checksum != mof_ptr->checksum) { - pr_err("QAT: incorrect MOF checksum\n"); + pr_err("incorrect MOF checksum\n"); return -EINVAL; } @@ -1705,7 +1855,7 @@ static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, } } - pr_err("QAT: object %s is not found inside MOF\n", obj_name); + pr_err("object %s is not found inside MOF\n", obj_name); return -EINVAL; } @@ -1722,7 +1872,7 @@ static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle, ICP_QAT_MOF_OBJ_CHUNKID_LEN)) { obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset; } else { - pr_err("QAT: unsupported chunk id\n"); + pr_err("unsupported chunk id\n"); return -EINVAL; } mobj_hdr->obj_buf = obj; @@ -1783,7 +1933,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) } if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) { - pr_err("QAT: inconsistent UOF/SUOF chunk amount\n"); + pr_err("inconsistent UOF/SUOF chunk amount\n"); return -EINVAL; } return 0; @@ -1824,17 +1974,16 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr) int min = mof_hdr->min_ver & 0xff; if (mof_hdr->file_id != ICP_QAT_MOF_FID) { - pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id); + pr_err("invalid header 0x%x\n", mof_hdr->file_id); return -EINVAL; } if (mof_hdr->num_chunks <= 0x1) { - pr_err("QAT: MOF chunk amount is incorrect\n"); + pr_err("MOF chunk amount is incorrect\n"); return -EINVAL; } if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) { - pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n", - maj, min); + pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min); return -EINVAL; } return 0; diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile index 5bf5c890c362..1427fe76f171 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index e48bcf1818cd..5b4bd0ba1ccb 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -4,7 +4,6 @@ #include <adf_admin.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -24,7 +23,6 @@ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = { static struct adf_hw_device_class dh895xcc_class = { .name = ADF_DH895XCC_DEVICE_NAME, .type = DEV_DH895XCC, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 07e9d7e52861..b59e0cc49e52 
100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -19,24 +19,6 @@ #include <adf_dbgfs.h> #include "adf_dh895xcc_hw_data.h" -static const struct pci_device_id adf_pci_tbl[] = { - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), }, - { } -}; -MODULE_DEVICE_TABLE(pci, adf_pci_tbl); - -static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); -static void adf_remove(struct pci_dev *dev); - -static struct pci_driver adf_driver = { - .id_table = adf_pci_tbl, - .name = ADF_DH895XCC_DEVICE_NAME, - .probe = adf_probe, - .remove = adf_remove, - .sriov_configure = adf_sriov_configure, - .err_handler = &adf_err_handler, -}; - static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) { pci_release_regions(accel_dev->accel_pci_dev.pci_dev); @@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev) kfree(accel_dev); } +static void adf_shutdown(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + adf_dev_down(accel_dev); +} + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_DH895XCC_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .shutdown = adf_shutdown, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + static int __init adfdrv_init(void) { request_module("intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile index 93f9c81edf09..c2fdb6e0f68f 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index f4ee4c2e00da..828456c43b76 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -3,7 +3,6 @@ #include <adf_accel_devices.h> #include <adf_common_drv.h> #include <adf_gen2_config.h> -#include <adf_gen2_dc.h> #include <adf_gen2_hw_csr_data.h> #include <adf_gen2_hw_data.h> #include <adf_gen2_pfvf.h> @@ -13,7 +12,6 @@ static struct adf_hw_device_class dh895xcciov_class = { .name = ADF_DH895XCCVF_DEVICE_NAME, .type = DEV_DH895XCCVF, - .instances = 0 }; static u32 get_accel_mask(struct adf_hw_device_data *self) diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index fa08f10e6f3f..9c21f5d835d2 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status) static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status) { - if (engine->chain.first && engine->chain.last) + if (engine->chain_hw.first && engine->chain_hw.last) return mv_cesa_tdma_process(engine, status); return mv_cesa_std_process(engine, status); diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h index d215a6bed6bc..50ca1039fdaa 100644 --- a/drivers/crypto/marvell/cesa/cesa.h +++ 
b/drivers/crypto/marvell/cesa/cesa.h @@ -440,8 +440,10 @@ struct mv_cesa_dev { * SRAM * @queue: fifo of the pending crypto requests * @load: engine load counter, useful for load balancing - * @chain: list of the current tdma descriptors being processed - * by this engine. + * @chain_hw: list of the current tdma descriptors being processed + * by the hardware. + * @chain_sw: list of the current tdma descriptors that will be + * submitted to the hardware. * @complete_queue: fifo of the processed requests by the engine * * Structure storing CESA engine information. @@ -463,7 +465,8 @@ struct mv_cesa_engine { struct gen_pool *pool; struct crypto_queue queue; atomic_t load; - struct mv_cesa_tdma_chain chain; + struct mv_cesa_tdma_chain chain_hw; + struct mv_cesa_tdma_chain chain_sw; struct list_head complete_queue; int irq; }; diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c index cf62db50f958..48c5c8ea8c43 100644 --- a/drivers/crypto/marvell/cesa/cipher.c +++ b/drivers/crypto/marvell/cesa/cipher.c @@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req, struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); struct mv_cesa_engine *engine; + if (!req->cryptlen) + return 0; + ret = mv_cesa_skcipher_req_init(req, tmpl); if (ret) return ret; diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index f150861ceaf6..6815eddc9068 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) if (ret) goto err_free_tdma; - if (iter.src.sg) { + if (iter.base.len > iter.src.op_offset) { /* * Add all the new data, inserting an operation block and * launch command between each full SRAM block-worth of diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c index 388a06e180d6..243305354420 100644 --- a/drivers/crypto/marvell/cesa/tdma.c +++ b/drivers/crypto/marvell/cesa/tdma.c @@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq) { struct mv_cesa_engine *engine = dreq->engine; + spin_lock_bh(&engine->lock); + if (engine->chain_sw.first == dreq->chain.first) { + engine->chain_sw.first = NULL; + engine->chain_sw.last = NULL; + } + engine->chain_hw.first = dreq->chain.first; + engine->chain_hw.last = dreq->chain.last; + spin_unlock_bh(&engine->lock); + writel_relaxed(0, engine->regs + CESA_SA_CFG); mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); @@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq, void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, struct mv_cesa_req *dreq) { - if (engine->chain.first == NULL && engine->chain.last == NULL) { - engine->chain.first = dreq->chain.first; - engine->chain.last = dreq->chain.last; - } else { - struct mv_cesa_tdma_desc *last; + struct mv_cesa_tdma_desc *last = engine->chain_sw.last; - last = engine->chain.last; + /* + * Break the DMA chain if the request being queued needs the IV + * regs to be set before lauching the request. + */ + if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE) + engine->chain_sw.first = dreq->chain.first; + else { last->next = dreq->chain.first; - engine->chain.last = dreq->chain.last; - - /* - * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on - * the last element of the current chain, or if the request - * being queued needs the IV regs to be set before lauching - * the request. 
- */ - if (!(last->flags & CESA_TDMA_BREAK_CHAIN) && - !(dreq->chain.first->flags & CESA_TDMA_SET_STATE)) - last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma); + last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma); + } + last = dreq->chain.last; + engine->chain_sw.last = last; + /* + * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on + * the last element of the current chain. + */ + if (last->flags & CESA_TDMA_BREAK_CHAIN) { + engine->chain_sw.first = NULL; + engine->chain_sw.last = NULL; } } @@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) tdma_cur = readl(engine->regs + CESA_TDMA_CUR); - for (tdma = engine->chain.first; tdma; tdma = next) { + for (tdma = engine->chain_hw.first; tdma; tdma = next) { spin_lock_bh(&engine->lock); next = tdma->next; spin_unlock_bh(&engine->lock); @@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) &backlog); /* Re-chaining to the next request */ - engine->chain.first = tdma->next; + engine->chain_hw.first = tdma->next; tdma->next = NULL; /* If this is the last request, clear the chain */ - if (engine->chain.first == NULL) - engine->chain.last = NULL; + if (engine->chain_hw.first == NULL) + engine->chain_hw.last = NULL; spin_unlock_bh(&engine->lock); ctx = crypto_tfm_ctx(req->tfm); diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c index 5cae8fafa151..d4aab9e20f2a 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c @@ -6,6 +6,7 @@ #include "otx2_cptvf.h" #include "otx2_cptlf.h" #include "cn10k_cpt.h" +#include "otx2_cpt_common.h" static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, struct otx2_cptlf_info *lf); @@ -27,7 +28,7 @@ static struct cpt_hw_ops cn10k_hw_ops = { static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, struct otx2_cptlf_info *lf) { - void __iomem *lmtline = lf->lmtline; + void *lmtline = lf->lfs->lmt_info.base + (lf->slot * LMTLINE_SIZE); u64 val = (lf->slot & 0x7FF); u64 tar_addr = 0; @@ -41,15 +42,49 @@ static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, dma_wmb(); /* Copy CPT command to LMTLINE */ - memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE); + memcpy(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE); cn10k_lmt_flush(val, tar_addr); } +void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs) +{ + struct otx2_lmt_info *lmt_info = &lfs->lmt_info; + + if (!lmt_info->base) + return; + + dma_free_attrs(&pdev->dev, lmt_info->size, + lmt_info->base - lmt_info->align, + lmt_info->iova - lmt_info->align, + DMA_ATTR_FORCE_CONTIGUOUS); +} +EXPORT_SYMBOL_NS_GPL(cn10k_cpt_lmtst_free, "CRYPTO_DEV_OCTEONTX2_CPT"); + +static int cn10k_cpt_lmtst_alloc(struct pci_dev *pdev, + struct otx2_cptlfs_info *lfs, u32 size) +{ + struct otx2_lmt_info *lmt_info = &lfs->lmt_info; + dma_addr_t align_iova; + dma_addr_t iova; + + lmt_info->base = dma_alloc_attrs(&pdev->dev, size, &iova, GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); + if (!lmt_info->base) + return -ENOMEM; + + align_iova = ALIGN((u64)iova, LMTLINE_ALIGN); + lmt_info->iova = align_iova; + lmt_info->align = align_iova - iova; + lmt_info->size = size; + lmt_info->base += lmt_info->align; + return 0; +} + int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf) { struct pci_dev *pdev = cptpf->pdev; - resource_size_t size; - u64 lmt_base; + u32 size; + int ret; if (!test_bit(CN10K_LMTST, 
&cptpf->cap_flag)) { cptpf->lfs.ops = &otx2_hw_ops; @@ -57,18 +92,19 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf) } cptpf->lfs.ops = &cn10k_hw_ops; - lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR); - if (!lmt_base) { - dev_err(&pdev->dev, "PF LMTLINE address not configured\n"); - return -ENOMEM; + size = OTX2_CPT_MAX_VFS_NUM * LMTLINE_SIZE + LMTLINE_ALIGN; + ret = cn10k_cpt_lmtst_alloc(pdev, &cptpf->lfs, size); + if (ret) { + dev_err(&pdev->dev, "PF-%d LMTLINE memory allocation failed\n", + cptpf->pf_id); + return ret; } - size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM); - size -= ((1 + cptpf->max_vfs) * MBOX_SIZE); - cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size); - if (!cptpf->lfs.lmt_base) { - dev_err(&pdev->dev, - "Mapping of PF LMTLINE address failed\n"); - return -ENOMEM; + + ret = otx2_cpt_lmtst_tbl_setup_msg(&cptpf->lfs); + if (ret) { + dev_err(&pdev->dev, "PF-%d: LMTST Table setup failed\n", + cptpf->pf_id); + cn10k_cpt_lmtst_free(pdev, &cptpf->lfs); } return 0; @@ -78,18 +114,25 @@ EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT"); int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf) { struct pci_dev *pdev = cptvf->pdev; - resource_size_t offset, size; + u32 size; + int ret; if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) return 0; - offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM); - size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM); - /* Map VF LMILINE region */ - cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size); - if (!cptvf->lfs.lmt_base) { - dev_err(&pdev->dev, "Unable to map BAR4\n"); - return -ENOMEM; + size = cptvf->lfs.lfs_num * LMTLINE_SIZE + LMTLINE_ALIGN; + ret = cn10k_cpt_lmtst_alloc(pdev, &cptvf->lfs, size); + if (ret) { + dev_err(&pdev->dev, "VF-%d LMTLINE memory allocation failed\n", + cptvf->vf_id); + return ret; + } + + ret = otx2_cpt_lmtst_tbl_setup_msg(&cptvf->lfs); + if (ret) { + dev_err(&pdev->dev, "VF-%d: LMTST Table setup failed\n", + cptvf->vf_id); + cn10k_cpt_lmtst_free(pdev, &cptvf->lfs); } return 0; diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h index 92be3ecf570f..ea5990048c21 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h @@ -50,6 +50,7 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result) int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf); int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf); +void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs); void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval); int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev, struct cn10k_cpt_errata_ctx *er_ctx); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h index c5b7c57574ef..d529bcb03775 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h @@ -145,11 +145,8 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot, static inline bool is_dev_otx2(struct pci_dev *pdev) { - if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID || - pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID) - return true; - - return false; + return pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID || + pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID; } static inline bool is_dev_cn10ka(struct pci_dev *pdev) @@ -159,12 +156,10 @@ static inline bool is_dev_cn10ka(struct pci_dev *pdev) static inline bool 
is_dev_cn10ka_ax(struct pci_dev *pdev) { - if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && - ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 || - (pdev->revision & 0xff) == 0x51)) - return true; - - return false; + return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && + ((pdev->revision & 0xFF) == 4 || + (pdev->revision & 0xFF) == 0x50 || + (pdev->revision & 0xFF) == 0x51); } static inline bool is_dev_cn10kb(struct pci_dev *pdev) @@ -174,11 +169,8 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev) static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev) { - if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && - (pdev->revision & 0xFF) == 0x54) - return true; - - return false; + return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && + (pdev->revision & 0xFF) == 0x54; } static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev, @@ -192,18 +184,12 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev, static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev) { - if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev)) - return true; - - return false; + return is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev); } static inline bool cpt_feature_sgv2(struct pci_dev *pdev) { - if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev)) - return true; - - return false; + return !is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev); } int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev); @@ -223,5 +209,6 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox); int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot); +int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs); #endif /* __OTX2_CPT_COMMON_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c index b8b7c8a3c0ca..95f3de3a34eb 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c @@ -255,3 +255,28 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot) return ret; } EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT"); + +int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs) +{ + struct otx2_mbox *mbox = lfs->mbox; + struct pci_dev *pdev = lfs->pdev; + struct lmtst_tbl_setup_req *req; + + req = (struct lmtst_tbl_setup_req *) + otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), + sizeof(struct msg_rsp)); + if (!req) { + dev_err(&pdev->dev, "RVU MBOX failed to alloc message.\n"); + return -EFAULT; + } + + req->hdr.id = MBOX_MSG_LMTST_TBL_SETUP; + req->hdr.sig = OTX2_MBOX_REQ_SIG; + req->hdr.pcifunc = 0; + + req->use_local_lmt_region = true; + req->lmt_iova = lfs->lmt_info.iova; + + return otx2_cpt_send_mbox_msg(mbox, pdev); +} +EXPORT_SYMBOL_NS_GPL(otx2_cpt_lmtst_tbl_setup_msg, "CRYPTO_DEV_OCTEONTX2_CPT"); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c index b5d66afcc030..dc7c7a2650a5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c @@ -433,10 +433,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri, for (slot = 0; slot < lfs->lfs_num; slot++) { lfs->lf[slot].lfs = lfs; lfs->lf[slot].slot = slot; - if (lfs->lmt_base) - lfs->lf[slot].lmtline = lfs->lmt_base + - (slot * LMTLINE_SIZE); - else + 
if (!lfs->lmt_info.base) lfs->lf[slot].lmtline = lfs->reg_base + OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot, OTX2_CPT_LMT_LF_LMTLINEX(0)); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h index bd8604be2952..6e004a5568d8 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h @@ -105,11 +105,19 @@ struct cpt_hw_ops { gfp_t gfp); }; +#define LMTLINE_SIZE 128 +#define LMTLINE_ALIGN 128 +struct otx2_lmt_info { + void *base; + dma_addr_t iova; + u32 size; + u8 align; +}; + struct otx2_cptlfs_info { /* Registers start address of VF/PF LFs are attached to */ void __iomem *reg_base; -#define LMTLINE_SIZE 128 - void __iomem *lmt_base; + struct otx2_lmt_info lmt_info; struct pci_dev *pdev; /* Device LFs are attached to */ struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM]; struct otx2_mbox *mbox; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index 12971300296d..1c5c262af48d 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -639,6 +639,12 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) /* Disable all cores */ ret = otx2_cpt_disable_all_cores(cptpf); + otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base, + &cptpf->afpf_mbox, BLKADDR_CPT0); + if (cptpf->has_cpt1) + otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev, + cptpf->reg_base, &cptpf->afpf_mbox, + BLKADDR_CPT1); return ret; } @@ -786,19 +792,19 @@ static int otx2_cptpf_probe(struct pci_dev *pdev, cptpf->max_vfs = pci_sriov_get_totalvfs(pdev); cptpf->kvf_limits = 1; - err = cn10k_cptpf_lmtst_init(cptpf); + /* Initialize CPT PF device */ + err = cptpf_device_init(cptpf); if (err) goto unregister_intr; - /* Initialize CPT PF device */ - err = cptpf_device_init(cptpf); + err = cn10k_cptpf_lmtst_init(cptpf); if (err) goto unregister_intr; /* Initialize engine groups */ err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps); if (err) - goto unregister_intr; + goto free_lmtst; err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group); if (err) @@ -814,6 +820,8 @@ sysfs_grp_del: sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group); cleanup_eng_grps: otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps); +free_lmtst: + cn10k_cpt_lmtst_free(pdev, &cptpf->lfs); unregister_intr: cptpf_disable_afpf_mbox_intr(cptpf); destroy_afpf_mbox: @@ -848,6 +856,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev) cptpf_disable_afpf_mbox_intr(cptpf); /* Destroy AF-PF mbox */ cptpf_afpf_mbox_destroy(cptpf); + /* Free LMTST memory */ + cn10k_cpt_lmtst_free(pdev, &cptpf->lfs); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c index ec1ac7e836a3..12c0e966fa65 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c @@ -264,8 +264,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, return -ENOENT; } - otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base, - &cptpf->afpf_mbox, BLKADDR_CPT0); cptpf->lfs.global_slot = 0; cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid; cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen; @@ -278,9 +276,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, if (cptpf->has_cpt1) { cptpf->rsrc_req_blkaddr = BLKADDR_CPT1; - 
otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev, - cptpf->reg_base, &cptpf->afpf_mbox, - BLKADDR_CPT1); cptpf->cpt1_lfs.global_slot = num_lfs; cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid; cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen; @@ -507,6 +502,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf, case MBOX_MSG_CPT_INLINE_IPSEC_CFG: case MBOX_MSG_NIX_INLINE_IPSEC_CFG: case MBOX_MSG_CPT_LF_RESET: + case MBOX_MSG_LMTST_TBL_SETUP: break; default: diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 42c5484ce66a..78367849c3d5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1513,8 +1513,6 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) if (ret) goto delete_grps; - otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base, - &cptpf->afpf_mbox, BLKADDR_CPT0); ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK, OTX2_CPT_QUEUE_HI_PRIO, 1); if (ret) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c index d84eebdf2fa8..56904bdfd6e8 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c @@ -283,8 +283,6 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf) lfs_num = cptvf->lfs.kvf_limits; - otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base, - &cptvf->pfvf_mbox, cptvf->blkaddr); ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO, lfs_num); if (ret) @@ -378,10 +376,6 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag); - ret = cn10k_cptvf_lmtst_init(cptvf); - if (ret) - goto clear_drvdata; - /* Initialize PF<=>VF mailbox */ ret = cptvf_pfvf_mbox_init(cptvf); if (ret) @@ -396,6 +390,9 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, cptvf_hw_ops_get(cptvf); + otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base, + &cptvf->pfvf_mbox, cptvf->blkaddr); + ret = otx2_cptvf_send_caps_msg(cptvf); if (ret) { dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n"); @@ -404,13 +401,19 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35)) cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create; + ret = cn10k_cptvf_lmtst_init(cptvf); + if (ret) + goto unregister_interrupts; + /* Initialize CPT LFs */ ret = cptvf_lf_init(cptvf); if (ret) - goto unregister_interrupts; + goto free_lmtst; return 0; +free_lmtst: + cn10k_cpt_lmtst_free(pdev, &cptvf->lfs); unregister_interrupts: cptvf_disable_pfvf_mbox_intrs(cptvf); destroy_pfvf_mbox: @@ -434,6 +437,8 @@ static void otx2_cptvf_remove(struct pci_dev *pdev) cptvf_disable_pfvf_mbox_intrs(cptvf); /* Destroy PF-VF mbox */ cptvf_pfvf_mbox_destroy(cptvf); + /* Free LMTST memory */ + cn10k_cpt_lmtst_free(pdev, &cptvf->lfs); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c index d9fa5f6e204d..931b72580fd9 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c @@ -134,6 +134,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf, sizeof(cptvf->eng_caps)); break; case MBOX_MSG_CPT_LF_RESET: + case MBOX_MSG_LMTST_TBL_SETUP: break; default: dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n", diff --git 
a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 0e440f704a8f..35fa5bad1d9f 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -8,10 +8,12 @@ */ #include <crypto/aes.h> -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include <asm/vio.h> #include "nx_csbcpb.h" diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index dfa3ad1a12f2..709b3ee74657 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -9,10 +9,12 @@ #include <crypto/aes.h> #include <crypto/ctr.h> -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include <asm/vio.h> #include "nx_csbcpb.h" diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 502a565074e9..4039cf3b22d4 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -8,10 +8,12 @@ */ #include <crypto/aes.h> -#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/err.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include <asm/vio.h> #include "nx_csbcpb.h" diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index eb5c8f689360..bf465d824e2c 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -7,13 +7,14 @@ * Author: Kent Yoder <yoder1@us.ibm.com> */ -#include <crypto/internal/hash.h> #include <crypto/aes.h> -#include <crypto/algapi.h> +#include <crypto/internal/hash.h> +#include <linux/atomic.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <linux/types.h> -#include <linux/crypto.h> -#include <asm/vio.h> +#include <linux/spinlock.h> +#include <linux/string.h> #include "nx_csbcpb.h" #include "nx.h" @@ -21,8 +22,6 @@ struct xcbc_state { u8 state[AES_BLOCK_SIZE]; - unsigned int count; - u8 buffer[AES_BLOCK_SIZE]; }; static int nx_xcbc_set_key(struct crypto_shash *desc, @@ -58,7 +57,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc, */ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u8 keys[2][AES_BLOCK_SIZE]; @@ -135,9 +134,9 @@ out: return rc; } -static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm) +static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; int err; @@ -166,31 +165,24 @@ static int nx_xcbc_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct xcbc_state *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; struct nx_sg *out_sg; - u32 to_process = 0, leftover, total; 
unsigned int max_sg_len; unsigned long irq_flags; + u32 to_process, total; int rc = 0; int data_len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); + memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE); + NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - total = sctx->count + len; - - /* 2 cases for total data len: - * 1: <= AES_BLOCK_SIZE: copy into state, return 0 - * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover - */ - if (total <= AES_BLOCK_SIZE) { - memcpy(sctx->buffer + sctx->count, data, len); - sctx->count += len; - goto out; - } + total = len; in_sg = nx_ctx->in_sg; max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), @@ -200,7 +192,7 @@ static int nx_xcbc_update(struct shash_desc *desc, data_len = AES_BLOCK_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, - &len, nx_ctx->ap->sglen); + &data_len, nx_ctx->ap->sglen); if (data_len != AES_BLOCK_SIZE) { rc = -EINVAL; @@ -210,56 +202,21 @@ static int nx_xcbc_update(struct shash_desc *desc, nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); do { - to_process = total - to_process; - to_process = to_process & ~(AES_BLOCK_SIZE - 1); - - leftover = total - to_process; - - /* the hardware will not accept a 0 byte operation for this - * algorithm and the operation MUST be finalized to be correct. - * So if we happen to get an update that falls on a block sized - * boundary, we must save off the last block to finalize with - * later. */ - if (!leftover) { - to_process -= AES_BLOCK_SIZE; - leftover = AES_BLOCK_SIZE; - } - - if (sctx->count) { - data_len = sctx->count; - in_sg = nx_build_sg_list(nx_ctx->in_sg, - (u8 *) sctx->buffer, - &data_len, - max_sg_len); - if (data_len != sctx->count) { - rc = -EINVAL; - goto out; - } - } + to_process = total & ~(AES_BLOCK_SIZE - 1); - data_len = to_process - sctx->count; in_sg = nx_build_sg_list(in_sg, (u8 *) data, - &data_len, + &to_process, max_sg_len); - if (data_len != to_process - sctx->count) { - rc = -EINVAL; - goto out; - } - nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); /* we've hit the nx chip previously and we're updating again, * so copy over the partial digest */ - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - memcpy(csbcpb->cpb.aes_xcbc.cv, - csbcpb->cpb.aes_xcbc.out_cv_mac, - AES_BLOCK_SIZE); - } + memcpy(csbcpb->cpb.aes_xcbc.cv, + csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); - NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; @@ -271,28 +228,24 @@ static int nx_xcbc_update(struct shash_desc *desc, atomic_inc(&(nx_ctx->stats->aes_ops)); - /* everything after the first update is continuation */ - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - total -= to_process; - data += to_process - sctx->count; - sctx->count = 0; + data += to_process; in_sg = nx_ctx->in_sg; - } while (leftover > AES_BLOCK_SIZE); + } while (total >= AES_BLOCK_SIZE); - /* copy the leftover back into the state struct */ - memcpy(sctx->buffer, data, leftover); - sctx->count = leftover; + rc = total; + memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } -static int nx_xcbc_final(struct shash_desc *desc, u8 *out) +static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src, + unsigned int nbytes, u8 *out) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct xcbc_state *sctx = shash_desc_ctx(desc); - struct 
nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; @@ -301,12 +254,10 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) spin_lock_irqsave(&nx_ctx->lock, irq_flags); - if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { - /* we've hit the nx chip previously, now we're finalizing, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.aes_xcbc.cv, - csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); - } else if (sctx->count == 0) { + if (nbytes) { + /* non-zero final, so copy over the partial digest */ + memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE); + } else { /* * we've never seen an update, so this is a 0 byte op. The * hardware cannot handle a 0 byte op, so just ECB to @@ -320,11 +271,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out) * this is not an intermediate operation */ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - len = sctx->count; - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, - &len, nx_ctx->ap->sglen); + len = nbytes; + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, + nx_ctx->ap->sglen); - if (len != sctx->count) { + if (len != nbytes) { rc = -EINVAL; goto out; } @@ -362,18 +313,19 @@ struct shash_alg nx_shash_aes_xcbc_alg = { .digestsize = AES_BLOCK_SIZE, .init = nx_xcbc_init, .update = nx_xcbc_update, - .final = nx_xcbc_final, + .finup = nx_xcbc_finup, .setkey = nx_xcbc_set_key, .descsize = sizeof(struct xcbc_state), - .statesize = sizeof(struct xcbc_state), + .init_tfm = nx_crypto_ctx_aes_xcbc_init2, + .exit_tfm = nx_crypto_ctx_shash_exit, .base = { .cra_name = "xcbc(aes)", .cra_driver_name = "xcbc-aes-nx", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINAL_NONZERO, .cra_blocksize = AES_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_aes_xcbc_init2, - .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index c3bebf0feabe..5b29dd026df2 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -9,9 +9,12 @@ #include <crypto/internal/hash.h> #include <crypto/sha2.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/vio.h> -#include <asm/byteorder.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/unaligned.h> #include "nx_csbcpb.h" #include "nx.h" @@ -19,12 +22,11 @@ struct sha256_state_be { __be32 state[SHA256_DIGEST_SIZE / 4]; u64 count; - u8 buf[SHA256_BLOCK_SIZE]; }; -static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm) +static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm); int err; err = nx_crypto_ctx_sha_init(tfm); @@ -40,11 +42,10 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm) return 0; } -static int nx_sha256_init(struct shash_desc *desc) { +static int nx_sha256_init(struct shash_desc *desc) +{ struct sha256_state_be *sctx = shash_desc_ctx(desc); - memset(sctx, 0, sizeof *sctx); - sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); sctx->state[2] = __cpu_to_be32(SHA256_H2); @@ -61,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) { static int nx_sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + struct nx_crypto_ctx *nx_ctx = 
crypto_shash_ctx(desc->tfm); struct sha256_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + u64 to_process, leftover, total = len; struct nx_sg *out_sg; - u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; int data_len; u32 max_sg_len; - u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); - /* 2 cases for total data len: - * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 - * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover - */ - total = (sctx->count % SHA256_BLOCK_SIZE) + len; - if (total < SHA256_BLOCK_SIZE) { - memcpy(sctx->buf + buf_len, data, len); - sctx->count += len; - goto out; - } - memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; @@ -105,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, } do { - int used_sgs = 0; struct nx_sg *in_sg = nx_ctx->in_sg; - if (buf_len) { - data_len = buf_len; - in_sg = nx_build_sg_list(in_sg, - (u8 *) sctx->buf, - &data_len, - max_sg_len); - - if (data_len != buf_len) { - rc = -EINVAL; - goto out; - } - used_sgs = in_sg - nx_ctx->in_sg; - } + to_process = total & ~(SHA256_BLOCK_SIZE - 1); - /* to_process: SHA256_BLOCK_SIZE aligned chunk to be - * processed in this iteration. This value is restricted - * by sg list limits and number of sgs we already used - * for leftover data. (see above) - * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len, - * but because data may not be aligned, we need to account - * for that too. */ - to_process = min_t(u64, total, - (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE); - to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); - - data_len = to_process - buf_len; + data_len = to_process; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - to_process = data_len + buf_len; + to_process = data_len; leftover = total - to_process; /* @@ -162,26 +127,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha256_ops)); total -= to_process; - data += to_process - buf_len; - buf_len = 0; - + data += to_process; + sctx->count += to_process; } while (leftover >= SHA256_BLOCK_SIZE); - /* copy the leftover back into the state struct */ - if (leftover) - memcpy(sctx->buf, data, leftover); - - sctx->count += len; + rc = leftover; memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } -static int nx_sha256_final(struct shash_desc *desc, u8 *out) +static int nx_sha256_finup(struct shash_desc *desc, const u8 *src, + unsigned int nbytes, u8 *out) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct sha256_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; @@ -197,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* final is represented by continuing the operation and indicating that - * this is not an intermediate operation */ - if (sctx->count >= SHA256_BLOCK_SIZE) { - /* we've hit the nx chip previously, now we're finalizing, 
- * so copy over the partial digest */ - memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE); - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - } else { - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; - } + * this is not an intermediate operation + * copy over the partial digest */ + memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE); + NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + sctx->count += nbytes; csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); - len = sctx->count & (SHA256_BLOCK_SIZE - 1); - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, - &len, max_sg_len); + len = nbytes; + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len); - if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { + if (len != nbytes) { rc = -EINVAL; goto out; } @@ -251,18 +206,34 @@ out: static int nx_sha256_export(struct shash_desc *desc, void *out) { struct sha256_state_be *sctx = shash_desc_ctx(desc); + union { + u8 *u8; + u32 *u32; + u64 *u64; + } p = { .u8 = out }; + int i; - memcpy(out, sctx, sizeof(*sctx)); + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++) + put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++); + put_unaligned(sctx->count, p.u64++); return 0; } static int nx_sha256_import(struct shash_desc *desc, const void *in) { struct sha256_state_be *sctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u32 *u32; + const u64 *u64; + } p = { .u8 = in }; + int i; - memcpy(sctx, in, sizeof(*sctx)); + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++) + sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++)); + sctx->count = get_unaligned(p.u64++); return 0; } @@ -270,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, .init = nx_sha256_init, .update = nx_sha256_update, - .final = nx_sha256_final, + .finup = nx_sha256_finup, .export = nx_sha256_export, .import = nx_sha256_import, + .init_tfm = nx_crypto_ctx_sha256_init, + .exit_tfm = nx_crypto_ctx_shash_exit, .descsize = sizeof(struct sha256_state_be), .statesize = sizeof(struct sha256_state_be), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-nx", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_sha256_init, - .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index 1ffb40d2c324..f74776b7d7d7 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -9,8 +9,12 @@ #include <crypto/internal/hash.h> #include <crypto/sha2.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/vio.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/unaligned.h> #include "nx_csbcpb.h" #include "nx.h" @@ -18,12 +22,11 @@ struct sha512_state_be { __be64 state[SHA512_DIGEST_SIZE / 8]; u64 count[2]; - u8 buf[SHA512_BLOCK_SIZE]; }; -static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm) +static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm) { - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm); int err; err = nx_crypto_ctx_sha_init(tfm); @@ -43,8 +46,6 @@ static int nx_sha512_init(struct shash_desc *desc) { struct 
sha512_state_be *sctx = shash_desc_ctx(desc); - memset(sctx, 0, sizeof *sctx); - sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); sctx->state[2] = __cpu_to_be64(SHA512_H2); @@ -54,6 +55,7 @@ static int nx_sha512_init(struct shash_desc *desc) sctx->state[6] = __cpu_to_be64(SHA512_H6); sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->count[0] = 0; + sctx->count[1] = 0; return 0; } @@ -61,30 +63,18 @@ static int nx_sha512_init(struct shash_desc *desc) static int nx_sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct sha512_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + u64 to_process, leftover, total = len; struct nx_sg *out_sg; - u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; int data_len; u32 max_sg_len; - u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); - /* 2 cases for total data len: - * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 - * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover - */ - total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len; - if (total < SHA512_BLOCK_SIZE) { - memcpy(sctx->buf + buf_len, data, len); - sctx->count[0] += len; - goto out; - } - memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; @@ -105,45 +95,17 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, } do { - int used_sgs = 0; struct nx_sg *in_sg = nx_ctx->in_sg; - if (buf_len) { - data_len = buf_len; - in_sg = nx_build_sg_list(in_sg, - (u8 *) sctx->buf, - &data_len, max_sg_len); - - if (data_len != buf_len) { - rc = -EINVAL; - goto out; - } - used_sgs = in_sg - nx_ctx->in_sg; - } + to_process = total & ~(SHA512_BLOCK_SIZE - 1); - /* to_process: SHA512_BLOCK_SIZE aligned chunk to be - * processed in this iteration. This value is restricted - * by sg list limits and number of sgs we already used - * for leftover data. (see above) - * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len, - * but because data may not be aligned, we need to account - * for that too. 
*/ - to_process = min_t(u64, total, - (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE); - to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); - - data_len = to_process - buf_len; + data_len = to_process; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); - if (data_len != (to_process - buf_len)) { - rc = -EINVAL; - goto out; - } - - to_process = data_len + buf_len; + to_process = data_len; leftover = total - to_process; /* @@ -166,30 +128,29 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha512_ops)); total -= to_process; - data += to_process - buf_len; - buf_len = 0; - + data += to_process; + sctx->count[0] += to_process; + if (sctx->count[0] < to_process) + sctx->count[1]++; } while (leftover >= SHA512_BLOCK_SIZE); - /* copy the leftover back into the state struct */ - if (leftover) - memcpy(sctx->buf, data, leftover); - sctx->count[0] += len; + rc = leftover; memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } -static int nx_sha512_final(struct shash_desc *desc, u8 *out) +static int nx_sha512_finup(struct shash_desc *desc, const u8 *src, + unsigned int nbytes, u8 *out) { struct sha512_state_be *sctx = shash_desc_ctx(desc); - struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u32 max_sg_len; - u64 count0; unsigned long irq_flags; + u64 count0, count1; int rc = 0; int len; @@ -201,30 +162,23 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* final is represented by continuing the operation and indicating that - * this is not an intermediate operation */ - if (sctx->count[0] >= SHA512_BLOCK_SIZE) { - /* we've hit the nx chip previously, now we're finalizing, - * so copy over the partial digest */ - memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, - SHA512_DIGEST_SIZE); - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - } else { - NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; - NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; - } - + * this is not an intermediate operation + * copy over the partial digest */ + memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; + NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; - count0 = sctx->count[0] * 8; + count0 = sctx->count[0] + nbytes; + count1 = sctx->count[1]; - csbcpb->cpb.sha512.message_bit_length_lo = count0; + csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3; + csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) | + (count0 >> 61); - len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); - in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, - max_sg_len); + len = nbytes; + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len); - if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { + if (len != nbytes) { rc = -EINVAL; goto out; } @@ -246,7 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); - atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes)); + atomic64_add(count0, &(nx_ctx->stats->sha512_bytes)); memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: @@ -257,18 
+211,34 @@ out: static int nx_sha512_export(struct shash_desc *desc, void *out) { struct sha512_state_be *sctx = shash_desc_ctx(desc); + union { + u8 *u8; + u64 *u64; + } p = { .u8 = out }; + int i; - memcpy(out, sctx, sizeof(*sctx)); + for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++) + put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++); + put_unaligned(sctx->count[0], p.u64++); + put_unaligned(sctx->count[1], p.u64++); return 0; } static int nx_sha512_import(struct shash_desc *desc, const void *in) { struct sha512_state_be *sctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u64 *u64; + } p = { .u8 = in }; + int i; - memcpy(sctx, in, sizeof(*sctx)); + for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++) + sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++)); + sctx->count[0] = get_unaligned(p.u64++); + sctx->count[1] = get_unaligned(p.u64++); return 0; } @@ -276,19 +246,20 @@ struct shash_alg nx_shash_sha512_alg = { .digestsize = SHA512_DIGEST_SIZE, .init = nx_sha512_init, .update = nx_sha512_update, - .final = nx_sha512_final, + .finup = nx_sha512_finup, .export = nx_sha512_export, .import = nx_sha512_import, + .init_tfm = nx_crypto_ctx_sha512_init, + .exit_tfm = nx_crypto_ctx_shash_exit, .descsize = sizeof(struct sha512_state_be), .statesize = sizeof(struct sha512_state_be), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-nx", .cra_priority = 300, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_init = nx_crypto_ctx_sha512_init, - .cra_exit = nx_crypto_ctx_exit, } }; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index a3b979193d9b..78135fb13f5c 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -7,11 +7,11 @@ * Author: Kent Yoder <yoder1@us.ibm.com> */ +#include <crypto/aes.h> #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> -#include <crypto/aes.h> +#include <crypto/internal/skcipher.h> #include <crypto/sha2.h> -#include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -124,8 +124,6 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, } if ((sg - sg_head) == sgmax) { - pr_err("nx: scatter/gather list overflow, pid: %d\n", - current->pid); sg++; break; } @@ -702,14 +700,14 @@ int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm) NX_MODE_AES_ECB); } -int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm) +int nx_crypto_ctx_sha_init(struct crypto_shash *tfm) { - return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA); + return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA); } -int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm) +int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm) { - return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES, NX_MODE_AES_XCBC_MAC); } @@ -744,6 +742,11 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm) kfree_sensitive(nx_ctx->kmem); } +void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm) +{ + nx_crypto_ctx_exit(crypto_shash_ctx(tfm)); +} + static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) { dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index e1b4b6927bec..36974f08490a 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -3,7 +3,11 @@ #ifndef __NX_H__ #define __NX_H__ 
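The nx-sha256 and nx-sha512 hunks above replace the old whole-struct memcpy in export()/import() with an explicit serialization: each big-endian state word is converted to CPU order and stored with put_unaligned(), followed by the running byte count (two counts for SHA-512). The plain-C sketch below illustrates that export layout for the SHA-256 case; the struct and helper names are hypothetical and ordinary memcpy stands in for the kernel's unaligned accessors:

    /*
     * Illustrative sketch only: eight state words converted from big-endian
     * to CPU order, followed by the 64-bit byte count.
     */
    #include <stdint.h>
    #include <string.h>

    struct sketch_sha256_state {
        uint32_t state[8];   /* stored big-endian, as in sha256_state_be */
        uint64_t count;      /* total bytes hashed so far */
    };

    static uint32_t sketch_be32_to_cpu(uint32_t be)
    {
        const uint8_t *p = (const uint8_t *)&be;

        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    static void sketch_export(const struct sketch_sha256_state *sctx, uint8_t *out)
    {
        uint64_t count = sctx->count;
        int i;

        for (i = 0; i < 8; i++) {
            uint32_t w = sketch_be32_to_cpu(sctx->state[i]);

            memcpy(out, &w, sizeof(w));   /* unaligned-safe store */
            out += sizeof(w);
        }
        memcpy(out, &count, sizeof(count));
    }

import() in the patch reverses the same layout with get_unaligned() and cpu_to_be32()/cpu_to_be64().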
+#include <asm/vio.h> #include <crypto/ctr.h> +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> #define NX_NAME "nx-crypto" #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" @@ -139,19 +143,20 @@ struct nx_crypto_ctx { } priv; }; -struct crypto_aead; +struct scatterlist; /* prototypes */ int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm); int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); -int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm); int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm); int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm); int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm); -int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_sha_init(struct crypto_shash *tfm); void nx_crypto_ctx_exit(struct crypto_tfm *tfm); void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm); void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm); +void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm); void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, u32 may_sleep); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 551dd32a8db0..1ecf5f6ac04e 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1086,10 +1086,7 @@ static struct attribute *omap_aes_attrs[] = { &dev_attr_fallback.attr, NULL, }; - -static const struct attribute_group omap_aes_attr_group = { - .attrs = omap_aes_attrs, -}; +ATTRIBUTE_GROUPS(omap_aes); static int omap_aes_probe(struct platform_device *pdev) { @@ -1215,12 +1212,6 @@ static int omap_aes_probe(struct platform_device *pdev) } } - err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group); - if (err) { - dev_err(dev, "could not create sysfs device attrs\n"); - goto err_aead_algs; - } - return 0; err_aead_algs: for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) { @@ -1277,8 +1268,6 @@ static void omap_aes_remove(struct platform_device *pdev) tasklet_kill(&dd->done_task); omap_aes_dma_cleanup(dd); pm_runtime_disable(dd->dev); - - sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group); } #ifdef CONFIG_PM_SLEEP @@ -1304,6 +1293,7 @@ static struct platform_driver omap_aes_driver = { .name = "omap-aes", .pm = &omap_aes_pm_ops, .of_match_table = omap_aes_of_match, + .dev_groups = omap_aes_groups, }, }; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 7021481bf027..56f192cb976d 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2039,10 +2039,7 @@ static struct attribute *omap_sham_attrs[] = { &dev_attr_fallback.attr, NULL, }; - -static const struct attribute_group omap_sham_attr_group = { - .attrs = omap_sham_attrs, -}; +ATTRIBUTE_GROUPS(omap_sham); static int omap_sham_probe(struct platform_device *pdev) { @@ -2158,12 +2155,6 @@ static int omap_sham_probe(struct platform_device *pdev) } } - err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group); - if (err) { - dev_err(dev, "could not create sysfs device attrs\n"); - goto err_algs; - } - return 0; err_algs: @@ -2210,8 +2201,6 @@ static void omap_sham_remove(struct platform_device *pdev) if (!dd->polling_mode) dma_release_channel(dd->dma_lch); - - sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group); } static struct platform_driver omap_sham_driver = { @@ -2220,6 +2209,7 @@ static struct platform_driver omap_sham_driver = { 
.driver = { .name = "omap-sham", .of_match_table = omap_sham_of_match, + .dev_groups = omap_sham_groups, }, }; diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index db9e84c0c9fb..329f60ad422e 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -7,59 +7,89 @@ * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> */ +#include <asm/cpu_device_id.h> #include <crypto/internal/hash.h> #include <crypto/padlock.h> #include <crypto/sha1.h> #include <crypto/sha2.h> +#include <linux/cpufeature.h> #include <linux/err.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/scatterlist.h> -#include <asm/cpu_device_id.h> -#include <asm/fpu/api.h> +#include <linux/module.h> -struct padlock_sha_desc { - struct shash_desc fallback; -}; +#define PADLOCK_SHA_DESCSIZE (128 + ((PADLOCK_ALIGNMENT - 1) & \ + ~(CRYPTO_MINALIGN - 1))) struct padlock_sha_ctx { - struct crypto_shash *fallback; + struct crypto_ahash *fallback; }; -static int padlock_sha_init(struct shash_desc *desc) +static inline void *padlock_shash_desc_ctx(struct shash_desc *desc) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + return PTR_ALIGN(shash_desc_ctx(desc), PADLOCK_ALIGNMENT); +} + +static int padlock_sha1_init(struct shash_desc *desc) +{ + struct sha1_state *sctx = padlock_shash_desc_ctx(desc); + + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, + }; + + return 0; +} + +static int padlock_sha256_init(struct shash_desc *desc) +{ + struct crypto_sha256_state *sctx = padlock_shash_desc_ctx(desc); - dctx->fallback.tfm = ctx->fallback; - return crypto_shash_init(&dctx->fallback); + sha256_block_init(sctx); + return 0; } static int padlock_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); + u8 *state = padlock_shash_desc_ctx(desc); + struct crypto_shash *tfm = desc->tfm; + int err, remain; + + remain = length - round_down(length, crypto_shash_blocksize(tfm)); + { + struct padlock_sha_ctx *ctx = crypto_shash_ctx(tfm); + HASH_REQUEST_ON_STACK(req, ctx->fallback); + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, data, NULL, length - remain); + err = crypto_ahash_import_core(req, state) ?: + crypto_ahash_update(req) ?: + crypto_ahash_export_core(req, state); + HASH_REQUEST_ZERO(req); + } - return crypto_shash_update(&dctx->fallback, data, length); + return err ?: remain; } static int padlock_sha_export(struct shash_desc *desc, void *out) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - - return crypto_shash_export(&dctx->fallback, out); + memcpy(out, padlock_shash_desc_ctx(desc), + crypto_shash_coresize(desc->tfm)); + return 0; } static int padlock_sha_import(struct shash_desc *desc, const void *in) { - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + unsigned int bs = crypto_shash_blocksize(desc->tfm); + unsigned int ss = crypto_shash_coresize(desc->tfm); + u64 *state = padlock_shash_desc_ctx(desc); + + memcpy(state, in, ss); + + /* Stop evil imports from generating a fault. 
*/ + state[ss / 8 - 1] &= ~(bs - 1); - dctx->fallback.tfm = ctx->fallback; - return crypto_shash_import(&dctx->fallback, in); + return 0; } static inline void padlock_output_block(uint32_t *src, @@ -69,65 +99,38 @@ static inline void padlock_output_block(uint32_t *src, *dst++ = swab32(*src++); } +static int padlock_sha_finup(struct shash_desc *desc, const u8 *in, + unsigned int count, u8 *out) +{ + struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); + HASH_REQUEST_ON_STACK(req, ctx->fallback); + + ahash_request_set_callback(req, 0, NULL, NULL); + ahash_request_set_virt(req, in, out, count); + return crypto_ahash_import_core(req, padlock_shash_desc_ctx(desc)) ?: + crypto_ahash_finup(req); +} + static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, unsigned int count, u8 *out) { /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. */ - char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct sha1_state state; - unsigned int space; - unsigned int leftover; - int err; - - err = crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; + struct sha1_state *state = padlock_shash_desc_ctx(desc); + u64 start = state->count; - if (state.count + count > ULONG_MAX) - return crypto_shash_finup(&dctx->fallback, in, count, out); - - leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1; - space = SHA1_BLOCK_SIZE - leftover; - if (space) { - if (count > space) { - err = crypto_shash_update(&dctx->fallback, in, space) ?: - crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; - count -= space; - in += space; - } else { - memcpy(state.buffer + leftover, in, count); - in = state.buffer; - count += leftover; - state.count &= ~(SHA1_BLOCK_SIZE - 1); - } - } - - memcpy(result, &state.state, SHA1_DIGEST_SIZE); + if (start + count > ULONG_MAX) + return padlock_sha_finup(desc, in, count, out); asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ : \ - : "c"((unsigned long)state.count + count), \ - "a"((unsigned long)state.count), \ - "S"(in), "D"(result)); - - padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); + : "c"((unsigned long)start + count), \ + "a"((unsigned long)start), \ + "S"(in), "D"(state)); -out: - return err; -} - -static int padlock_sha1_final(struct shash_desc *desc, u8 *out) -{ - const u8 *buf = (void *)desc; - - return padlock_sha1_finup(desc, buf, 0, out); + padlock_output_block(state->state, (uint32_t *)out, 5); + return 0; } static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, @@ -136,78 +139,46 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. 
*/ - char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); - struct sha256_state state; - unsigned int space; - unsigned int leftover; - int err; - - err = crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; + struct sha256_state *state = padlock_shash_desc_ctx(desc); + u64 start = state->count; - if (state.count + count > ULONG_MAX) - return crypto_shash_finup(&dctx->fallback, in, count, out); - - leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1; - space = SHA256_BLOCK_SIZE - leftover; - if (space) { - if (count > space) { - err = crypto_shash_update(&dctx->fallback, in, space) ?: - crypto_shash_export(&dctx->fallback, &state); - if (err) - goto out; - count -= space; - in += space; - } else { - memcpy(state.buf + leftover, in, count); - in = state.buf; - count += leftover; - state.count &= ~(SHA1_BLOCK_SIZE - 1); - } - } - - memcpy(result, &state.state, SHA256_DIGEST_SIZE); + if (start + count > ULONG_MAX) + return padlock_sha_finup(desc, in, count, out); asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ : \ - : "c"((unsigned long)state.count + count), \ - "a"((unsigned long)state.count), \ - "S"(in), "D"(result)); + : "c"((unsigned long)start + count), \ + "a"((unsigned long)start), \ + "S"(in), "D"(state)); - padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); - -out: - return err; -} - -static int padlock_sha256_final(struct shash_desc *desc, u8 *out) -{ - const u8 *buf = (void *)desc; - - return padlock_sha256_finup(desc, buf, 0, out); + padlock_output_block(state->state, (uint32_t *)out, 8); + return 0; } static int padlock_init_tfm(struct crypto_shash *hash) { const char *fallback_driver_name = crypto_shash_alg_name(hash); struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash); - struct crypto_shash *fallback_tfm; + struct crypto_ahash *fallback_tfm; /* Allocate a fallback and abort if it failed. 
*/ - fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, - CRYPTO_ALG_NEED_FALLBACK); + fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); if (IS_ERR(fallback_tfm)) { printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", fallback_driver_name); return PTR_ERR(fallback_tfm); } + if (crypto_shash_statesize(hash) != + crypto_ahash_statesize(fallback_tfm)) { + crypto_free_ahash(fallback_tfm); + return -EINVAL; + } + ctx->fallback = fallback_tfm; - hash->descsize += crypto_shash_descsize(fallback_tfm); + return 0; } @@ -215,26 +186,27 @@ static void padlock_exit_tfm(struct crypto_shash *hash) { struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash); - crypto_free_shash(ctx->fallback); + crypto_free_ahash(ctx->fallback); } static struct shash_alg sha1_alg = { .digestsize = SHA1_DIGEST_SIZE, - .init = padlock_sha_init, + .init = padlock_sha1_init, .update = padlock_sha_update, .finup = padlock_sha1_finup, - .final = padlock_sha1_final, .export = padlock_sha_export, .import = padlock_sha_import, .init_tfm = padlock_init_tfm, .exit_tfm = padlock_exit_tfm, - .descsize = sizeof(struct padlock_sha_desc), - .statesize = sizeof(struct sha1_state), + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-padlock", .cra_priority = PADLOCK_CRA_PRIORITY, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, @@ -243,21 +215,22 @@ static struct shash_alg sha1_alg = { static struct shash_alg sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, - .init = padlock_sha_init, + .init = padlock_sha256_init, .update = padlock_sha_update, .finup = padlock_sha256_finup, - .final = padlock_sha256_final, + .init_tfm = padlock_init_tfm, .export = padlock_sha_export, .import = padlock_sha_import, - .init_tfm = padlock_init_tfm, .exit_tfm = padlock_exit_tfm, - .descsize = sizeof(struct padlock_sha_desc), - .statesize = sizeof(struct sha256_state), + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = sizeof(struct crypto_sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-padlock", .cra_priority = PADLOCK_CRA_PRIORITY, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, @@ -266,207 +239,58 @@ static struct shash_alg sha256_alg = { /* Add two shash_alg instance for hardware-implemented * * multiple-parts hash supported by VIA Nano Processor.*/ -static int padlock_sha1_init_nano(struct shash_desc *desc) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - *sctx = (struct sha1_state){ - .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, - }; - - return 0; -} static int padlock_sha1_update_nano(struct shash_desc *desc, - const u8 *data, unsigned int len) + const u8 *src, unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial, done; - const u8 *src; /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ - u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - - partial = sctx->count & 0x3f; - sctx->count += len; - done = 0; - src 
= data; - memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); - - if ((partial + len) >= SHA1_BLOCK_SIZE) { - - /* Append the bytes in state's buffer to a block to handle */ - if (partial) { - done = -partial; - memcpy(sctx->buffer + partial, data, - done + SHA1_BLOCK_SIZE); - src = sctx->buffer; - asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" - : "+S"(src), "+D"(dst) \ - : "a"((long)-1), "c"((unsigned long)1)); - done += SHA1_BLOCK_SIZE; - src = data + done; - } - - /* Process the left bytes from the input data */ - if (len - done >= SHA1_BLOCK_SIZE) { - asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" - : "+S"(src), "+D"(dst) - : "a"((long)-1), - "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE))); - done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE); - src = data + done; - } - partial = 0; - } - memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); - memcpy(sctx->buffer + partial, src, len - done); - - return 0; -} - -static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out) -{ - struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); - unsigned int partial, padlen; - __be64 bits; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_be64(state->count << 3); - - /* Pad out to 56 mod 64 */ - partial = state->count & 0x3f; - padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); - padlock_sha1_update_nano(desc, padding, padlen); - - /* Append length field bytes */ - padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits)); - - /* Swap to output */ - padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5); - - return 0; -} - -static int padlock_sha256_init_nano(struct shash_desc *desc) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - *sctx = (struct sha256_state){ - .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \ - SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7}, - }; - - return 0; + struct sha1_state *state = padlock_shash_desc_ctx(desc); + int blocks = len / SHA1_BLOCK_SIZE; + + len -= blocks * SHA1_BLOCK_SIZE; + state->count += blocks * SHA1_BLOCK_SIZE; + + /* Process the left bytes from the input data */ + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(state) + : "a"((long)-1), + "c"((unsigned long)blocks)); + return len; } -static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data, +static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *src, unsigned int len) { - struct sha256_state *sctx = shash_desc_ctx(desc); - unsigned int partial, done; - const u8 *src; /*The PHE require the out buffer must 128 bytes and 16-bytes aligned*/ - u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ - ((aligned(STACK_ALIGN))); - u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - - partial = sctx->count & 0x3f; - sctx->count += len; - done = 0; - src = data; - memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); - - if ((partial + len) >= SHA256_BLOCK_SIZE) { - - /* Append the bytes in state's buffer to a block to handle */ - if (partial) { - done = -partial; - memcpy(sctx->buf + partial, data, - done + SHA256_BLOCK_SIZE); - src = sctx->buf; - asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" - : "+S"(src), "+D"(dst) - : "a"((long)-1), "c"((unsigned long)1)); - done += SHA256_BLOCK_SIZE; - src = data + done; - } - - /* Process the left bytes from input data*/ - if (len - done >= SHA256_BLOCK_SIZE) { - asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" - : "+S"(src), "+D"(dst) - : "a"((long)-1), - "c"((unsigned long)((len - done) / 64))); - done += ((len - done) - (len - done) % 64); - src = 
data + done; - } - partial = 0; - } - memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); - memcpy(sctx->buf + partial, src, len - done); - - return 0; -} - -static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *state = - (struct sha256_state *)shash_desc_ctx(desc); - unsigned int partial, padlen; - __be64 bits; - static const u8 padding[64] = { 0x80, }; - - bits = cpu_to_be64(state->count << 3); - - /* Pad out to 56 mod 64 */ - partial = state->count & 0x3f; - padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); - padlock_sha256_update_nano(desc, padding, padlen); - - /* Append length field bytes */ - padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits)); - - /* Swap to output */ - padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); - - return 0; -} - -static int padlock_sha_export_nano(struct shash_desc *desc, - void *out) -{ - int statesize = crypto_shash_statesize(desc->tfm); - void *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, statesize); - return 0; -} - -static int padlock_sha_import_nano(struct shash_desc *desc, - const void *in) -{ - int statesize = crypto_shash_statesize(desc->tfm); - void *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, statesize); - return 0; + struct crypto_sha256_state *state = padlock_shash_desc_ctx(desc); + int blocks = len / SHA256_BLOCK_SIZE; + + len -= blocks * SHA256_BLOCK_SIZE; + state->count += blocks * SHA256_BLOCK_SIZE; + + /* Process the left bytes from input data*/ + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(state) + : "a"((long)-1), + "c"((unsigned long)blocks)); + return len; } static struct shash_alg sha1_alg_nano = { .digestsize = SHA1_DIGEST_SIZE, - .init = padlock_sha1_init_nano, + .init = padlock_sha1_init, .update = padlock_sha1_update_nano, - .final = padlock_sha1_final_nano, - .export = padlock_sha_export_nano, - .import = padlock_sha_import_nano, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = padlock_sha1_finup, + .export = padlock_sha_export, + .import = padlock_sha_import, + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "sha1-padlock-nano", .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -474,17 +298,19 @@ static struct shash_alg sha1_alg_nano = { static struct shash_alg sha256_alg_nano = { .digestsize = SHA256_DIGEST_SIZE, - .init = padlock_sha256_init_nano, + .init = padlock_sha256_init, .update = padlock_sha256_update_nano, - .final = padlock_sha256_final_nano, - .export = padlock_sha_export_nano, - .import = padlock_sha_import_nano, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), + .finup = padlock_sha256_finup, + .export = padlock_sha_export, + .import = padlock_sha_import, + .descsize = PADLOCK_SHA_DESCSIZE, + .statesize = sizeof(struct crypto_sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-padlock-nano", .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 69d6019d8abc..d6928ebe9526 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ 
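The padlock-sha rewrite above drops the driver-side partial-block buffering (the removed sctx->buffer/sctx->buf handling) and follows the same block-only convention as the nx changes: update() consumes only whole blocks and returns the number of trailing bytes it left for the crypto core to buffer. A standalone sketch of that contract, assuming a hypothetical block_fn() compression step:

    #include <stddef.h>
    #include <stdint.h>

    #define BLOCK_SIZE 64u

    static void block_fn(uint32_t state[8], const uint8_t *block)
    {
        /* stand-in for the real compression step (e.g. REP XSHA256) */
        (void)state;
        (void)block;
    }

    /* Hash whole blocks only; report how many bytes were not consumed. */
    static size_t block_only_update(uint32_t state[8], uint64_t *count,
                                    const uint8_t *data, size_t len)
    {
        size_t blocks = len / BLOCK_SIZE;
        size_t remain = len - blocks * BLOCK_SIZE;

        *count += blocks * BLOCK_SIZE;
        while (blocks--) {
            block_fn(state, data);
            data += BLOCK_SIZE;
        }
        return remain;   /* caller buffers these bytes for the next call */
    }

The nano update paths in the same file follow the identical shape, with the REP XSHA instructions taking the place of block_fn().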
b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -52,12 +52,11 @@ static int rk_ahash_digest_fb(struct ahash_request *areq) algt->stat_fb++; ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); - rctx->fallback_req.base.flags = areq->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = areq->nbytes; - rctx->fallback_req.src = areq->src; - rctx->fallback_req.result = areq->result; + ahash_request_set_callback(&rctx->fallback_req, + areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + areq->base.complete, areq->base.data); + ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result, + areq->nbytes); return crypto_ahash_digest(&rctx->fallback_req); } @@ -124,8 +123,9 @@ static int rk_ahash_init(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -137,10 +137,10 @@ static int rk_ahash_update(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -152,9 +152,10 @@ static int rk_ahash_final(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -166,12 +167,11 @@ static int rk_ahash_finup(struct ahash_request *req) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -183,8 +183,9 @@ static int rk_ahash_import(struct ahash_request *req, const void *in) struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -196,8 +197,9 @@ static int rk_ahash_export(struct ahash_request *req, void *out) struct 
rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index b4c3c14dafd5..b829c84f60f2 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -9,11 +9,17 @@ // // Hash part based on omap-sham.c driver. +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/md5.h> +#include <crypto/scatterwalk.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> #include <linux/clk.h> -#include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/err.h> -#include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -22,17 +28,9 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> - -#include <crypto/ctr.h> -#include <crypto/aes.h> -#include <crypto/algapi.h> -#include <crypto/scatterwalk.h> - -#include <crypto/hash.h> -#include <crypto/md5.h> -#include <crypto/sha1.h> -#include <crypto/sha2.h> -#include <crypto/internal/hash.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> #define _SBF(s, v) ((v) << (s)) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 091612b066f1..fdc0b2486069 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -1415,22 +1415,13 @@ static int sa_sha_run(struct ahash_request *req) (auth_len >= SA_UNSAFE_DATA_SZ_MIN && auth_len <= SA_UNSAFE_DATA_SZ_MAX)) { struct ahash_request *subreq = &rctx->fallback_req; - int ret = 0; + int ret; ahash_request_set_tfm(subreq, ctx->fallback.ahash); - subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - - crypto_ahash_init(subreq); - - subreq->nbytes = auth_len; - subreq->src = req->src; - subreq->result = req->result; - - ret |= crypto_ahash_update(subreq); - - subreq->nbytes = 0; + ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(subreq, req->src, req->result, auth_len); - ret |= crypto_ahash_final(subreq); + ret = crypto_ahash_digest(subreq); return ret; } @@ -1502,8 +1493,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) return ret; if (alg_base) { - ctx->shash = crypto_alloc_shash(alg_base, 0, - CRYPTO_ALG_NEED_FALLBACK); + ctx->shash = crypto_alloc_shash(alg_base, 0, 0); if (IS_ERR(ctx->shash)) { dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", alg_base); @@ -1511,8 +1501,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) } /* for fallback */ ctx->fallback.ahash = - crypto_alloc_ahash(alg_base, 0, - CRYPTO_ALG_NEED_FALLBACK); + crypto_alloc_ahash(alg_base, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(ctx->fallback.ahash)) { dev_err(ctx->dev_data->dev, "Could not load fallback driver\n"); @@ -1546,54 +1535,38 @@ static int sa_sha_init(struct ahash_request *req) crypto_ahash_digestsize(tfm), rctx); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + 
ahash_request_set_crypt(&rctx->fallback_req, NULL, NULL, 0); return crypto_ahash_init(&rctx->fallback_req); } static int sa_sha_update(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } static int sa_sha_final(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } static int sa_sha_finup(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); - ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, req->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -1605,8 +1578,7 @@ static int sa_sha_import(struct ahash_request *req, const void *in) struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -1614,12 +1586,9 @@ static int sa_sha_import(struct ahash_request *req, const void *in) static int sa_sha_export(struct ahash_request *req, void *out) { struct sa_sha_req_ctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *subreq = &rctx->fallback_req; - ahash_request_set_tfm(subreq, ctx->fallback.ahash); - subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); return crypto_ahash_export(subreq, out); } diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 42d007b7af45..d09b4aaeecef 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -117,8 +117,9 @@ static int tegra_sha_fallback_init(struct ahash_request *req) struct tegra_sha_ctx *ctx = 
crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_init(&rctx->fallback_req); } @@ -130,10 +131,10 @@ static int tegra_sha_fallback_update(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes); return crypto_ahash_update(&rctx->fallback_req); } @@ -145,9 +146,10 @@ static int tegra_sha_fallback_final(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0); return crypto_ahash_final(&rctx->fallback_req); } @@ -159,12 +161,11 @@ static int tegra_sha_fallback_finup(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); return crypto_ahash_finup(&rctx->fallback_req); } @@ -176,12 +177,11 @@ static int tegra_sha_fallback_digest(struct ahash_request *req) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; - - rctx->fallback_req.nbytes = req->nbytes; - rctx->fallback_req.src = req->src; - rctx->fallback_req.result = req->result; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); + ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, + req->nbytes); return crypto_ahash_digest(&rctx->fallback_req); } @@ -193,8 +193,9 @@ static int tegra_sha_fallback_import(struct ahash_request *req, const void *in) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_import(&rctx->fallback_req, in); } @@ -206,8 +207,9 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out) struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); 
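A pattern repeated across the rockchip, sa2ul and tegra hunks is replacing direct writes to the fallback request's base.flags, nbytes, src and result fields with the ahash_request_set_callback() and ahash_request_set_crypt() helpers. The standalone model below (hypothetical sketch types, not the kernel API itself) captures what the two helpers record:

    #include <stddef.h>
    #include <stdint.h>

    struct sketch_hash_req {
        uint32_t flags;                      /* e.g. may-sleep flag */
        void (*complete)(void *data, int err);
        void *data;
        const uint8_t *src;
        uint8_t *result;
        size_t nbytes;
    };

    /* Records the completion callback, its context and the request flags. */
    static void sketch_req_set_callback(struct sketch_hash_req *req, uint32_t flags,
                                        void (*complete)(void *, int), void *data)
    {
        req->flags = flags;
        req->complete = complete;
        req->data = data;
    }

    /* Records the (source, result, length) tuple for the operation. */
    static void sketch_req_set_crypt(struct sketch_hash_req *req, const uint8_t *src,
                                     uint8_t *result, size_t nbytes)
    {
        req->src = src;
        req->result = result;
        req->nbytes = nbytes;
    }

In the rockchip and tegra hunks the helper form also forwards the parent request's completion callback and context, which the open-coded assignments left unset.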
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); - rctx->fallback_req.base.flags = req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP; + ahash_request_set_callback(&rctx->fallback_req, + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, + req->base.complete, req->base.data); return crypto_ahash_export(&rctx->fallback_req, out); } diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c index 580649f9bff8..5813017b6b79 100644 --- a/drivers/crypto/xilinx/zynqmp-sha.c +++ b/drivers/crypto/xilinx/zynqmp-sha.c @@ -3,18 +3,18 @@ * Xilinx ZynqMP SHA Driver. * Copyright (c) 2022 Xilinx Inc. */ -#include <linux/cacheflush.h> -#include <crypto/hash.h> #include <crypto/internal/hash.h> #include <crypto/sha3.h> -#include <linux/crypto.h> +#include <linux/cacheflush.h> +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/dma-mapping.h> +#include <linux/err.h> #include <linux/firmware/xlnx-zynqmp.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/spinlock.h> #include <linux/platform_device.h> #define ZYNQMP_DMA_BIT_MASK 32U @@ -36,13 +36,11 @@ struct zynqmp_sha_tfm_ctx { struct crypto_shash *fbk_tfm; }; -struct zynqmp_sha_desc_ctx { - struct shash_desc fbk_req; -}; - static dma_addr_t update_dma_addr, final_dma_addr; static char *ubuf, *fbuf; +static DEFINE_SPINLOCK(zynqmp_sha_lock); + static int zynqmp_sha_init_tfm(struct crypto_shash *hash) { const char *fallback_driver_name = crypto_shash_alg_name(hash); @@ -60,8 +58,13 @@ static int zynqmp_sha_init_tfm(struct crypto_shash *hash) if (IS_ERR(fallback_tfm)) return PTR_ERR(fallback_tfm); + if (crypto_shash_descsize(hash) < + crypto_shash_statesize(tfm_ctx->fbk_tfm)) { + crypto_free_shash(fallback_tfm); + return -EINVAL; + } + tfm_ctx->fbk_tfm = fallback_tfm; - hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm); return 0; } @@ -70,61 +73,55 @@ static void zynqmp_sha_exit_tfm(struct crypto_shash *hash) { struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash); - if (tfm_ctx->fbk_tfm) { - crypto_free_shash(tfm_ctx->fbk_tfm); - tfm_ctx->fbk_tfm = NULL; - } + crypto_free_shash(tfm_ctx->fbk_tfm); +} - memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx)); +static int zynqmp_sha_continue(struct shash_desc *desc, + struct shash_desc *fbdesc, int err) +{ + err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc)); + shash_desc_zero(fbdesc); + return err; } static int zynqmp_sha_init(struct shash_desc *desc) { - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct crypto_shash *fbtfm = tctx->fbk_tfm; + SHASH_DESC_ON_STACK(fbdesc, fbtfm); + int err; - dctx->fbk_req.tfm = tctx->fbk_tfm; - return crypto_shash_init(&dctx->fbk_req); + fbdesc->tfm = fbtfm; + err = crypto_shash_init(fbdesc); + return zynqmp_sha_continue(desc, fbdesc, err); } static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length) { - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); - - return crypto_shash_update(&dctx->fbk_req, data, length); -} - -static int zynqmp_sha_final(struct shash_desc *desc, u8 *out) -{ - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); + struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct crypto_shash *fbtfm = tctx->fbk_tfm; + SHASH_DESC_ON_STACK(fbdesc, fbtfm); + int err; - return crypto_shash_final(&dctx->fbk_req, out); + fbdesc->tfm = fbtfm; + err = crypto_shash_import(fbdesc, 
shash_desc_ctx(desc)) ?: + crypto_shash_update(fbdesc, data, length); + return zynqmp_sha_continue(desc, fbdesc, err); } static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); - - return crypto_shash_finup(&dctx->fbk_req, data, length, out); -} - -static int zynqmp_sha_import(struct shash_desc *desc, const void *in) -{ - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct crypto_shash *fbtfm = tctx->fbk_tfm; + SHASH_DESC_ON_STACK(fbdesc, fbtfm); - dctx->fbk_req.tfm = tctx->fbk_tfm; - return crypto_shash_import(&dctx->fbk_req, in); + fbdesc->tfm = fbtfm; + return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?: + crypto_shash_finup(fbdesc, data, length, out); } -static int zynqmp_sha_export(struct shash_desc *desc, void *out) -{ - struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc); - - return crypto_shash_export(&dctx->fbk_req, out); -} - -static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { unsigned int remaining_len = len; int update_size; @@ -159,26 +156,27 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i return ret; } +static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +{ + scoped_guard(spinlock_bh, &zynqmp_sha_lock) + return __zynqmp_sha_digest(desc, data, len, out); +} + static struct zynqmp_sha_drv_ctx sha3_drv_ctx = { .sha3_384 = { .init = zynqmp_sha_init, .update = zynqmp_sha_update, - .final = zynqmp_sha_final, .finup = zynqmp_sha_finup, .digest = zynqmp_sha_digest, - .export = zynqmp_sha_export, - .import = zynqmp_sha_import, .init_tfm = zynqmp_sha_init_tfm, .exit_tfm = zynqmp_sha_exit_tfm, - .descsize = sizeof(struct zynqmp_sha_desc_ctx), - .statesize = sizeof(struct sha3_state), + .descsize = SHA3_384_EXPORT_SIZE, .digestsize = SHA3_384_DIGEST_SIZE, .base = { .cra_name = "sha3-384", .cra_driver_name = "zynqmp-sha3-384", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA3_384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx), diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index c9aba2304de7..5d3c0ae6b342 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -10,7 +10,7 @@ #include <linux/interrupt.h> #include <linux/dca.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> /* either a kernel change is needed, or we need something like this in kernel */ #ifndef CONFIG_SMP diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index dcd7008fe06b..20333608b983 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -2131,8 +2131,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev) edac->irq_chip.name = pdev->dev.of_node->name; edac->irq_chip.irq_mask = a10_eccmgr_irq_mask; edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask; - edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64, - &a10_eccmgr_ic_ops, edac); + edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node), + 64, &a10_eccmgr_ic_ops, edac); if (!edac->domain) { dev_err(&pdev->dev, "Error adding IRQ domain\n"); return -ENOMEM; diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 
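The zynqmp-sha hunk above serializes its digest path with scoped_guard(spinlock_bh, &zynqmp_sha_lock), relying on scope-based cleanup rather than explicit unlock calls. A user-space analogue using the compiler cleanup attribute and a pthread mutex, purely to illustrate the idea:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static char shared_buf[128];

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* The lock is dropped automatically on every return path. */
    static int guarded_digest(const char *msg)
    {
        pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &buf_lock;

        pthread_mutex_lock(guard);
        return snprintf(shared_buf, sizeof(shared_buf), "%s", msg);
    }

In the hunk above, the guard serializes __zynqmp_sha_digest(), which operates on the driver's single pair of static DMA buffers.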
90f0eb7cc5b9..58b1482a0fbb 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2,8 +2,8 @@ #include <linux/ras.h> #include <linux/string_choices.h> #include "amd64_edac.h" -#include <asm/amd_nb.h> -#include <asm/amd_node.h> +#include <asm/amd/nb.h> +#include <asm/amd/node.h> static struct edac_pci_ctl_info *pci_ctl; @@ -2942,13 +2942,13 @@ static void dct_read_mc_regs(struct amd64_pvt *pvt) * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since * those are Read-As-Zero. */ - rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); + rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem); edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); /* Check first whether TOP_MEM2 is enabled: */ - rdmsrl(MSR_AMD64_SYSCFG, msr_val); + rdmsrq(MSR_AMD64_SYSCFG, msr_val); if (msr_val & BIT(21)) { - rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); + rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2); edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); } else { edac_dbg(0, " TOP_MEM2 disabled\n"); diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 204834149579..5ddd83dc94ba 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -52,6 +52,7 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <asm/mce.h> +#include <asm/msr.h> #include "edac_module.h" #define EDAC_MOD_STR "ie31200_edac" diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 50d74d3bf0f5..af3c12284a1e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -3,6 +3,7 @@ #include <linux/slab.h> #include <asm/cpu.h> +#include <asm/msr.h> #include "mce_amd.h" diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index d23a1b9fed75..2f173391b63d 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -85,7 +85,6 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \ lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o lib-$(CONFIG_X86) += x86-stub.o smbios.o -lib-$(CONFIG_EFI_MIXED) += x86-mixed.o lib-$(CONFIG_X86_64) += x86-5lvl.o lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c index 77359e802181..f1c5fb45d5f7 100644 --- a/drivers/firmware/efi/libstub/x86-5lvl.c +++ b/drivers/firmware/efi/libstub/x86-5lvl.c @@ -62,7 +62,7 @@ efi_status_t efi_setup_5level_paging(void) void efi_5level_switch(void) { - bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl; + bool want_la57 = !efi_no5lvl; bool have_la57 = native_read_cr4() & X86_CR4_LA57; bool need_toggle = want_la57 ^ have_la57; u64 *pgt = (void *)la57_toggle + PAGE_SIZE; diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c index a85b2dbdd9f0..15e991b99f5a 100644 --- a/drivers/firmware/samsung/exynos-acpm.c +++ b/drivers/firmware/samsung/exynos-acpm.c @@ -185,6 +185,29 @@ struct acpm_match_data { #define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle) /** + * acpm_get_saved_rx() - get the response if it was already saved. + * @achan: ACPM channel info. + * @xfer: reference to the transfer to get response for. + * @tx_seqnum: xfer TX sequence number. 
+ */ +static void acpm_get_saved_rx(struct acpm_chan *achan, + const struct acpm_xfer *xfer, u32 tx_seqnum) +{ + const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1]; + u32 rx_seqnum; + + if (!rx_data->response) + return; + + rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]); + + if (rx_seqnum == tx_seqnum) { + memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen); + clear_bit(rx_seqnum - 1, achan->bitmap_seqnum); + } +} + +/** * acpm_get_rx() - get response from RX queue. * @achan: ACPM channel info. * @xfer: reference to the transfer to get response for. @@ -204,15 +227,16 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer) rx_front = readl(achan->rx.front); i = readl(achan->rx.rear); - /* Bail out if RX is empty. */ - if (i == rx_front) + tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); + + if (i == rx_front) { + acpm_get_saved_rx(achan, xfer, tx_seqnum); return 0; + } base = achan->rx.base; mlen = achan->mlen; - tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); - /* Drain RX queue. */ do { /* Read RX seqnum. */ @@ -259,16 +283,8 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer) * If the response was not in this iteration of the queue, check if the * RX data was previously saved. */ - rx_data = &achan->rx_data[tx_seqnum - 1]; - if (!rx_set && rx_data->response) { - rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, - rx_data->cmd[0]); - - if (rx_seqnum == tx_seqnum) { - memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen); - clear_bit(rx_seqnum - 1, achan->bitmap_seqnum); - } - } + if (!rx_set) + acpm_get_saved_rx(achan, xfer, tx_seqnum); return 0; } diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index ca3472977431..e7671bcd5c07 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -437,7 +437,7 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev, int err; priv->irq_domain = - irq_domain_add_linear(np, priv->num_gpios, + irq_domain_create_linear(of_fwnode_handle(np), priv->num_gpios, &brcmstb_gpio_irq_domain_ops, priv); if (!priv->irq_domain) { diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 63fc7888c1d4..3c3b3ed46d9b 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -479,9 +479,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) return irq; } - irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0, - &davinci_gpio_irq_ops, - chips); + irq_domain = irq_domain_create_legacy(of_fwnode_handle(dev->of_node), ngpio, irq, 0, + &davinci_gpio_irq_ops, chips); if (!irq_domain) { dev_err(dev, "Couldn't register an IRQ domain\n"); return -ENODEV; diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index 6c862c572322..8d86f205f53e 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -323,8 +323,9 @@ static int em_gio_probe(struct platform_device *pdev) irq_chip->irq_release_resources = em_gio_irq_relres; irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND; - p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0, - &em_gio_irq_domain_ops, p); + p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node), + ngpios, 0, + &em_gio_irq_domain_ops, p); if (!p->irq_domain) { dev_err(dev, "cannot initialize irq domain\n"); return -ENXIO; diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 30a0522ae735..641df8f2fd3d 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -397,7 +397,7 @@ 
static int grgpio_probe(struct platform_device *ofdev) return -EINVAL; } - priv->domain = irq_domain_add_linear(np, gc->ngpio, + priv->domain = irq_domain_create_linear(of_fwnode_handle(np), gc->ngpio, &grgpio_irq_domain_ops, priv); if (!priv->domain) { diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c index 00f547d26254..535f25514455 100644 --- a/drivers/gpio/gpio-idt3243x.c +++ b/drivers/gpio/gpio-idt3243x.c @@ -37,7 +37,7 @@ static void idt_gpio_dispatch(struct irq_desc *desc) pending = readl(ctrl->pic + IDT_PIC_IRQ_PEND); pending &= ~ctrl->mask_cache; for_each_set_bit(bit, &pending, gc->ngpio) { - virq = irq_linear_revmap(gc->irq.domain, bit); + virq = irq_find_mapping(gc->irq.domain, bit); if (virq) generic_handle_irq(virq); } diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c index 2cf9fb4637a2..ae6182cce723 100644 --- a/drivers/gpio/gpio-lpc18xx.c +++ b/drivers/gpio/gpio-lpc18xx.c @@ -240,11 +240,9 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc) raw_spin_lock_init(&ic->lock); - ic->domain = irq_domain_add_hierarchy(parent_domain, 0, - NR_LPC18XX_GPIO_PIN_IC_IRQS, - dev->of_node, - &lpc18xx_gpio_pin_ic_domain_ops, - ic); + ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS, + of_fwnode_handle(dev->of_node), + &lpc18xx_gpio_pin_ic_domain_ops, ic); if (!ic->domain) { pr_err("unable to add irq domain\n"); ret = -ENODEV; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 3604abcb6fec..57633a7b4270 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -408,9 +408,8 @@ static void mvebu_gpio_irq_ack(struct irq_data *d) struct mvebu_gpio_chip *mvchip = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); mvebu_gpio_write_edge_cause(mvchip, ~mask); - irq_gc_unlock(gc); } static void mvebu_gpio_edge_irq_mask(struct irq_data *d) @@ -420,10 +419,9 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); ct->mask_cache_priv &= ~mask; mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); - irq_gc_unlock(gc); } static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) @@ -433,11 +431,10 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); mvebu_gpio_write_edge_cause(mvchip, ~mask); ct->mask_cache_priv |= mask; mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv); - irq_gc_unlock(gc); } static void mvebu_gpio_level_irq_mask(struct irq_data *d) @@ -447,10 +444,9 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); ct->mask_cache_priv &= ~mask; mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); - irq_gc_unlock(gc); } static void mvebu_gpio_level_irq_unmask(struct irq_data *d) @@ -460,10 +456,9 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); ct->mask_cache_priv |= mask; mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv); - irq_gc_unlock(gc); } /***************************************************************************** @@ -1242,7 +1237,7 @@ static int mvebu_gpio_probe(struct 
platform_device *pdev) return 0; mvchip->domain = - irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL); + irq_domain_create_linear(of_fwnode_handle(np), ngpios, &irq_generic_chip_ops, NULL); if (!mvchip->domain) { dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", mvchip->chip.label); diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 619b6fb9d833..74bc8f06a97a 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -502,7 +502,7 @@ static int mxc_gpio_probe(struct platform_device *pdev) goto out_bgio; } - port->domain = irq_domain_add_legacy(np, 32, irq_base, 0, + port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0, &irq_domain_simple_ops, NULL); if (!port->domain) { err = -ENODEV; diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index 024ad077e98d..b418fbccb26c 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -303,8 +303,8 @@ static int mxs_gpio_probe(struct platform_device *pdev) goto out_iounmap; } - port->domain = irq_domain_add_legacy(np, 32, irq_base, 0, - &irq_domain_simple_ops, NULL); + port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0, + &irq_domain_simple_ops, NULL); if (!port->domain) { err = -ENODEV; goto out_iounmap; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 91cea97255fa..c3dfaed45c43 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -636,9 +636,9 @@ static int pxa_gpio_probe(struct platform_device *pdev) if (!pxa_last_gpio) return -EINVAL; - pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node, - pxa_last_gpio + 1, irq_base, - 0, &pxa_irq_domain_ops, pchip); + pchip->irqdomain = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), + pxa_last_gpio + 1, irq_base, 0, + &pxa_irq_domain_ops, pchip); if (!pchip->irqdomain) return -ENOMEM; diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index 01a3b3dac58b..c63352f2f1ec 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -521,7 +521,7 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank) struct irq_chip_generic *gc; int ret; - bank->domain = irq_domain_add_linear(bank->of_node, 32, + bank->domain = irq_domain_create_linear(of_fwnode_handle(bank->of_node), 32, &irq_generic_chip_ops, NULL); if (!bank->domain) { dev_warn(bank->dev, "could not init irq domain for bank %s\n", diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 242dad763ac4..3f3ee36bc3cb 100644 --- a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c @@ -319,7 +319,7 @@ void __init sa1100_init_gpio(void) gpiochip_add_data(&sa1100_gpio_chip.chip, NULL); - sa1100_gpio_irqdomain = irq_domain_add_simple(NULL, + sa1100_gpio_irqdomain = irq_domain_create_simple(NULL, 28, IRQ_GPIO0, &sa1100_gpio_irqdomain_ops, sgc); diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c index c2a2c76c1652..6a3c4c625138 100644 --- a/drivers/gpio/gpio-sodaville.c +++ b/drivers/gpio/gpio-sodaville.c @@ -169,7 +169,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd, IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); - sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS, + sd->id = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), SDV_NUM_PUB_GPIOS, sd->irq_base, 0, &irq_domain_sdv_ops, sd); if (!sd->id) return -ENODEV; diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c index 
b6335cde455f..8cf676fd0a0b 100644 --- a/drivers/gpio/gpio-tb10x.c +++ b/drivers/gpio/gpio-tb10x.c @@ -183,7 +183,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev) if (ret != 0) return ret; - tb10x_gpio->domain = irq_domain_add_linear(np, + tb10x_gpio->domain = irq_domain_create_linear(of_fwnode_handle(np), tb10x_gpio->gc.ngpio, &irq_generic_chip_ops, NULL); if (!tb10x_gpio->domain) { diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index bcd692229c7c..0d17985a5fdc 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c @@ -502,7 +502,6 @@ static void gpio_twl4030_power_off_action(void *data) static int gpio_twl4030_probe(struct platform_device *pdev) { struct twl4030_gpio_platform_data *pdata; - struct device_node *node = pdev->dev.of_node; struct gpio_twl4030_priv *priv; int ret, irq_base; @@ -524,8 +523,8 @@ static int gpio_twl4030_probe(struct platform_device *pdev) return irq_base; } - irq_domain_add_legacy(node, TWL4030_GPIO_MAX, irq_base, 0, - &irq_domain_simple_ops, NULL); + irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), TWL4030_GPIO_MAX, irq_base, 0, + &irq_domain_simple_ops, NULL); ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base); if (ret < 0) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index cd4fecbb41f2..113c5d90f2df 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -742,6 +742,12 @@ EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask); bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset) { + /* + * hog pins are requested before registering GPIO chip + */ + if (!gc->gpiodev) + return true; + /* No mask means all valid */ if (likely(!gc->gpiodev->valid_mask)) return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 19ce4da285e8..38e7043016e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -725,8 +725,8 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = { */ int amdgpu_irq_add_domain(struct amdgpu_device *adev) { - adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, - &amdgpu_hw_irqdomain_ops, adev); + adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, + &amdgpu_hw_irqdomain_ops, adev); if (!adev->irq.domain) { DRM_ERROR("GPU irq add domain failed\n"); return -ENODEV; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index ae3343c81a64..5e784db9f315 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -305,36 +305,20 @@ void __shmem_writeback(size_t size, struct address_space *mapping) .range_end = LLONG_MAX, .for_reclaim = 1, }; - unsigned long i; + struct folio *folio = NULL; + int error = 0; /* * Leave mmapings intact (GTT will have been revoked on unbinding, - * leaving only CPU mmapings around) and add those pages to the LRU + * leaving only CPU mmapings around) and add those folios to the LRU * instead of invoking writeback so they are aged and paged out * as normal. 
*/ - - /* Begin writeback on each dirty page */ - for (i = 0; i < size >> PAGE_SHIFT; i++) { - struct page *page; - - page = find_lock_page(mapping, i); - if (!page) - continue; - - if (!page_mapped(page) && clear_page_dirty_for_io(page)) { - int ret; - - SetPageReclaim(page); - ret = mapping->a_ops->writepage(page, &wbc); - if (!PageWriteback(page)) - ClearPageReclaim(page); - if (!ret) - goto put; - } - unlock_page(page); -put: - put_page(page); + while ((folio = writeback_iter(mapping, &wbc, folio, &error))) { + if (folio_mapped(folio)) + folio_redirty_for_writepage(&wbc, folio); + else + error = shmem_writeout(folio, &wbc); } } diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c index eb03b5b28bad..25b8726b9dff 100644 --- a/drivers/gpu/drm/i915/selftests/librapl.c +++ b/drivers/gpu/drm/i915/selftests/librapl.c @@ -22,12 +22,12 @@ u64 librapl_energy_uJ(void) unsigned long long power; u32 units; - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) + if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power)) return 0; units = (power & 0x1f00) >> 8; - if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power)) + if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power)) return 0; return (1000000 * power) >> units; /* convert to uJ */ diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index dcb49fd30402..9d006ee88a8a 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -150,7 +150,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss) dev = msm_mdss->dev; - domain = irq_domain_add_linear(dev->of_node, 32, + domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32, &msm_mdss_irqdomain_ops, msm_mdss); if (!domain) { dev_err(dev, "failed to add irq_domain\n"); diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c index 9e2d72c447ee..ffaab68bd5dd 100644 --- a/drivers/gpu/drm/ttm/ttm_backup.c +++ b/drivers/gpu/drm/ttm/ttm_backup.c @@ -120,13 +120,13 @@ ttm_backup_backup_page(struct file *backup, struct page *page, .for_reclaim = 1, }; folio_set_reclaim(to_folio); - ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc); + ret = shmem_writeout(to_folio, &wbc); if (!folio_test_writeback(to_folio)) folio_clear_reclaim(to_folio); /* - * If writepage succeeds, it unlocks the folio. - * writepage() errors are otherwise dropped, since writepage() - * is only best effort here. + * If writeout succeeds, it unlocks the folio. errors + * are otherwise dropped, since writeout is only best + * effort here. 
*/ if (ret) folio_unlock(to_folio); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index fa77e4e64f12..333f36e0a715 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1008,7 +1008,7 @@ int ipu_map_irq(struct ipu_soc *ipu, int irq) { int virq; - virq = irq_linear_revmap(ipu->domain, irq); + virq = irq_find_mapping(ipu->domain, irq); if (!virq) virq = irq_create_mapping(ipu->domain, irq); @@ -1169,8 +1169,8 @@ static int ipu_irq_init(struct ipu_soc *ipu) }; int ret, i; - ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS, - &irq_generic_chip_ops, ipu); + ipu->domain = irq_domain_create_linear(of_fwnode_handle(ipu->dev->of_node), IPU_NUM_IRQS, + &irq_generic_chip_ops, ipu); if (!ipu->domain) { dev_err(ipu->dev, "failed to add irq domain\n"); return -ENODEV; @@ -1219,7 +1219,7 @@ static void ipu_irq_exit(struct ipu_soc *ipu) /* TODO: remove irq_domain_generic_chips */ for (i = 0; i < IPU_NUM_IRQS; i++) { - irq = irq_linear_revmap(ipu->domain, i); + irq = irq_find_mapping(ipu->domain, i); if (irq) irq_dispose_mapping(irq); } diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 9ed2c4b6734e..8ecebea53651 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -143,8 +143,8 @@ static void do_read_registers_on_cu(void *_data) */ cu = topology_core_id(smp_processor_id()); - rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]); - rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]); + rdmsrq_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]); + rdmsrq_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]); data->cu_on[cu] = 1; } @@ -424,7 +424,7 @@ static int fam15h_power_init_data(struct pci_dev *f4, */ data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007); - if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) { + if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) { pr_err("Failed to read max compute unit power accumulator MSR\n"); return -ENODEV; } diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index 6d1175a51832..2df4956296ed 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -15,6 +15,10 @@ #include <linux/kernel.h> #include <linux/hwmon-vid.h> +#ifdef CONFIG_X86 +#include <asm/msr.h> +#endif + /* * Common code for decoding VID pins. 
* diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 3685906cc57c..472bcf6092f6 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -20,7 +20,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/pci_ids.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include <asm/processor.h> MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 83c88c79afe2..bbbd6240fa6e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -200,7 +200,7 @@ config I2C_ISMT config I2C_PIIX4 tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)" - depends on PCI && HAS_IOPORT + depends on PCI && HAS_IOPORT && X86 select I2C_SMBUS help If you say yes to this option, support will be included for the Intel diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 26a36a65521e..606ac071cb80 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -467,7 +467,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev) return ret; /* Alloc and register client IRQ */ - adap->irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL); + adap->irq_domain = irq_domain_create_linear(NULL, 1, &irq_domain_simple_ops, NULL); if (!adap->irq_domain) return -ENOMEM; diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index dd75916157f0..59ecaa990bce 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -34,6 +34,7 @@ #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/io.h> +#include <asm/amd/fch.h> #include "i2c-piix4.h" @@ -80,12 +81,11 @@ #define SB800_PIIX4_PORT_IDX_MASK 0x06 #define SB800_PIIX4_PORT_IDX_SHIFT 1 -/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ -#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 -#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 +/* SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ +#define SB800_PIIX4_PORT_IDX_KERNCZ (FCH_PM_DECODEEN + 0x02) +#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ (FCH_PM_DECODEEN_SMBUS0SEL >> 16) #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 -#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300 #define SB800_PIIX4_FCH_PM_SIZE 8 #define SB800_ASF_ACPI_PATH "\\_SB.ASFC" @@ -162,19 +162,19 @@ int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_c if (mmio_cfg->use_mmio) { void __iomem *addr; - if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR, + if (!request_mem_region_muxed(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE, "sb800_piix4_smb")) { dev_err(dev, "SMBus base address memory region 0x%x already in use.\n", - SB800_PIIX4_FCH_PM_ADDR); + FCH_PM_BASE); return -EBUSY; } - addr = ioremap(SB800_PIIX4_FCH_PM_ADDR, + addr = ioremap(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE); if (!addr) { - release_mem_region(SB800_PIIX4_FCH_PM_ADDR, + release_mem_region(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE); dev_err(dev, "SMBus base address mapping failed.\n"); return -ENOMEM; @@ -201,7 +201,7 @@ void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_ { if (mmio_cfg->use_mmio) { iounmap(mmio_cfg->addr); - release_mem_region(SB800_PIIX4_FCH_PM_ADDR, + release_mem_region(FCH_PM_BASE, SB800_PIIX4_FCH_PM_SIZE); return; } diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index db95113a5b49..5bb26af0f532 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ 
-442,9 +442,9 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc) raw_spin_lock_init(&data->lock); - data->irq = irq_domain_add_linear(client->dev.of_node, - data->chip->nchans, - &irq_domain_simple_ops, data); + data->irq = irq_domain_create_linear(of_fwnode_handle(client->dev.of_node), + data->chip->nchans, + &irq_domain_simple_ops, data); if (!data->irq) return -ENODEV; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 976f5be54e36..433d858b7be1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -51,11 +51,12 @@ #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/moduleparam.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/mwait.h> #include <asm/spec-ctrl.h> +#include <asm/msr.h> #include <asm/tsc.h> #include <asm/fpu/api.h> #include <asm/smp.h> @@ -1928,35 +1929,35 @@ static void __init bxt_idle_state_table_update(void) unsigned long long msr; unsigned int usec; - rdmsrl(MSR_PKGC6_IRTL, msr); + rdmsrq(MSR_PKGC6_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[2].exit_latency = usec; bxt_cstates[2].target_residency = usec; } - rdmsrl(MSR_PKGC7_IRTL, msr); + rdmsrq(MSR_PKGC7_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[3].exit_latency = usec; bxt_cstates[3].target_residency = usec; } - rdmsrl(MSR_PKGC8_IRTL, msr); + rdmsrq(MSR_PKGC8_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[4].exit_latency = usec; bxt_cstates[4].target_residency = usec; } - rdmsrl(MSR_PKGC9_IRTL, msr); + rdmsrq(MSR_PKGC9_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[5].exit_latency = usec; bxt_cstates[5].target_residency = usec; } - rdmsrl(MSR_PKGC10_IRTL, msr); + rdmsrq(MSR_PKGC10_IRTL, msr); usec = irtl_2_usec(msr); if (usec) { bxt_cstates[6].exit_latency = usec; @@ -1984,7 +1985,7 @@ static void __init sklh_idle_state_table_update(void) if ((mwait_substates & (0xF << 28)) == 0) return; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); /* PC10 is not enabled in PKG C-state limit */ if ((msr & 0xF) != 8) @@ -1996,7 +1997,7 @@ static void __init sklh_idle_state_table_update(void) /* if SGX is present */ if (ebx & (1 << 2)) { - rdmsrl(MSR_IA32_FEAT_CTL, msr); + rdmsrq(MSR_IA32_FEAT_CTL, msr); /* if SGX is enabled */ if (msr & (1 << 18)) @@ -2015,7 +2016,7 @@ static void __init skx_idle_state_table_update(void) { unsigned long long msr; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); /* * 000b: C0/C1 (no package C-state support) @@ -2068,7 +2069,7 @@ static void __init spr_idle_state_table_update(void) * C6. However, if PC6 is disabled, we update the numbers to match * core C6. */ - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); /* Limit value 2 and above allow for PC6. 
*/ if ((msr & 0x7) < 2) { @@ -2082,8 +2083,8 @@ static void __init spr_idle_state_table_update(void) */ static void __init byt_cht_auto_demotion_disable(void) { - wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); - wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); + wrmsrq(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); + wrmsrq(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); } static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) @@ -2241,27 +2242,27 @@ static void auto_demotion_disable(void) { unsigned long long msr_bits; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); msr_bits &= ~auto_demotion_disable_flags; - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } static void c1e_promotion_enable(void) { unsigned long long msr_bits; - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + rdmsrq(MSR_IA32_POWER_CTL, msr_bits); msr_bits |= 0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); + wrmsrq(MSR_IA32_POWER_CTL, msr_bits); } static void c1e_promotion_disable(void) { unsigned long long msr_bits; - rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + rdmsrq(MSR_IA32_POWER_CTL, msr_bits); msr_bits &= ~0x2; - wrmsrl(MSR_IA32_POWER_CTL, msr_bits); + wrmsrq(MSR_IA32_POWER_CTL, msr_bits); } /** diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index 0914148d1a22..bd3458965bff 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -421,9 +421,10 @@ static int stm32_adc_irq_probe(struct platform_device *pdev, return priv->irq[i]; } - priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0, - &stm32_adc_domain_ops, - priv); + priv->domain = irq_domain_create_simple(of_fwnode_handle(np), + STM32_ADC_MAX_ADCS, 0, + &stm32_adc_domain_ops, + priv); if (!priv->domain) { dev_err(&pdev->dev, "Failed to add irq domain\n"); return -ENOMEM; diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index b9f4a2937c3a..2098de762bf5 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -90,7 +90,7 @@ static int create_file(const char *name, umode_t mode, int error; inode_lock(d_inode(parent)); - *dentry = lookup_one_len(name, parent, strlen(name)); + *dentry = lookup_noperm(&QSTR(name), parent); if (!IS_ERR(*dentry)) error = qibfs_mknod(d_inode(parent), *dentry, mode, fops, data); @@ -433,7 +433,7 @@ static int remove_device_files(struct super_block *sb, char unit[10]; snprintf(unit, sizeof(unit), "%u", dd->unit); - dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit)); + dir = lookup_noperm_unlocked(&QSTR(unit), sb->s_root); if (IS_ERR(dir)) { pr_err("Lookup of %s failed\n", unit); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 57a5ff3d1992..1008858f78e2 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -290,6 +290,8 @@ static const struct xpad_device { { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE }, + { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, @@ -354,6 +356,7 @@ static const struct xpad_device { { 0x1ee9, 0x1590, 
"ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, { 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index d760af4cc12e..f1947f03b06a 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -4,6 +4,7 @@ * Copyright (C) 2016 Zodiac Inflight Innovations */ +#include "linux/device.h" #include <linux/kernel.h> #include <linux/rmi.h> #include <linux/firmware.h> @@ -289,39 +290,30 @@ static int rmi_f34_update_firmware(struct f34_data *f34, return rmi_f34_flash_firmware(f34, syn_fw); } -static int rmi_f34_status(struct rmi_function *fn) -{ - struct f34_data *f34 = dev_get_drvdata(&fn->dev); - - /* - * The status is the percentage complete, or once complete, - * zero for success or a negative return code. - */ - return f34->update_status; -} - static ssize_t rmi_driver_bootloader_id_show(struct device *dev, struct device_attribute *dattr, char *buf) { struct rmi_driver_data *data = dev_get_drvdata(dev); - struct rmi_function *fn = data->f34_container; + struct rmi_function *fn; struct f34_data *f34; - if (fn) { - f34 = dev_get_drvdata(&fn->dev); - - if (f34->bl_version == 5) - return sysfs_emit(buf, "%c%c\n", - f34->bootloader_id[0], - f34->bootloader_id[1]); - else - return sysfs_emit(buf, "V%d.%d\n", - f34->bootloader_id[1], - f34->bootloader_id[0]); - } + fn = data->f34_container; + if (!fn) + return -ENODEV; - return 0; + f34 = dev_get_drvdata(&fn->dev); + if (!f34) + return -ENODEV; + + if (f34->bl_version == 5) + return sysfs_emit(buf, "%c%c\n", + f34->bootloader_id[0], + f34->bootloader_id[1]); + else + return sysfs_emit(buf, "V%d.%d\n", + f34->bootloader_id[1], + f34->bootloader_id[0]); } static DEVICE_ATTR(bootloader_id, 0444, rmi_driver_bootloader_id_show, NULL); @@ -334,13 +326,16 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev, struct rmi_function *fn = data->f34_container; struct f34_data *f34; - if (fn) { - f34 = dev_get_drvdata(&fn->dev); + fn = data->f34_container; + if (!fn) + return -ENODEV; + + f34 = dev_get_drvdata(&fn->dev); + if (!f34) + return -ENODEV; - return sysfs_emit(buf, "%s\n", f34->configuration_id); - } - return 0; + return sysfs_emit(buf, "%s\n", f34->configuration_id); } static DEVICE_ATTR(configuration_id, 0444, @@ -356,10 +351,14 @@ static int rmi_firmware_update(struct rmi_driver_data *data, if (!data->f34_container) { dev_warn(dev, "%s: No F34 present!\n", __func__); - return -EINVAL; + return -ENODEV; } f34 = dev_get_drvdata(&data->f34_container->dev); + if (!f34) { + dev_warn(dev, "%s: No valid F34 present!\n", __func__); + return -ENODEV; + } if (f34->bl_version >= 7) { if (data->pdt_props & HAS_BSR) { @@ -485,10 +484,18 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev, char *buf) { struct rmi_driver_data *data = dev_get_drvdata(dev); - int update_status = 0; + struct f34_data *f34; + int update_status = -ENODEV; - if (data->f34_container) - update_status = rmi_f34_status(data->f34_container); + /* + * The status is the percentage complete, or once complete, + * zero 
for success or a negative return code. + */ + if (data->f34_container) { + f34 = dev_get_drvdata(&data->f34_container->dev); + if (f34) + update_status = f34->update_status; + } return sysfs_emit(buf, "%d\n", update_status); } @@ -508,33 +515,21 @@ static const struct attribute_group rmi_firmware_attr_group = { .attrs = rmi_firmware_attrs, }; -static int rmi_f34_probe(struct rmi_function *fn) +static int rmi_f34v5_probe(struct f34_data *f34) { - struct f34_data *f34; - unsigned char f34_queries[9]; + struct rmi_function *fn = f34->fn; + u8 f34_queries[9]; bool has_config_id; - u8 version = fn->fd.function_version; - int ret; - - f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL); - if (!f34) - return -ENOMEM; - - f34->fn = fn; - dev_set_drvdata(&fn->dev, f34); - - /* v5 code only supported version 0, try V7 probe */ - if (version > 0) - return rmi_f34v7_probe(f34); + int error; f34->bl_version = 5; - ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, - f34_queries, sizeof(f34_queries)); - if (ret) { + error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, + f34_queries, sizeof(f34_queries)); + if (error) { dev_err(&fn->dev, "%s: Failed to query properties\n", __func__); - return ret; + return error; } snprintf(f34->bootloader_id, sizeof(f34->bootloader_id), @@ -560,11 +555,11 @@ static int rmi_f34_probe(struct rmi_function *fn) f34->v5.config_blocks); if (has_config_id) { - ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr, - f34_queries, sizeof(f34_queries)); - if (ret) { + error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr, + f34_queries, sizeof(f34_queries)); + if (error) { dev_err(&fn->dev, "Failed to read F34 config ID\n"); - return ret; + return error; } snprintf(f34->configuration_id, sizeof(f34->configuration_id), @@ -573,12 +568,34 @@ static int rmi_f34_probe(struct rmi_function *fn) f34_queries[2], f34_queries[3]); rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n", - f34->configuration_id); + f34->configuration_id); } return 0; } +static int rmi_f34_probe(struct rmi_function *fn) +{ + struct f34_data *f34; + u8 version = fn->fd.function_version; + int error; + + f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL); + if (!f34) + return -ENOMEM; + + f34->fn = fn; + + /* v5 code only supported version 0 */ + error = version == 0 ? rmi_f34v5_probe(f34) : rmi_f34v7_probe(f34); + if (error) + return error; + + dev_set_drvdata(&fn->dev, f34); + + return 0; +} + int rmi_f34_create_sysfs(struct rmi_device *rmi_dev) { return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 4f91a740c15f..9d728800a862 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -3366,10 +3366,12 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain, int ret; for_each_group_device(group, device) { - ret = domain->ops->set_dev_pasid(domain, device->dev, - pasid, old); - if (ret) - goto err_revert; + if (device->dev->iommu->max_pasids > 0) { + ret = domain->ops->set_dev_pasid(domain, device->dev, + pasid, old); + if (ret) + goto err_revert; + } } return 0; @@ -3379,15 +3381,18 @@ err_revert: for_each_group_device(group, device) { if (device == last_gdev) break; - /* - * If no old domain, undo the succeeded devices/pasid. - * Otherwise, rollback the succeeded devices/pasid to the old - * domain. And it is a driver bug to fail attaching with a - * previously good domain. 
- */ - if (!old || WARN_ON(old->ops->set_dev_pasid(old, device->dev, + if (device->dev->iommu->max_pasids > 0) { + /* + * If no old domain, undo the succeeded devices/pasid. + * Otherwise, rollback the succeeded devices/pasid to + * the old domain. And it is a driver bug to fail + * attaching with a previously good domain. + */ + if (!old || + WARN_ON(old->ops->set_dev_pasid(old, device->dev, pasid, domain))) - iommu_remove_dev_pasid(device->dev, pasid, domain); + iommu_remove_dev_pasid(device->dev, pasid, domain); + } } return ret; } @@ -3398,8 +3403,10 @@ static void __iommu_remove_group_pasid(struct iommu_group *group, { struct group_device *device; - for_each_group_device(group, device) - iommu_remove_dev_pasid(device->dev, pasid, domain); + for_each_group_device(group, device) { + if (device->dev->iommu->max_pasids > 0) + iommu_remove_dev_pasid(device->dev, pasid, domain); + } } /* @@ -3440,7 +3447,13 @@ int iommu_attach_device_pasid(struct iommu_domain *domain, mutex_lock(&group->mutex); for_each_group_device(group, device) { - if (pasid >= device->dev->iommu->max_pasids) { + /* + * Skip PASID validation for devices without PASID support + * (max_pasids = 0). These devices cannot issue transactions + * with PASID, so they don't affect group's PASID usage. + */ + if ((device->dev->iommu->max_pasids > 0) && + (pasid >= device->dev->iommu->max_pasids)) { ret = -EINVAL; goto out_unlock; } diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 08bb3b031f23..0d196e447142 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -166,6 +166,11 @@ config DW_APB_ICTL select GENERIC_IRQ_CHIP select IRQ_DOMAIN_HIERARCHY +config ECONET_EN751221_INTC + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + config FARADAY_FTINTC010 bool select IRQ_DOMAIN diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 365bcea9a61f..23ca4959e6ce 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o +obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index 552aa04ff063..e7dfcf0cda43 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c @@ -180,7 +180,7 @@ static void __init combiner_init(void __iomem *combiner_base, if (!combiner_data) return; - combiner_irq_domain = irq_domain_add_linear(np, nr_irq, + combiner_irq_domain = irq_domain_create_linear(of_fwnode_handle(np), nr_irq, &combiner_irq_domain_ops, combiner_data); if (WARN_ON(!combiner_irq_domain)) { pr_warn("%s: irq domain init failed\n", __func__); diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c index dfb761e86c9c..8f300843bbca 100644 --- a/drivers/irqchip/irq-al-fic.c +++ b/drivers/irqchip/irq-al-fic.c @@ -65,15 +65,13 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); struct al_fic *fic = gc->private; enum al_fic_state new_state; - int ret = 0; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) && ((flow_type & IRQ_TYPE_SENSE_MASK) != 
IRQ_TYPE_EDGE_RISING)) { pr_debug("fic doesn't support flow type %d\n", flow_type); - ret = -EINVAL; - goto err; + return -EINVAL; } new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ? @@ -91,16 +89,10 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type) if (fic->state == AL_FIC_UNCONFIGURED) { al_fic_set_trigger(fic, gc, new_state); } else if (fic->state != new_state) { - pr_debug("fic %s state already configured to %d\n", - fic->name, fic->state); - ret = -EINVAL; - goto err; + pr_debug("fic %s state already configured to %d\n", fic->name, fic->state); + return -EINVAL; } - -err: - irq_gc_unlock(gc); - - return ret; + return 0; } static void al_fic_irq_handler(struct irq_desc *desc) @@ -139,7 +131,7 @@ static int al_fic_register(struct device_node *node, struct irq_chip_generic *gc; int ret; - fic->domain = irq_domain_add_linear(node, + fic->domain = irq_domain_create_linear(of_fwnode_handle(node), NR_FIC_IRQS, &irq_generic_chip_ops, fic); diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c index a1430ab60a8a..a5289dc26dca 100644 --- a/drivers/irqchip/irq-alpine-msi.c +++ b/drivers/irqchip/irq-alpine-msi.c @@ -205,15 +205,14 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv, return -ENXIO; } - middle_domain = irq_domain_add_hierarchy(gic_domain, 0, 0, NULL, - &alpine_msix_middle_domain_ops, - priv); + middle_domain = irq_domain_create_hierarchy(gic_domain, 0, 0, NULL, + &alpine_msix_middle_domain_ops, priv); if (!middle_domain) { pr_err("Failed to create the MSIX middle domain\n"); return -ENOMEM; } - msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node), &alpine_msix_domain_info, middle_domain); if (!msi_domain) { diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 974dc088c853..032d66dceb8e 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -1014,7 +1014,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p irqc->info.die_stride = off - start_off; - irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node), + irqc->hw_domain = irq_domain_create_tree(of_fwnode_handle(node), &aic_irq_domain_ops, irqc); if (WARN_ON(!irqc->hw_domain)) goto err_unmap; @@ -1067,7 +1067,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p if (is_kernel_in_hyp_mode()) { struct irq_fwspec mi = { - .fwnode = of_node_to_fwnode(node), + .fwnode = of_fwnode_handle(node), .param_count = 3, .param = { [0] = AIC_FIQ, /* This is a lie */ diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 2aa6a51e05d0..67b672a78862 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -348,12 +348,12 @@ static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node, mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK; } - mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size, + mpic->msi_inner_domain = irq_domain_create_linear(NULL, mpic->msi_doorbell_size, &mpic_msi_domain_ops, mpic); if (!mpic->msi_inner_domain) return -ENOMEM; - mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info, + mpic->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node), &mpic_msi_domain_info, mpic->msi_inner_domain); if (!mpic->msi_domain) { irq_domain_remove(mpic->msi_inner_domain); @@ -492,7 +492,7 @@ static int 
__init mpic_ipi_init(struct mpic *mpic, struct device_node *node) { int base_ipi; - mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR, + mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR, &mpic_ipi_domain_ops, mpic); if (WARN_ON(!mpic->ipi_domain)) return -ENOMEM; @@ -546,7 +546,7 @@ static void mpic_reenable_percpu(struct mpic *mpic) { /* Re-enable per-CPU interrupts that were enabled before suspend */ for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) { - unsigned int virq = irq_linear_revmap(mpic->domain, i); + unsigned int virq = irq_find_mapping(mpic->domain, i); struct irq_data *d; if (!virq || !irq_percpu_is_enabled(virq)) @@ -740,7 +740,7 @@ static void mpic_resume(void) /* Re-enable interrupts */ for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { - unsigned int virq = irq_linear_revmap(mpic->domain, i); + unsigned int virq = irq_find_mapping(mpic->domain, i); struct irq_data *d; if (!virq) @@ -861,7 +861,7 @@ static int __init mpic_of_init(struct device_node *node, struct device_node *par if (!mpic_is_ipi_available(mpic)) nr_irqs = MPIC_PER_CPU_IRQS_NR; - mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic); + mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic); if (!mpic->domain) { pr_err("%pOF: Unable to add IRQ domain\n", node); return -ENOMEM; diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c index 9c9fc3e2967e..87c1feb999ff 100644 --- a/drivers/irqchip/irq-aspeed-i2c-ic.c +++ b/drivers/irqchip/irq-aspeed-i2c-ic.c @@ -82,7 +82,7 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node, goto err_iounmap; } - i2c_ic->irq_domain = irq_domain_add_linear(node, ASPEED_I2C_IC_NUM_BUS, + i2c_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), ASPEED_I2C_IC_NUM_BUS, &aspeed_i2c_ic_irq_domain_ops, NULL); if (!i2c_ic->irq_domain) { diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c index bd3b759b4b2c..8330221799a0 100644 --- a/drivers/irqchip/irq-aspeed-intc.c +++ b/drivers/irqchip/irq-aspeed-intc.c @@ -102,7 +102,7 @@ static int __init aspeed_intc_ic_of_init(struct device_node *node, writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG); writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG); - intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD, + intc_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), INTC_IRQS_PER_WORD, &aspeed_intc_ic_irq_domain_ops, intc_ic); if (!intc_ic->irq_domain) { ret = -ENOMEM; diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c index 94a7223e95df..1c7045467c48 100644 --- a/drivers/irqchip/irq-aspeed-scu-ic.c +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -165,7 +165,7 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic, goto err; } - scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs, + scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs, &aspeed_scu_ic_domain_ops, scu_ic); if (!scu_ic->irq_domain) { diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c index 62ccf2c0c414..9b665b5bb531 100644 --- a/drivers/irqchip/irq-aspeed-vic.c +++ b/drivers/irqchip/irq-aspeed-vic.c @@ -211,8 +211,8 @@ static int __init avic_of_init(struct device_node *node, set_handle_irq(avic_handle_irq); /* Register our domain */ - vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0, - 
&avic_dom_ops, vic); + vic->dom = irq_domain_create_simple(of_fwnode_handle(node), NUM_IRQS, 0, + &avic_dom_ops, vic); return 0; } diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c index 92f001a5ff8d..268cc18b781f 100644 --- a/drivers/irqchip/irq-ath79-misc.c +++ b/drivers/irqchip/irq-ath79-misc.c @@ -147,7 +147,7 @@ static int __init ath79_misc_intc_of_init( return -ENOMEM; } - domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT, + domain = irq_domain_create_linear(of_fwnode_handle(node), ATH79_MISC_IRQ_COUNT, &misc_irq_domain_ops, base); if (!domain) { pr_err("Failed to add MISC irqdomain\n"); @@ -188,7 +188,7 @@ void __init ath79_misc_irq_init(void __iomem *regs, int irq, else ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; - domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT, + domain = irq_domain_create_legacy(NULL, ATH79_MISC_IRQ_COUNT, irq_base, 0, &misc_irq_domain_ops, regs); if (!domain) panic("Failed to create MISC irqdomain"); diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 4525366d16d6..3cad30a40c19 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -228,7 +228,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, goto err_iounmap; } - domain = irq_domain_add_linear(node, nchips * 32, ops, aic); + domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32, ops, aic); if (!domain) { ret = -ENOMEM; goto err_free_aic; diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 3839ad79ad31..03aeed39a4d2 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c @@ -78,9 +78,8 @@ static int aic_retrigger(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); - irq_gc_unlock(gc); return 1; } @@ -106,30 +105,27 @@ static void aic_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); - irq_gc_unlock(gc); } static void aic_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); - irq_gc_unlock(gc); } static void aic_pm_shutdown(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); - irq_gc_unlock(gc); } #else #define aic_suspend NULL @@ -175,10 +171,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d, { struct irq_domain_chip_generic *dgc = d->gc; struct irq_chip_generic *gc; - unsigned long flags; unsigned smr; - int idx; - int ret; + int idx, ret; if (!dgc) return -EINVAL; @@ -194,11 +188,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d, gc = dgc->gc[idx]; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irq)(&gc->lock); smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); aic_common_set_priority(intspec[2], &smr); irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); - irq_gc_unlock_irqrestore(gc, flags); return ret; } diff --git 
a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index e98c2875af9e..60b00d2c3d7a 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -92,11 +92,10 @@ static void aic5_mask(struct irq_data *d) * Disable interrupt on AIC5. We always take the lock of the * first irq chip as all chips share the same registers. */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IDCR); gc->mask_cache &= ~d->mask; - irq_gc_unlock(bgc); } static void aic5_unmask(struct irq_data *d) @@ -109,11 +108,10 @@ static void aic5_unmask(struct irq_data *d) * Enable interrupt on AIC5. We always take the lock of the * first irq chip as all chips share the same registers. */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(gc, 1, AT91_AIC5_IECR); gc->mask_cache |= d->mask; - irq_gc_unlock(bgc); } static int aic5_retrigger(struct irq_data *d) @@ -122,11 +120,9 @@ static int aic5_retrigger(struct irq_data *d) struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); /* Enable interrupt on AIC5 */ - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); irq_reg_writel(bgc, 1, AT91_AIC5_ISCR); - irq_gc_unlock(bgc); - return 1; } @@ -137,14 +133,12 @@ static int aic5_set_type(struct irq_data *d, unsigned type) unsigned int smr; int ret; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); ret = aic_common_set_type(d, type, &smr); if (!ret) irq_reg_writel(bgc, smr, AT91_AIC5_SMR); - irq_gc_unlock(bgc); - return ret; } @@ -166,7 +160,7 @@ static void aic5_suspend(struct irq_data *d) smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR); } - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); for (i = 0; i < dgc->irqs_per_chip; i++) { mask = 1 << i; if ((mask & gc->mask_cache) == (mask & gc->wake_active)) @@ -178,7 +172,6 @@ static void aic5_suspend(struct irq_data *d) else irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); } - irq_gc_unlock(bgc); } static void aic5_resume(struct irq_data *d) @@ -190,7 +183,7 @@ static void aic5_resume(struct irq_data *d) int i; u32 mask; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); if (smr_cache) { irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU); @@ -214,7 +207,6 @@ static void aic5_resume(struct irq_data *d) else irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); } - irq_gc_unlock(bgc); } static void aic5_pm_shutdown(struct irq_data *d) @@ -225,13 +217,12 @@ static void aic5_pm_shutdown(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); int i; - irq_gc_lock(bgc); + guard(raw_spinlock)(&bgc->lock); for (i = 0; i < dgc->irqs_per_chip; i++) { irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); } - irq_gc_unlock(bgc); } #else #define aic5_suspend NULL @@ -277,7 +268,6 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, unsigned int *out_type) { struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); - unsigned long flags; unsigned smr; int ret; @@ -289,13 +279,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, if (ret) return ret; - irq_gc_lock_irqsave(bgc, flags); + guard(raw_spinlock_irq)(&bgc->lock); irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); smr = irq_reg_readl(bgc, AT91_AIC5_SMR); aic_common_set_priority(intspec[2], &smr); 
irq_reg_writel(bgc, smr, AT91_AIC5_SMR); - irq_gc_unlock_irqrestore(bgc, flags); - return ret; } diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c index 4cce24233f0f..63de5ef6cf2d 100644 --- a/drivers/irqchip/irq-bcm2712-mip.c +++ b/drivers/irqchip/irq-bcm2712-mip.c @@ -11,7 +11,7 @@ #include <linux/of_address.h> #include <linux/of_platform.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #define MIP_INT_RAISE 0x00 #define MIP_INT_CLEAR 0x10 @@ -174,8 +174,8 @@ static int mip_init_domains(struct mip_priv *mip, struct device_node *np) { struct irq_domain *middle; - middle = irq_domain_add_hierarchy(mip->parent, 0, mip->num_msis, np, - &mip_middle_domain_ops, mip); + middle = irq_domain_create_hierarchy(mip->parent, 0, mip->num_msis, of_fwnode_handle(np), + &mip_middle_domain_ops, mip); if (!middle) return -ENOMEM; diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index 6c20604c2242..1e384c870350 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -144,7 +144,7 @@ static int __init armctrl_of_init(struct device_node *node, if (!base) panic("%pOF: unable to map IC registers\n", node); - intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0), + intc.domain = irq_domain_create_linear(of_fwnode_handle(node), MAKE_HWIRQ(NR_BANKS, 0), &armctrl_ops, NULL); if (!intc.domain) panic("%pOF: unable to create IRQ domain\n", node); diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index e366257684b5..fafd1f71348e 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -325,7 +325,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node, bcm2835_init_local_timer_frequency(); - intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1, + intc.domain = irq_domain_create_linear(of_fwnode_handle(node), LAST_IRQ + 1, &bcm2836_arm_irqchip_intc_ops, NULL); if (!intc.domain) diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index 90daa274ef23..ca4e141c5bc2 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -316,7 +316,7 @@ static int __init bcm6345_l1_of_init(struct device_node *dn, raw_spin_lock_init(&intc->lock); - intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words, + intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words, &bcm6345_l1_domain_ops, intc); if (!intc->domain) { diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 36e71af054e9..04fac0cc857f 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -416,7 +416,7 @@ static int __init bcm7038_l1_of_init(struct device_node *dn, } } - intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words, + intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words, &bcm7038_l1_domain_ops, intc); if (!intc->domain) { diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 1e9dab6e0d86..ff22c3104401 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -63,16 +63,15 @@ static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc) for (idx = 0; idx < b->n_words; idx++) { int base = idx * IRQS_PER_WORD; - struct irq_chip_generic *gc = - irq_get_domain_generic_chip(b->domain, base); + struct irq_chip_generic *gc; unsigned long pending; int hwirq; - irq_gc_lock(gc); - pending = 
irq_reg_readl(gc, b->stat_offset[idx]) & - gc->mask_cache & - data->irq_map_mask[idx]; - irq_gc_unlock(gc); + gc = irq_get_domain_generic_chip(b->domain, base); + scoped_guard (raw_spinlock, &gc->lock) { + pending = irq_reg_readl(gc, b->stat_offset[idx]) & gc->mask_cache & + data->irq_map_mask[idx]; + } for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) generic_handle_domain_irq(b->domain, base + hwirq); @@ -86,11 +85,9 @@ static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc) struct bcm7120_l2_intc_data *b = gc->private; struct irq_chip_type *ct = gc->chip_types; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); if (b->can_wake) - irq_reg_writel(gc, gc->mask_cache | gc->wake_active, - ct->regs.mask); - irq_gc_unlock(gc); + irq_reg_writel(gc, gc->mask_cache | gc->wake_active, ct->regs.mask); } static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) @@ -98,9 +95,8 @@ static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) struct irq_chip_type *ct = gc->chip_types; /* Restore the saved mask */ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, gc->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); } static int bcm7120_l2_intc_init_one(struct device_node *dn, @@ -264,7 +260,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, goto out_free_l1_data; } - data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words, + data->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * data->n_words, &irq_generic_chip_ops, NULL); if (!data->domain) { ret = -ENOMEM; diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index db4c9721fcf2..1bec5b2cd3f0 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -97,9 +97,8 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; - unsigned long flags; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irqsave)(&gc->lock); /* Save the current mask */ if (save) b->saved_mask = irq_reg_readl(gc, ct->regs.mask); @@ -109,7 +108,6 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save) irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); irq_reg_writel(gc, gc->wake_active, ct->regs.enable); } - irq_gc_unlock_irqrestore(gc, flags); } static void brcmstb_l2_intc_shutdown(struct irq_data *d) @@ -127,9 +125,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; - unsigned long flags; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irqsave)(&gc->lock); if (ct->chip.irq_ack) { /* Clear unmasked non-wakeup interrupts */ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, @@ -139,7 +136,6 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) /* Restore the saved mask */ irq_reg_writel(gc, b->saved_mask, ct->regs.disable); irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); - irq_gc_unlock_irqrestore(gc, flags); } static int __init brcmstb_l2_intc_of_init(struct device_node *np, @@ -182,7 +178,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, goto out_unmap; } - data->domain = irq_domain_add_linear(np, 32, + data->domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &irq_generic_chip_ops, NULL); if (!data->domain) { ret = 
-ENOMEM; diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c index 48c73c948ddf..c4b73ba2323b 100644 --- a/drivers/irqchip/irq-clps711x.c +++ b/drivers/irqchip/irq-clps711x.c @@ -184,8 +184,8 @@ static int __init _clps711x_intc_init(struct device_node *np, clps711x_intc->ops.map = clps711x_intc_irq_map; clps711x_intc->ops.xlate = irq_domain_xlate_onecell; clps711x_intc->domain = - irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs), - 0, 0, &clps711x_intc->ops, NULL); + irq_domain_create_legacy(of_fwnode_handle(np), ARRAY_SIZE(clps711x_irqs), 0, 0, + &clps711x_intc->ops, NULL); if (!clps711x_intc->domain) { err = -ENOMEM; goto out_irqfree; diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index a05a7501e107..66bb39e24a52 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c @@ -351,10 +351,8 @@ static int __init irqcrossbar_init(struct device_node *node, if (err) return err; - domain = irq_domain_add_hierarchy(parent_domain, 0, - cb->max_crossbar_sources, - node, &crossbar_domain_ops, - NULL); + domain = irq_domain_create_hierarchy(parent_domain, 0, cb->max_crossbar_sources, + of_fwnode_handle(node), &crossbar_domain_ops, NULL); if (!domain) { pr_err("%pOF: failed to allocated domain\n", node); return -ENOMEM; diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c index 6710691e4c25..5b7150705d29 100644 --- a/drivers/irqchip/irq-csky-apb-intc.c +++ b/drivers/irqchip/irq-csky-apb-intc.c @@ -50,11 +50,10 @@ static void irq_ck_mask_set_bit(struct irq_data *d) unsigned long ifr = ct->regs.mask - 8; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); *ct->mask_cache |= mask; irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr); - irq_gc_unlock(gc); } static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base, @@ -114,7 +113,7 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent) return -EINVAL; } - root_domain = irq_domain_add_linear(node, nr_irq, + root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq, &irq_generic_chip_ops, NULL); if (!root_domain) { pr_err("C-SKY Intc irq_domain_add failed.\n"); diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c index 4aebd67d4f8f..1d1f5091f26f 100644 --- a/drivers/irqchip/irq-csky-mpintc.c +++ b/drivers/irqchip/irq-csky-mpintc.c @@ -255,7 +255,7 @@ csky_mpintc_init(struct device_node *node, struct device_node *parent) writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR); } - root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops, + root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq, &csky_irqdomain_ops, NULL); if (!root_domain) return -ENXIO; diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c index d7948c55f542..00cdcc90f614 100644 --- a/drivers/irqchip/irq-davinci-cp-intc.c +++ b/drivers/irqchip/irq-davinci-cp-intc.c @@ -204,8 +204,10 @@ static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num return irq_base; } - davinci_cp_intc_irq_domain = irq_domain_add_legacy(node, num_irqs, irq_base, 0, - &davinci_cp_intc_irq_domain_ops, NULL); + davinci_cp_intc_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), num_irqs, + irq_base, 0, + &davinci_cp_intc_irq_domain_ops, + NULL); if (!davinci_cp_intc_irq_domain) { pr_err("%s: unable to create an interrupt domain\n", __func__); diff --git 
a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c index 3b0d78aac13b..eb5a8de82751 100644 --- a/drivers/irqchip/irq-digicolor.c +++ b/drivers/irqchip/irq-digicolor.c @@ -95,7 +95,7 @@ static int __init digicolor_of_init(struct device_node *node, regmap_write(ucregs, UC_IRQ_CONTROL, 1); digicolor_irq_domain = - irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL); + irq_domain_create_linear(of_fwnode_handle(node), 64, &irq_generic_chip_ops, NULL); if (!digicolor_irq_domain) { pr_err("%pOF: unable to create IRQ domain\n", node); return -ENOMEM; diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c index d5c1c750c8d2..4240a0dbf627 100644 --- a/drivers/irqchip/irq-dw-apb-ictl.c +++ b/drivers/irqchip/irq-dw-apb-ictl.c @@ -101,10 +101,9 @@ static void dw_apb_ictl_resume(struct irq_data *d) struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct irq_chip_type *ct = irq_data_get_chip_type(d); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); writel_relaxed(~0, gc->reg_base + ct->regs.enable); writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask); - irq_gc_unlock(gc); } #else #define dw_apb_ictl_resume NULL @@ -173,7 +172,7 @@ static int __init dw_apb_ictl_init(struct device_node *np, else nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L)); - domain = irq_domain_add_linear(np, nrirqs, domain_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs, domain_ops, NULL); if (!domain) { pr_err("%pOF: unable to add irq domain\n", np); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-econet-en751221.c b/drivers/irqchip/irq-econet-en751221.c new file mode 100644 index 000000000000..d83d5eb12795 --- /dev/null +++ b/drivers/irqchip/irq-econet-en751221.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * EN751221 Interrupt Controller Driver. + * + * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller + * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can + * be routed to either VPE but not both, so to support per-CPU interrupts, a + * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In + * this driver, these are called "shadow interrupts". The assignment of shadow + * interrupts is defined by the SoC integrator when wiring the interrupt lines, + * so they are configurable in the device tree. + * + * If an interrupt (say 30) needs per-CPU capability, the SoC integrator + * allocates another IRQ number (say 29) to be its shadow. The device tree + * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts" + * property. + * + * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29, + * telling the hardware to mask VPE#1's view of IRQ 30. 
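As a concrete illustration of the pairing described above (purely illustrative, not part of this patch): with the pair <30 29> in "econet,shadow-interrupts", the driver ends up with a lookup table in which entry 30 holds 29 and entry 29 is marked as a shadow, and mask operations issued from VPE#1 are redirected through it. The standalone sketch below, with hypothetical names, walks such a table the same way the driver's mask path does.

/* Illustrative sketch only -- mirrors the shadow lookup described above. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_IRQ_COUNT  40
#define DEMO_NOT_PERCPU 0xff
#define DEMO_IS_SHADOW  0xfe

static uint8_t demo_shadows[DEMO_IRQ_COUNT];

/* Which hwirq's mask bit a given VPE actually touches */
static unsigned int demo_effective_hwirq(unsigned int hwirq, unsigned int vpe)
{
	uint8_t shadow = demo_shadows[hwirq];

	if (shadow != DEMO_NOT_PERCPU && shadow != DEMO_IS_SHADOW && vpe == 1)
		return shadow;
	return hwirq;
}

int main(void)
{
	for (int i = 0; i < DEMO_IRQ_COUNT; i++)
		demo_shadows[i] = DEMO_NOT_PERCPU;

	/* econet,shadow-interrupts = <30 29>: target 30, shadow 29 */
	demo_shadows[30] = 29;
	demo_shadows[29] = DEMO_IS_SHADOW;

	printf("VPE#0 masks hwirq 30 via bit %u\n", demo_effective_hwirq(30, 0));
	printf("VPE#1 masks hwirq 30 via bit %u\n", demo_effective_hwirq(30, 1));
	return 0;
}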
+ * + * Copyright (C) 2025 Caleb James DeLisle <cjd@cjdns.fr> + */ + +#include <linux/cleanup.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> + +#define IRQ_COUNT 40 + +#define NOT_PERCPU 0xff +#define IS_SHADOW 0xfe + +#define REG_MASK0 0x04 +#define REG_MASK1 0x50 +#define REG_PENDING0 0x08 +#define REG_PENDING1 0x54 + +/** + * @membase: Base address of the interrupt controller registers + * @interrupt_shadows: Array of all interrupts, for each value, + * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow + * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt + * - else: This is a per-cpu interrupt whose shadow is the value + */ +static struct { + void __iomem *membase; + u8 interrupt_shadows[IRQ_COUNT]; +} econet_intc __ro_after_init; + +static DEFINE_RAW_SPINLOCK(irq_lock); + +/* IRQs must be disabled */ +static void econet_wreg(u32 reg, u32 val, u32 mask) +{ + u32 v; + + guard(raw_spinlock)(&irq_lock); + + v = ioread32(econet_intc.membase + reg); + v &= ~mask; + v |= val & mask; + iowrite32(v, econet_intc.membase + reg); +} + +/* IRQs must be disabled */ +static void econet_chmask(u32 hwirq, bool unmask) +{ + u32 reg, mask; + u8 shadow; + + /* + * If the IRQ is a shadow, it should never be manipulated directly. + * It should only be masked/unmasked as a result of the "real" per-cpu + * irq being manipulated by a thread running on VPE#1. + * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask. + * This is single processor only, so smp_processor_id() never exceeds 1. + */ + shadow = econet_intc.interrupt_shadows[hwirq]; + if (WARN_ON_ONCE(shadow == IS_SHADOW)) + return; + else if (shadow != NOT_PERCPU && smp_processor_id() == 1) + hwirq = shadow; + + if (hwirq >= 32) { + reg = REG_MASK1; + mask = BIT(hwirq - 32); + } else { + reg = REG_MASK0; + mask = BIT(hwirq); + } + + econet_wreg(reg, unmask ? mask : 0, mask); +} + +/* IRQs must be disabled */ +static void econet_intc_mask(struct irq_data *d) +{ + econet_chmask(d->hwirq, false); +} + +/* IRQs must be disabled */ +static void econet_intc_unmask(struct irq_data *d) +{ + econet_chmask(d->hwirq, true); +} + +static void econet_mask_all(void) +{ + /* IRQs are generally disabled during init, but guarding here makes it non-obligatory. 
*/ + guard(irqsave)(); + econet_wreg(REG_MASK0, 0, ~0); + econet_wreg(REG_MASK1, 0, ~0); +} + +static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset) +{ + int hwirq; + + while (pending) { + hwirq = fls(pending) - 1; + generic_handle_domain_irq(d, hwirq + offset); + pending &= ~BIT(hwirq); + } +} + +static void econet_intc_from_parent(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_domain *domain; + u32 pending0, pending1; + + chained_irq_enter(chip, desc); + + pending0 = ioread32(econet_intc.membase + REG_PENDING0); + pending1 = ioread32(econet_intc.membase + REG_PENDING1); + + if (unlikely(!(pending0 | pending1))) { + spurious_interrupt(); + } else { + domain = irq_desc_get_handler_data(desc); + econet_intc_handle_pending(domain, pending0, 0); + econet_intc_handle_pending(domain, pending1, 32); + } + + chained_irq_exit(chip, desc); +} + +static const struct irq_chip econet_irq_chip; + +static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq) +{ + int ret; + + if (hwirq >= IRQ_COUNT) { + pr_err("%s: hwirq %lu out of range\n", __func__, hwirq); + return -EINVAL; + } else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) { + pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq); + return -EINVAL; + } + + if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) { + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq); + } else { + irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq); + ret = irq_set_percpu_devid(irq); + if (ret) + pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret); + } + + irq_set_chip_data(irq, NULL); + return 0; +} + +static const struct irq_chip econet_irq_chip = { + .name = "en751221-intc", + .irq_unmask = econet_intc_unmask, + .irq_mask = econet_intc_mask, + .irq_mask_ack = econet_intc_mask, +}; + +static const struct irq_domain_ops econet_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = econet_intc_map +}; + +static int __init get_shadow_interrupts(struct device_node *node) +{ + const char *field = "econet,shadow-interrupts"; + int num_shadows; + + num_shadows = of_property_count_u32_elems(node, field); + + memset(econet_intc.interrupt_shadows, NOT_PERCPU, + sizeof(econet_intc.interrupt_shadows)); + + if (num_shadows <= 0) { + return 0; + } else if (num_shadows % 2) { + pr_err("%pOF: %s count is odd, ignoring\n", node, field); + return 0; + } + + u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL); + if (!shadows) + return -ENOMEM; + + if (of_property_read_u32_array(node, field, shadows, num_shadows)) { + pr_err("%pOF: Failed to read %s\n", node, field); + return -EINVAL; + } + + for (int i = 0; i < num_shadows; i += 2) { + u32 shadow = shadows[i + 1]; + u32 target = shadows[i]; + + if (shadow > IRQ_COUNT) { + pr_err("%pOF: %s[%d] shadow(%d) out of range\n", + node, field, i + 1, shadow); + continue; + } + + if (target >= IRQ_COUNT) { + pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target); + continue; + } + + if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) { + pr_err("%pOF: %s[%d] target(%d) already has a shadow\n", + node, field, i, target); + continue; + } + + if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) { + pr_err("%pOF: %s[%d] shadow(%d) already has a target\n", + node, field, i + 1, shadow); + continue; + } + + econet_intc.interrupt_shadows[target] = shadow; + econet_intc.interrupt_shadows[shadow] = 
IS_SHADOW; + } + + return 0; +} + +static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent) +{ + struct irq_domain *domain; + struct resource res; + int ret, irq; + + ret = get_shadow_interrupts(node); + if (ret) + return ret; + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node); + return -EINVAL; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("%pOF: DT: Failed to get 'reg'\n", node); + ret = -EINVAL; + goto err_dispose_mapping; + } + + if (!request_mem_region(res.start, resource_size(&res), res.name)) { + pr_err("%pOF: Failed to request memory\n", node); + ret = -EBUSY; + goto err_dispose_mapping; + } + + econet_intc.membase = ioremap(res.start, resource_size(&res)); + if (!econet_intc.membase) { + pr_err("%pOF: Failed to remap membase\n", node); + ret = -ENOMEM; + goto err_release; + } + + econet_mask_all(); + + domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT, + &econet_domain_ops, NULL); + if (!domain) { + pr_err("%pOF: Failed to add irqdomain\n", node); + ret = -ENOMEM; + goto err_unmap; + } + + irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain); + + return 0; + +err_unmap: + iounmap(econet_intc.membase); +err_release: + release_mem_region(res.start, resource_size(&res)); +err_dispose_mapping: + irq_dispose_mapping(irq); + return ret; +} + +IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init); diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c index b91c358ea6db..a59a66d79da6 100644 --- a/drivers/irqchip/irq-ftintc010.c +++ b/drivers/irqchip/irq-ftintc010.c @@ -180,8 +180,9 @@ static int __init ft010_of_init_irq(struct device_node *node, writel(0, FT010_IRQ_MASK(f->base)); writel(0, FT010_FIQ_MASK(f->base)); - f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0, - &ft010_irqdomain_ops, f); + f->domain = irq_domain_create_simple(of_fwnode_handle(node), + FT010_NUM_IRQS, 0, + &ft010_irqdomain_ops, f); set_handle_irq(ft010_irqchip_handle_irq); return 0; diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index cc6a6c1585d2..24ef5af569fe 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -26,7 +26,7 @@ #include <linux/irqchip/arm-gic.h> #include <linux/irqchip/arm-gic-common.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> /* * MSI_TYPER: @@ -261,23 +261,23 @@ static struct msi_parent_ops gicv2m_msi_parent_ops = { static __init int gicv2m_allocate_domains(struct irq_domain *parent) { - struct irq_domain *inner_domain; + struct irq_domain_info info = { + .ops = &gicv2m_domain_ops, + .parent = parent, + }; struct v2m_data *v2m; v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry); if (!v2m) return 0; - inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode, - &gicv2m_domain_ops, v2m); - if (!inner_domain) { + info.host_data = v2m; + info.fwnode = v2m->fwnode; + + if (!msi_create_parent_irq_domain(&info, &gicv2m_msi_parent_ops)) { pr_err("Failed to create GICv2m domain\n"); return -ENOMEM; } - - irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); - inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - inner_domain->msi_parent_ops = &gicv2m_msi_parent_ops; return 0; } diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c index 8e87fc35f8aa..11549d85f23b 100644 --- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c +++ 
b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c @@ -152,7 +152,7 @@ static void __init its_fsl_mc_of_msi_init(void) if (!of_property_read_bool(np, "msi-controller")) continue; - its_fsl_mc_msi_init_one(of_node_to_fwnode(np), + its_fsl_mc_msi_init_one(of_fwnode_handle(np), np->full_name); } } diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c index c5a7eb1c0419..a5e110ffdd88 100644 --- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c +++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c @@ -8,7 +8,7 @@ #include <linux/pci.h> #include "irq-gic-common.h" -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ MSI_FLAG_USE_DEF_CHIP_OPS | \ @@ -68,17 +68,6 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev); /* - * @domain->msi_domain_info->hwsize contains the size of the - * MSI[-X] domain, but vector allocation happens one by one. This - * needs some thought when MSI comes into play as the size of MSI - * might be unknown at domain creation time and therefore set to - * MSI_MAX_INDEX. - */ - msi_info = msi_get_domain_info(domain); - if (msi_info->hwsize > nvec) - nvec = msi_info->hwsize; - - /* * Always allocate a power of 2, and special case device 0 for * broken systems where the DevID is not wired (and all devices * appear as DevID 0). For that reason, we generously allocate a @@ -118,6 +107,14 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, index++; } while (!ret); + if (ret) { + struct device_node *np = NULL; + + ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id); + if (np) + of_node_put(np); + } + return ret; } @@ -143,14 +140,6 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, /* ITS specific DeviceID, as the core ITS ignores dev. */ info->scratchpad[0].ul = dev_id; - /* - * @domain->msi_domain_info->hwsize contains the size of the device - * domain, but vector allocation happens one by one. - */ - msi_info = msi_get_domain_info(domain); - if (msi_info->hwsize > nvec) - nvec = msi_info->hwsize; - /* Allocate at least 32 MSIs, and always as a power of 2 */ nvec = max_t(int, 32, roundup_pow_of_two(nvec)); @@ -159,6 +148,14 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, dev, nvec, info); } +static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) +{ + struct msi_domain_info *msi_info; + + msi_info = msi_get_domain_info(domain->parent); + msi_info->ops->msi_teardown(domain->parent, info); +} + static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain, struct irq_domain *real_parent, struct msi_domain_info *info) { @@ -182,6 +179,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain, * %MSI_MAX_INDEX. */ info->ops->msi_prepare = its_pci_msi_prepare; + info->ops->msi_teardown = its_msi_teardown; break; case DOMAIN_BUS_DEVICE_MSI: case DOMAIN_BUS_WIRED_TO_MSI: @@ -190,6 +188,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain, * size is also known at domain creation time. */ info->ops->msi_prepare = its_pmsi_prepare; + info->ops->msi_teardown = its_msi_teardown; break; default: /* Confused. How did the lib return true? 
*/ diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 0115ad6c8259..d54fa0638dc4 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -41,7 +41,7 @@ #include <asm/exception.h> #include "irq-gic-common.h" -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) @@ -125,6 +125,8 @@ struct its_node { int vlpi_redist_offset; }; +static DEFINE_PER_CPU(struct its_node *, local_4_1_its); + #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) @@ -2778,6 +2780,7 @@ static u64 inherit_vpe_l1_table_from_its(void) } val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + *this_cpu_ptr(&local_4_1_its) = its; return val; } @@ -2815,6 +2818,7 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu); return val; } @@ -3620,8 +3624,33 @@ out: return err; } +static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) +{ + struct its_device *its_dev = info->scratchpad[0].ptr; + + guard(mutex)(&its_dev->its->dev_alloc_lock); + + /* If the device is shared, keep everything around */ + if (its_dev->shared) + return; + + /* LPIs should have been already unmapped at this stage */ + if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis))) + return; + + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + + /* Unmap device/itt, and get rid of the tracking */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); +} + static struct msi_domain_ops its_msi_domain_ops = { .msi_prepare = its_msi_prepare, + .msi_teardown = its_msi_teardown, }; static int its_irq_gic_domain_alloc(struct irq_domain *domain, @@ -3722,7 +3751,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct its_device *its_dev = irq_data_get_irq_chip_data(d); - struct its_node *its = its_dev->its; int i; bitmap_release_region(its_dev->event_map.lpi_map, @@ -3736,26 +3764,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, irq_domain_reset_irq_data(data); } - mutex_lock(&its->dev_alloc_lock); - - /* - * If all interrupts have been freed, start mopping the - * floor. This is conditioned on the device not being shared. 
- */ - if (!its_dev->shared && - bitmap_empty(its_dev->event_map.lpi_map, - its_dev->event_map.nr_lpis)) { - its_lpi_free(its_dev->event_map.lpi_map, - its_dev->event_map.lpi_base, - its_dev->event_map.nr_lpis); - - /* Unmap device/itt */ - its_send_mapd(its_dev, 0); - its_free_device(its_dev); - } - - mutex_unlock(&its->dev_alloc_lock); - irq_domain_free_irqs_parent(domain, virq, nr_irqs); } @@ -4180,7 +4188,7 @@ static struct irq_chip its_vpe_irq_chip = { static struct its_node *find_4_1_its(void) { - static struct its_node *its = NULL; + struct its_node *its = *this_cpu_ptr(&local_4_1_its); if (!its) { list_for_each_entry(its, &its_nodes, entry) { @@ -5118,7 +5126,12 @@ out_unmap: static int its_init_domain(struct its_node *its) { - struct irq_domain *inner_domain; + struct irq_domain_info dom_info = { + .fwnode = its->fwnode_handle, + .ops = &its_domain_ops, + .domain_flags = its->msi_domain_flags, + .parent = its_parent, + }; struct msi_domain_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); @@ -5127,21 +5140,12 @@ static int its_init_domain(struct its_node *its) info->ops = &its_msi_domain_ops; info->data = its; + dom_info.host_data = info; - inner_domain = irq_domain_create_hierarchy(its_parent, - its->msi_domain_flags, 0, - its->fwnode_handle, &its_domain_ops, - info); - if (!inner_domain) { + if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) { kfree(info); return -ENOMEM; } - - irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); - - inner_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops; - inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - return 0; } @@ -5518,7 +5522,7 @@ static struct its_node __init *its_node_init(struct resource *res, its->base = its_base; its->phys_base = res->start; its->get_msi_base = its_irq_get_msi_base; - its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE; its->numa_node = numa_node; its->fwnode_handle = handle; diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index 647b18e24e0c..aa11bbe8026a 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -18,7 +18,7 @@ #include <linux/irqchip/arm-gic-v3.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> struct mbi_range { u32 spi_start; @@ -206,17 +206,13 @@ static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = { static int mbi_allocate_domain(struct irq_domain *parent) { - struct irq_domain *nexus_domain; + struct irq_domain_info info = { + .fwnode = parent->fwnode, + .ops = &mbi_domain_ops, + .parent = parent, + }; - nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode, - &mbi_domain_ops, NULL); - if (!nexus_domain) - return -ENOMEM; - - irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS); - nexus_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - nexus_domain->msi_parent_ops = &gic_v3_mbi_msi_parent_ops; - return 0; + return msi_create_parent_irq_domain(&info, &gic_v3_mbi_msi_parent_ops) ? 
0 : -ENOMEM; } int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 270d7a4d85a6..efc791c43d44 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1826,7 +1826,7 @@ static int partition_domain_translate(struct irq_domain *d, ppi_idx = __gic_get_ppi_index(ppi_intid); ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], - of_node_to_fwnode(np)); + of_fwnode_handle(np)); if (ret < 0) return ret; @@ -2192,7 +2192,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) part = &parts[part_idx]; - part->partition_id = of_node_to_fwnode(child_part); + part->partition_id = of_fwnode_handle(child_part); pr_info("GIC: PPI partition %pOFn[%d] { ", child_part, part_idx); diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c index 513f6edbbe95..a8b23b507ecd 100644 --- a/drivers/irqchip/irq-goldfish-pic.c +++ b/drivers/irqchip/irq-goldfish-pic.c @@ -101,10 +101,9 @@ static int __init goldfish_pic_of_init(struct device_node *of_node, irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0, IRQ_NOPROBE | IRQ_LEVEL, 0); - gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS, - GFPIC_IRQ_BASE, 0, - &goldfish_irq_domain_ops, - NULL); + gfpic->irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), GFPIC_NR_IRQS, + GFPIC_IRQ_BASE, 0, &goldfish_irq_domain_ops, + NULL); if (!gfpic->irq_domain) { pr_err("Failed to add irqdomain!\n"); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 31c3f70a5d5e..b7958c5a1221 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -386,10 +386,8 @@ hip04_of_init(struct device_node *node, struct device_node *parent) return -EINVAL; } - hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base, - 0, - &hip04_irq_domain_ops, - &hip04_data); + hip04_data.domain = irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0, + &hip04_irq_domain_ops, &hip04_data); if (WARN_ON(!hip04_data.domain)) return -EINVAL; diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c index 115bdcffab24..91b2f587119c 100644 --- a/drivers/irqchip/irq-i8259.c +++ b/drivers/irqchip/irq-i8259.c @@ -313,8 +313,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node) init_8259A(0); - domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0, - &i8259A_ops, NULL); + domain = irq_domain_create_legacy(of_fwnode_handle(node), 16, I8259A_IRQ_BASE, 0, + &i8259A_ops, NULL); if (!domain) panic("Failed to add i8259 IRQ domain"); diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c index 0732a0e9af62..f8324fb1fe8f 100644 --- a/drivers/irqchip/irq-idt3243x.c +++ b/drivers/irqchip/irq-idt3243x.c @@ -72,7 +72,7 @@ static int idt_pic_init(struct device_node *of_node, struct device_node *parent) goto out_unmap_irq; } - domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS, + domain = irq_domain_create_linear(of_fwnode_handle(of_node), IDT_PIC_NR_IRQS, &irq_generic_chip_ops, NULL); if (!domain) { pr_err("Failed to add irqdomain!\n"); diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c index 85f80bac0961..f0410d5d7315 100644 --- a/drivers/irqchip/irq-imgpdc.c +++ b/drivers/irqchip/irq-imgpdc.c @@ -372,7 +372,7 @@ static int pdc_intc_probe(struct platform_device *pdev) priv->syswake_irq = irq; /* Set up an IRQ domain */ - priv->domain = 
irq_domain_add_linear(node, 16, &irq_generic_chip_ops, + priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 16, &irq_generic_chip_ops, priv); if (unlikely(!priv->domain)) { dev_err(&pdev->dev, "cannot add IRQ domain\n"); diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 095ae8e3217e..b91f5c14b405 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -240,8 +240,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, return -ENOMEM; } - domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, - node, &gpcv2_irqchip_data_domain_ops, cd); + domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS, + of_fwnode_handle(node), &gpcv2_irqchip_data_domain_ops, cd); if (!domain) { iounmap(cd->gpc_base); kfree(cd); diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c index 787543d07565..5f9b204d350b 100644 --- a/drivers/irqchip/irq-imx-intmux.c +++ b/drivers/irqchip/irq-imx-intmux.c @@ -254,7 +254,7 @@ static int imx_intmux_probe(struct platform_device *pdev) goto out; } - domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops, + domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &imx_intmux_domain_ops, &data->irqchip_data[i]); if (!domain) { ret = -ENOMEM; diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c index afbfcce3b1e3..6dc9ac48fee5 100644 --- a/drivers/irqchip/irq-imx-irqsteer.c +++ b/drivers/irqchip/irq-imx-irqsteer.c @@ -212,7 +212,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev) /* steer all IRQs into configured channel */ writel_relaxed(BIT(data->channel), data->regs + CHANCTRL); - data->domain = irq_domain_add_linear(np, data->reg_num * 32, + data->domain = irq_domain_create_linear(of_fwnode_handle(np), data->reg_num * 32, &imx_irqsteer_domain_ops, data); if (!data->domain) { dev_err(&pdev->dev, "failed to create IRQ domain\n"); diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c index 69aacdfc8bef..137da1927d14 100644 --- a/drivers/irqchip/irq-imx-mu-msi.c +++ b/drivers/irqchip/irq-imx-mu-msi.c @@ -24,7 +24,7 @@ #include <linux/pm_domain.h> #include <linux/spinlock.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #define IMX_MU_CHANS 4 diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c index 3363f83bd7e9..794ecba717c9 100644 --- a/drivers/irqchip/irq-ingenic-tcu.c +++ b/drivers/irqchip/irq-ingenic-tcu.c @@ -52,11 +52,10 @@ static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d) struct regmap *map = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); regmap_write(map, ct->regs.ack, mask); regmap_write(map, ct->regs.enable, mask); *ct->mask_cache |= mask; - irq_gc_unlock(gc); } static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) @@ -66,10 +65,9 @@ static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d) struct regmap *map = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); regmap_write(map, ct->regs.disable, mask); *ct->mask_cache &= ~mask; - irq_gc_unlock(gc); } static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) @@ -79,10 +77,9 @@ static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d) struct regmap *map = gc->private; u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); regmap_write(map, ct->regs.ack, mask); regmap_write(map, ct->regs.disable, 
mask); - irq_gc_unlock(gc); } static int __init ingenic_tcu_irq_init(struct device_node *np, @@ -114,8 +111,8 @@ static int __init ingenic_tcu_irq_init(struct device_node *np, tcu->nb_parent_irqs = irqs; - tcu->domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, - NULL); + tcu->domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &irq_generic_chip_ops, + NULL); if (!tcu->domain) { ret = -ENOMEM; goto err_free_tcu; diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index cee839ca627e..52393724f213 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -90,8 +90,8 @@ static int __init ingenic_intc_of_init(struct device_node *node, goto out_unmap_irq; } - domain = irq_domain_add_linear(node, num_chips * 32, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), num_chips * 32, + &irq_generic_chip_ops, NULL); if (!domain) { err = -ENOMEM; goto out_unmap_base; diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c index f23b02f62a5c..a9a5a52b818a 100644 --- a/drivers/irqchip/irq-ixp4xx.c +++ b/drivers/irqchip/irq-ixp4xx.c @@ -261,7 +261,7 @@ static int __init ixp4xx_of_init_irq(struct device_node *np, pr_crit("IXP4XX: could not ioremap interrupt controller\n"); return -ENODEV; } - fwnode = of_node_to_fwnode(np); + fwnode = of_fwnode_handle(np); /* These chip variants have 64 interrupts */ is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") || diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index 1f613eb7b7f0..94c05cf974be 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -107,9 +107,8 @@ static int __init aic_irq_of_init(struct device_node *node, if (ret < 0) return ret; - domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq, - &jcore_aic_irqdomain_ops, - &jcore_aic); + domain = irq_domain_create_legacy(of_fwnode_handle(node), dom_sz - min_irq, min_irq, + min_irq, &jcore_aic_irqdomain_ops, &jcore_aic); if (!domain) return -ENOMEM; diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index 37e1a03fcbb4..c9e902b7bf48 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -157,8 +157,8 @@ static int keystone_irq_probe(struct platform_device *pdev) kirq->chip.irq_mask = keystone_irq_setmask; kirq->chip.irq_unmask = keystone_irq_unmask; - kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ, - &keystone_irq_ops, kirq); + kirq->irqd = irq_domain_create_linear(of_fwnode_handle(np), KEYSTONE_N_IRQ, + &keystone_irq_ops, kirq); if (!kirq->irqd) { dev_err(dev, "IRQ domain registration failed\n"); return -ENODEV; diff --git a/drivers/irqchip/irq-lan966x-oic.c b/drivers/irqchip/irq-lan966x-oic.c index 41ac880e3b87..11d3a0ffa261 100644 --- a/drivers/irqchip/irq-lan966x-oic.c +++ b/drivers/irqchip/irq-lan966x-oic.c @@ -71,14 +71,12 @@ static unsigned int lan966x_oic_irq_startup(struct irq_data *data) struct lan966x_oic_chip_regs *chip_regs = gc->private; u32 map; - irq_gc_lock(gc); - - /* Map the source interrupt to the destination */ - map = irq_reg_readl(gc, chip_regs->reg_off_map); - map |= data->mask; - irq_reg_writel(gc, map, chip_regs->reg_off_map); - - irq_gc_unlock(gc); + scoped_guard (raw_spinlock, &gc->lock) { + /* Map the source interrupt to the destination */ + map = irq_reg_readl(gc, chip_regs->reg_off_map); + map |= data->mask; + irq_reg_writel(gc, map, chip_regs->reg_off_map); + } ct->chip.irq_ack(data); 
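The locking changes repeated across these drivers (bcm7120-l2, brcmstb-l2, csky-apb-intc, dw-apb-ictl, ingenic-tcu, lan966x-oic, liointc, ocelot, ...) all have the same shape: open-coded irq_gc_lock()/irq_gc_unlock() and _irqsave/_irqrestore pairs are replaced by the scope-based guards from <linux/cleanup.h>, which drop gc->lock automatically when the scope ends. A minimal before/after sketch of the pattern, using a made-up callback name rather than code from any one driver:

/* Illustrative conversion sketch, not taken from any driver above. */
#include <linux/cleanup.h>
#include <linux/irq.h>

/* Before: explicit lock/unlock pair around the register update */
static void demo_gc_mask_old(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	irq_reg_writel(gc, d->mask, ct->regs.disable);
	irq_gc_unlock(gc);
}

/*
 * After: guard() acquires gc->lock and releases it on every return path,
 * so early returns no longer need a matching unlock.
 */
static void demo_gc_mask_new(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	guard(raw_spinlock)(&gc->lock);
	irq_reg_writel(gc, d->mask, ct->regs.disable);
}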
ct->chip.irq_unmask(data); @@ -95,14 +93,12 @@ static void lan966x_oic_irq_shutdown(struct irq_data *data) ct->chip.irq_mask(data); - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); /* Unmap the interrupt */ map = irq_reg_readl(gc, chip_regs->reg_off_map); map &= ~data->mask; irq_reg_writel(gc, map, chip_regs->reg_off_map); - - irq_gc_unlock(gc); } static int lan966x_oic_irq_set_type(struct irq_data *data, @@ -224,7 +220,7 @@ static int lan966x_oic_probe(struct platform_device *pdev) .exit = lan966x_oic_chip_exit, }; struct irq_domain_info d_info = { - .fwnode = of_node_to_fwnode(pdev->dev.of_node), + .fwnode = of_fwnode_handle(pdev->dev.of_node), .domain_flags = IRQ_DOMAIN_FLAG_DESTROY_GC, .size = LAN966X_OIC_NR_IRQ, .hwirq_max = LAN966X_OIC_NR_IRQ, diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c index 80e55955a29f..bf52dc8345f5 100644 --- a/drivers/irqchip/irq-loongarch-avec.c +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -18,7 +18,7 @@ #include <asm/loongarch.h> #include <asm/setup.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #include "irq-loongson.h" #define VECTORS_PER_REG 64 diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index e62dab4c97fc..950bc087e388 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -100,7 +100,7 @@ static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = { static int __init cpuintc_of_init(struct device_node *of_node, struct device_node *parent) { - cpuintc_handle = of_node_to_fwnode(of_node); + cpuintc_handle = of_fwnode_handle(of_node); irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM, &loongarch_cpu_intc_irq_domain_ops, NULL); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index bb79e19dfb59..b2860eb2d32c 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -554,7 +554,7 @@ static int __init eiointc_of_init(struct device_node *of_node, priv->vec_count = VEC_COUNT; priv->node = 0; - priv->domain_handle = of_node_to_fwnode(of_node); + priv->domain_handle = of_fwnode_handle(of_node); ret = eiointc_init(priv, parent_irq, 0); if (ret < 0) diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c index 5da02c7ad0b3..d8558eb35044 100644 --- a/drivers/irqchip/irq-loongson-htvec.c +++ b/drivers/irqchip/irq-loongson-htvec.c @@ -248,7 +248,7 @@ static int htvec_of_init(struct device_node *node, } err = htvec_init(res.start, resource_size(&res), - num_parents, parent_irq, of_node_to_fwnode(node)); + num_parents, parent_irq, of_fwnode_handle(node)); if (err < 0) return err; diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 2b1bd4a96665..0033c2188abc 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -116,9 +116,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); u32 mask = data->mask; - unsigned long flags; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock)(&gc->lock); switch (type) { case IRQ_TYPE_LEVEL_HIGH: liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false); @@ -137,10 +136,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type) liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true); break; default: - irq_gc_unlock_irqrestore(gc, flags); return -EINVAL; 
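The other mechanical change running through this series is the move from the device-node based irq_domain_add_*() helpers to the fwnode based irq_domain_create_*() variants, with of_fwnode_handle() supplying the fwnode (and existing of_node_to_fwnode() callers switched to of_fwnode_handle() as well). A condensed sketch of the shape of that conversion, using placeholder names:

/* Illustrative only: old vs. new irq domain creation for an OF-probed controller. */
#include <linux/irqdomain.h>
#include <linux/of.h>

static const struct irq_domain_ops demo_domain_ops;

static int demo_intc_of_init(struct device_node *node)
{
	struct irq_domain *d;

	/* Old: d = irq_domain_add_linear(node, 32, &demo_domain_ops, NULL); */

	/* New: same semantics, keyed off the fwnode handle instead of the OF node */
	d = irq_domain_create_linear(of_fwnode_handle(node), 32, &demo_domain_ops, NULL);
	if (!d)
		return -ENOMEM;

	return 0;
}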
} - irq_gc_unlock_irqrestore(gc, flags); irqd_set_trigger_type(data, type); return 0; @@ -157,10 +154,9 @@ static void liointc_suspend(struct irq_chip_generic *gc) static void liointc_resume(struct irq_chip_generic *gc) { struct liointc_priv *priv = gc->private; - unsigned long flags; int i; - irq_gc_lock_irqsave(gc, flags); + guard(raw_spinlock_irqsave)(&gc->lock); /* Disable all at first */ writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE); /* Restore map cache */ @@ -170,7 +166,6 @@ static void liointc_resume(struct irq_chip_generic *gc) writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE); /* Restore mask cache */ writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE); - irq_gc_unlock_irqrestore(gc, flags); } static int parent_irq[LIOINTC_NUM_PARENT]; @@ -363,7 +358,7 @@ static int __init liointc_of_init(struct device_node *node, } err = liointc_init(res.start, resource_size(&res), - revision, of_node_to_fwnode(node), node); + revision, of_fwnode_handle(node), node); if (err < 0) return err; diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index 9c62108b3ad5..a0257c7bef10 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -15,7 +15,7 @@ #include <linux/pci.h> #include <linux/slab.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #include "irq-loongson.h" static int nr_pics; @@ -243,7 +243,7 @@ static int pch_msi_of_init(struct device_node *node, struct device_node *parent) return -EINVAL; } - err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node)); + err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_fwnode_handle(node)); if (err < 0) return err; diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 69efda35a8e7..62e6bf3a0611 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -392,7 +392,7 @@ static int pch_pic_of_init(struct device_node *node, } err = pch_pic_init(res.start, resource_size(&res), vec_base, - parent_domain, of_node_to_fwnode(node), 0); + parent_domain, of_fwnode_handle(node), 0); if (err < 0) return err; diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c index 4d70a857133f..14cca44baa14 100644 --- a/drivers/irqchip/irq-lpc32xx.c +++ b/drivers/irqchip/irq-lpc32xx.c @@ -210,8 +210,8 @@ static int __init lpc32xx_of_ic_init(struct device_node *node, return -EINVAL; } - irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS, - &lpc32xx_irq_domain_ops, irqc); + irqc->domain = irq_domain_create_linear(of_fwnode_handle(node), NR_LPC32XX_IC_IRQS, + &lpc32xx_irq_domain_ops, irqc); if (!irqc->domain) { pr_err("unable to add irq domain\n"); iounmap(irqc->base); diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c index 139f26b0a6ef..50a7b38381b9 100644 --- a/drivers/irqchip/irq-ls-extirq.c +++ b/drivers/irqchip/irq-ls-extirq.c @@ -208,8 +208,8 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent) of_device_is_compatible(node, "fsl,ls1043a-extirq"); raw_spin_lock_init(&priv->lock); - domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node, - &extirq_domain_ops, priv); + domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node), + &extirq_domain_ops, priv); if (!domain) { ret = -ENOMEM; goto err_add_hierarchy; diff --git a/drivers/irqchip/irq-ls-scfg-msi.c 
b/drivers/irqchip/irq-ls-scfg-msi.c index 3cb80796cc7c..84bc5e4b47cf 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c @@ -215,17 +215,17 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc) static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data) { /* Initialize MSI domain parent */ - msi_data->parent = irq_domain_add_linear(NULL, - msi_data->irqs_num, - &ls_scfg_msi_domain_ops, - msi_data); + msi_data->parent = irq_domain_create_linear(NULL, + msi_data->irqs_num, + &ls_scfg_msi_domain_ops, + msi_data); if (!msi_data->parent) { dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n"); return -ENOMEM; } msi_data->msi_domain = pci_msi_create_irq_domain( - of_node_to_fwnode(msi_data->pdev->dev.of_node), + of_fwnode_handle(msi_data->pdev->dev.of_node), &ls_scfg_msi_domain_info, msi_data->parent); if (!msi_data->msi_domain) { diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c index 77a3f7dfaaf0..589d32007fca 100644 --- a/drivers/irqchip/irq-ls1x.c +++ b/drivers/irqchip/irq-ls1x.c @@ -126,8 +126,8 @@ static int __init ls1x_intc_of_init(struct device_node *node, } /* Set up an IRQ domain */ - priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, - NULL); + priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &irq_generic_chip_ops, + NULL); if (!priv->domain) { pr_err("ls1x-irq: cannot add IRQ domain\n"); err = -ENOMEM; diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c index 5dcd94c000a2..516a3a0e359c 100644 --- a/drivers/irqchip/irq-mchp-eic.c +++ b/drivers/irqchip/irq-mchp-eic.c @@ -248,8 +248,9 @@ static int mchp_eic_init(struct device_node *node, struct device_node *parent) eic->irqs[i] = irq.args[1]; } - eic->domain = irq_domain_add_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ, - node, &mchp_eic_domain_ops, eic); + eic->domain = irq_domain_create_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ, + of_fwnode_handle(node), &mchp_eic_domain_ops, + eic); if (!eic->domain) { pr_err("%pOF: Failed to add domain\n", node); ret = -ENODEV; diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index 0a25536a5d07..7d177626d64b 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -607,7 +607,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node * domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->params->nr_hwirq, - of_node_to_fwnode(node), + of_fwnode_handle(node), &meson_gpio_irq_domain_ops, ctl); if (!domain) { diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 0c7ae71a0af0..ac784ef3ed4b 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -238,11 +238,9 @@ static void mips_cpu_register_ipi_domain(struct device_node *of_node) struct cpu_ipi_domain_state *ipi_domain_state; ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL); - ipi_domain = irq_domain_add_hierarchy(irq_domain, - IRQ_DOMAIN_FLAG_IPI_SINGLE, - 2, of_node, - &mips_cpu_ipi_chip_ops, - ipi_domain_state); + ipi_domain = irq_domain_create_hierarchy(irq_domain, IRQ_DOMAIN_FLAG_IPI_SINGLE, 2, + of_fwnode_handle(of_node), + &mips_cpu_ipi_chip_ops, ipi_domain_state); if (!ipi_domain) panic("Failed to add MIPS CPU IPI domain"); irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI); @@ -260,9 +258,8 @@ static void __init __mips_cpu_irq_init(struct device_node *of_node) clear_c0_status(ST0_IM); clear_c0_cause(CAUSEF_IP); - irq_domain = 
irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0, - &mips_cpu_intc_irq_domain_ops, - NULL); + irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), 8, MIPS_CPU_IRQ_BASE, 0, + &mips_cpu_intc_irq_domain_ops, NULL); if (!irq_domain) panic("Failed to add irqdomain for MIPS CPU"); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index bca8053864b2..34e8d09c12a0 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -841,10 +841,10 @@ static int gic_register_ipi_domain(struct device_node *node) struct irq_domain *gic_ipi_domain; unsigned int v[2], num_ipis; - gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, - IRQ_DOMAIN_FLAG_IPI_PER_CPU, - GIC_NUM_LOCAL_INTRS + gic_shared_intrs, - node, &gic_ipi_domain_ops, NULL); + gic_ipi_domain = irq_domain_create_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, + GIC_NUM_LOCAL_INTRS + gic_shared_intrs, + of_fwnode_handle(node), &gic_ipi_domain_ops, + NULL); if (!gic_ipi_domain) { pr_err("Failed to add IPI domain"); return -ENXIO; @@ -963,9 +963,10 @@ static int __init gic_of_init(struct device_node *node, gic_irq_dispatch); } - gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + - gic_shared_intrs, 0, - &gic_irq_domain_ops, NULL); + gic_irq_domain = irq_domain_create_simple(of_fwnode_handle(node), + GIC_NUM_LOCAL_INTRS + + gic_shared_intrs, 0, + &gic_irq_domain_ops, NULL); if (!gic_irq_domain) { pr_err("Failed to add IRQ domain"); return -ENXIO; diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 25cf4f80e767..09e640430208 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c @@ -261,9 +261,9 @@ static int __init mmp_init_bases(struct device_node *node) } icu_data[0].virq_base = 0; - icu_data[0].domain = irq_domain_add_linear(node, nr_irqs, - &mmp_irq_domain_ops, - &icu_data[0]); + icu_data[0].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, + &mmp_irq_domain_ops, + &icu_data[0]); for (irq = 0; irq < nr_irqs; irq++) { ret = irq_create_mapping(icu_data[0].domain, irq); if (!ret) { @@ -391,9 +391,9 @@ static int __init mmp2_mux_of_init(struct device_node *node, return -EINVAL; icu_data[i].virq_base = 0; - icu_data[i].domain = irq_domain_add_linear(node, nr_irqs, - &mmp_irq_domain_ops, - &icu_data[i]); + icu_data[i].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, + &mmp_irq_domain_ops, + &icu_data[i]); for (irq = 0; irq < nr_irqs; irq++) { ret = irq_create_mapping(icu_data[i].domain, irq); if (!ret) { diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c index 3dc745b14caf..8cbc191f750b 100644 --- a/drivers/irqchip/irq-mscc-ocelot.c +++ b/drivers/irqchip/irq-mscc-ocelot.c @@ -83,7 +83,7 @@ static void ocelot_irq_unmask(struct irq_data *data) unsigned int mask = data->mask; u32 val; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); /* * Clear sticky bits for edge mode interrupts. 
* Serval has only one trigger register replication, but the adjacent @@ -97,7 +97,6 @@ static void ocelot_irq_unmask(struct irq_data *data) *ct->mask_cache &= ~mask; irq_reg_writel(gc, mask, p->reg_off_ena_set); - irq_gc_unlock(gc); } static void ocelot_irq_handler(struct irq_desc *desc) @@ -132,8 +131,8 @@ static int __init vcoreiii_irq_init(struct device_node *node, if (!parent_irq) return -EINVAL; - domain = irq_domain_add_linear(node, p->n_irq, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), p->n_irq, + &irq_generic_chip_ops, NULL); if (!domain) { pr_err("%pOFn: unable to add irq domain\n", node); return -ENOMEM; diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c index 51464c6257f3..246c30205af4 100644 --- a/drivers/irqchip/irq-msi-lib.c +++ b/drivers/irqchip/irq-msi-lib.c @@ -4,7 +4,7 @@ #include <linux/export.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> /** * msi_lib_init_dev_msi_info - Domain info setup for MSI domains @@ -105,8 +105,13 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain, * MSI message into the hardware which is the whole purpose of the * device MSI domain aside of mask/unmask which is provided e.g. by * PCI/MSI device domains. + * + * The exception to the rule is when the underlying domain + * tells you that affinity is not a thing -- for example when + * everything is muxed behind a single interrupt. */ - chip->irq_set_affinity = msi_domain_set_affinity; + if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY)) + chip->irq_set_affinity = msi_domain_set_affinity; return true; } EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info); diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c index f6133ae28155..9643cc3a77d7 100644 --- a/drivers/irqchip/irq-mst-intc.c +++ b/drivers/irqchip/irq-mst-intc.c @@ -273,8 +273,8 @@ static int __init mst_intc_of_init(struct device_node *dn, raw_spin_lock_init(&cd->lock); cd->irq_start = irq_start; cd->nr_irqs = irq_end - irq_start + 1; - domain = irq_domain_add_hierarchy(domain_parent, 0, cd->nr_irqs, dn, - &mst_intc_domain_ops, cd); + domain = irq_domain_create_hierarchy(domain_parent, 0, cd->nr_irqs, of_fwnode_handle(dn), + &mst_intc_domain_ops, cd); if (!domain) { iounmap(cd->base); kfree(cd); diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c index 76bc0283e3b9..de481ba340f8 100644 --- a/drivers/irqchip/irq-mtk-cirq.c +++ b/drivers/irqchip/irq-mtk-cirq.c @@ -336,9 +336,8 @@ static int __init mtk_cirq_of_init(struct device_node *node, cirq_data->offsets = match->data; irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1; - domain = irq_domain_add_hierarchy(domain_parent, 0, - irq_num, node, - &cirq_domain_ops, cirq_data); + domain = irq_domain_create_hierarchy(domain_parent, 0, irq_num, of_fwnode_handle(node), + &cirq_domain_ops, cirq_data); if (!domain) { ret = -ENOMEM; goto out_unmap; diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 586e52d5442b..6895e7096b27 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -207,8 +207,8 @@ static int __init mtk_sysirq_of_init(struct device_node *node, chip_data->which_word[i] = word; } - domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node, - &sysirq_domain_ops, chip_data); + domain = irq_domain_create_hierarchy(domain_parent, 0, intpol_num, of_fwnode_handle(node), + &sysirq_domain_ops, chip_data); if (!domain) { ret = 
-ENOMEM; goto out_free_which_word; diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c index 60b976286636..d3232d6d8dce 100644 --- a/drivers/irqchip/irq-mvebu-gicp.c +++ b/drivers/irqchip/irq-mvebu-gicp.c @@ -17,7 +17,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #include <dt-bindings/interrupt-controller/arm-gic.h> @@ -170,9 +170,12 @@ static const struct msi_parent_ops gicp_msi_parent_ops = { static int mvebu_gicp_probe(struct platform_device *pdev) { - struct irq_domain *inner_domain, *parent_domain; struct device_node *node = pdev->dev.of_node; struct device_node *irq_parent_dn; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &gicp_domain_ops, + }; struct mvebu_gicp *gicp; int ret, i; @@ -217,30 +220,23 @@ static int mvebu_gicp_probe(struct platform_device *pdev) if (!gicp->spi_bitmap) return -ENOMEM; + info.size = gicp->spi_cnt; + info.host_data = gicp; + irq_parent_dn = of_irq_find_parent(node); if (!irq_parent_dn) { dev_err(&pdev->dev, "failed to find parent IRQ node\n"); return -ENODEV; } - parent_domain = irq_find_host(irq_parent_dn); + info.parent = irq_find_host(irq_parent_dn); of_node_put(irq_parent_dn); - if (!parent_domain) { + if (!info.parent) { dev_err(&pdev->dev, "failed to find parent IRQ domain\n"); return -ENODEV; } - inner_domain = irq_domain_create_hierarchy(parent_domain, 0, - gicp->spi_cnt, - of_node_to_fwnode(node), - &gicp_domain_ops, gicp); - if (!inner_domain) - return -ENOMEM; - - irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI); - inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - inner_domain->msi_parent_ops = &gicp_msi_parent_ops; - return 0; + return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 
0 : -ENOMEM; } static const struct of_device_id mvebu_gicp_of_match[] = { diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c index 4eebed39880a..db5dbc6e88b0 100644 --- a/drivers/irqchip/irq-mvebu-icu.c +++ b/drivers/irqchip/irq-mvebu-icu.c @@ -20,7 +20,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #include <dt-bindings/interrupt-controller/mvebu-icu.h> diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c index 54f6f0811573..e5b2bde3d933 100644 --- a/drivers/irqchip/irq-mvebu-odmi.c +++ b/drivers/irqchip/irq-mvebu-odmi.c @@ -18,7 +18,7 @@ #include <linux/of_address.h> #include <linux/slab.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #include <dt-bindings/interrupt-controller/arm-gic.h> @@ -167,7 +167,12 @@ static const struct msi_parent_ops odmi_msi_parent_ops = { static int __init mvebu_odmi_init(struct device_node *node, struct device_node *parent) { - struct irq_domain *parent_domain, *inner_domain; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &odmi_domain_ops, + .size = odmis_count * NODMIS_PER_FRAME, + .parent = irq_find_host(parent), + }; int ret, i; if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count)) @@ -203,22 +208,10 @@ static int __init mvebu_odmi_init(struct device_node *node, } } - parent_domain = irq_find_host(parent); + if (msi_create_parent_irq_domain(&info, &odmi_msi_parent_ops)) + return 0; - inner_domain = irq_domain_create_hierarchy(parent_domain, 0, - odmis_count * NODMIS_PER_FRAME, - of_node_to_fwnode(node), - &odmi_domain_ops, NULL); - if (!inner_domain) { - ret = -ENOMEM; - goto err_unmap; - } - - irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI); - inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - inner_domain->msi_parent_ops = &odmi_msi_parent_ops; - - return 0; + ret = -ENOMEM; err_unmap: for (i = 0; i < odmis_count; i++) { diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c index 3888b7585981..8db638aa21d2 100644 --- a/drivers/irqchip/irq-mvebu-pic.c +++ b/drivers/irqchip/irq-mvebu-pic.c @@ -150,8 +150,8 @@ static int mvebu_pic_probe(struct platform_device *pdev) return -EINVAL; } - pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS, - &mvebu_pic_domain_ops, pic); + pic->domain = irq_domain_create_linear(of_fwnode_handle(node), PIC_MAX_IRQS, + &mvebu_pic_domain_ops, pic); if (!pic->domain) { dev_err(&pdev->dev, "Failed to allocate irq domain\n"); return -ENOMEM; diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c index ebd4a9014e8d..5822ea864765 100644 --- a/drivers/irqchip/irq-mvebu-sei.c +++ b/drivers/irqchip/irq-mvebu-sei.c @@ -14,7 +14,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> /* Cause register */ #define GICP_SECR(idx) (0x0 + ((idx) * 0x4)) @@ -366,6 +366,10 @@ static const struct msi_parent_ops sei_msi_parent_ops = { static int mvebu_sei_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &mvebu_sei_cp_domain_ops, + }; struct mvebu_sei *sei; u32 parent_irq; int ret; @@ -402,7 +406,7 @@ static int mvebu_sei_probe(struct platform_device *pdev) } /* Create the root SEI domain */ - sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node), + sei->sei_domain = 
irq_domain_create_linear(of_fwnode_handle(node), (sei->caps->ap_range.size + sei->caps->cp_range.size), &mvebu_sei_domain_ops, @@ -418,7 +422,7 @@ static int mvebu_sei_probe(struct platform_device *pdev) /* Create the 'wired' domain */ sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0, sei->caps->ap_range.size, - of_node_to_fwnode(node), + of_fwnode_handle(node), &mvebu_sei_ap_domain_ops, sei); if (!sei->ap_domain) { @@ -430,21 +434,17 @@ static int mvebu_sei_probe(struct platform_device *pdev) irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED); /* Create the 'MSI' domain */ - sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0, - sei->caps->cp_range.size, - of_node_to_fwnode(node), - &mvebu_sei_cp_domain_ops, - sei); + info.size = sei->caps->cp_range.size; + info.host_data = sei; + info.parent = sei->sei_domain; + + sei->cp_domain = msi_create_parent_irq_domain(&info, &sei_msi_parent_ops); if (!sei->cp_domain) { pr_err("Failed to create CPs IRQ domain\n"); ret = -ENOMEM; goto remove_ap_domain; } - irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI); - sei->cp_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - sei->cp_domain->msi_parent_ops = &sei_msi_parent_ops; - mvebu_sei_reset(sei); irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index d67b5da38982..0bb423dd5280 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -162,8 +162,8 @@ static const struct irq_domain_ops icoll_irq_domain_ops = { static void __init icoll_add_domain(struct device_node *np, int num) { - icoll_domain = irq_domain_add_linear(np, num, - &icoll_irq_domain_ops, NULL); + icoll_domain = irq_domain_create_linear(of_fwnode_handle(np), num, + &icoll_irq_domain_ops, NULL); if (!icoll_domain) panic("%pOF: unable to create irq domain", np); diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index ba6332b00a0a..76e11cac9631 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -90,7 +90,7 @@ static int __init nvic_of_init(struct device_node *node, irqs = NVIC_MAX_IRQ; nvic_irq_domain = - irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL); + irq_domain_create_linear(of_fwnode_handle(node), irqs, &nvic_irq_domain_ops, NULL); if (!nvic_irq_domain) { pr_warn("Failed to allocate irq domain\n"); diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index ad84a2f03368..16f00db570e7 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c @@ -248,7 +248,7 @@ static int __init omap_init_irq_of(struct device_node *node) if (WARN_ON(!omap_irq_base)) return -ENOMEM; - domain = irq_domain_add_linear(node, omap_nr_irqs, + domain = irq_domain_create_linear(of_fwnode_handle(node), omap_nr_irqs, &irq_generic_chip_ops, NULL); omap_irq_soft_reset(); @@ -274,7 +274,7 @@ static int __init omap_init_irq_legacy(u32 base, struct device_node *node) irq_base = 0; } - domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0, + domain = irq_domain_create_legacy(of_fwnode_handle(node), omap_nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL); omap_irq_soft_reset(); diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c index f289ccd95291..48126067c54b 100644 --- a/drivers/irqchip/irq-or1k-pic.c +++ b/drivers/irqchip/irq-or1k-pic.c @@ -144,8 +144,8 @@ static int __init or1k_pic_init(struct device_node *node, /* Disable all interrupts until explicitly requested 
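The mvebu-gicp, mvebu-odmi and mvebu-sei hunks above all apply the same transformation: the open-coded irq_domain_create_hierarchy() call followed by irq_domain_update_bus_token(), setting IRQ_DOMAIN_FLAG_MSI_PARENT and assigning msi_parent_ops is collapsed into a single msi_create_parent_irq_domain() call driven by a struct irq_domain_info. A minimal sketch of the new-style setup follows; the my_msi_* names and the empty ops structures are invented for illustration, not taken from any of these drivers:

    #include <linux/irqdomain.h>
    #include <linux/msi.h>
    #include <linux/of.h>

    /* Hypothetical ops; a real driver fills these in (typically with the
     * msi_lib_* helpers from <linux/irqchip/irq-msi-lib.h>). */
    static const struct irq_domain_ops my_msi_domain_ops;
    static const struct msi_parent_ops my_msi_parent_ops;

    static int my_msi_setup(struct device_node *node, struct irq_domain *parent,
                            unsigned int nr_irqs, void *host_data)
    {
            struct irq_domain_info info = {
                    .fwnode    = of_fwnode_handle(node),
                    .ops       = &my_msi_domain_ops,
                    .size      = nr_irqs,
                    .host_data = host_data,
                    .parent    = parent,
            };

            /* Replaces irq_domain_create_hierarchy() plus the manual
             * DOMAIN_BUS_GENERIC_MSI / IRQ_DOMAIN_FLAG_MSI_PARENT setup. */
            if (!msi_create_parent_irq_domain(&info, &my_msi_parent_ops))
                    return -ENOMEM;

            return 0;
    }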
*/ mtspr(SPR_PICMR, (0UL)); - root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops, - pic); + root_domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &or1k_irq_domain_ops, + pic); set_handle_irq(or1k_pic_handle_irq); diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c index 4e4e874e09a8..dddbc05917c0 100644 --- a/drivers/irqchip/irq-orion.c +++ b/drivers/irqchip/irq-orion.c @@ -59,7 +59,7 @@ static int __init orion_irq_init(struct device_node *np, /* count number of irq chips by valid reg addresses */ num_chips = of_address_count(np); - orion_irq_domain = irq_domain_add_linear(np, + orion_irq_domain = irq_domain_create_linear(of_fwnode_handle(np), num_chips * ORION_IRQS_PER_CHIP, &irq_generic_chip_ops, NULL); if (!orion_irq_domain) @@ -146,8 +146,8 @@ static int __init orion_bridge_irq_init(struct device_node *np, /* get optional number of interrupts provided */ of_property_read_u32(np, "marvell,#interrupts", &nrirqs); - domain = irq_domain_add_linear(np, nrirqs, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs, + &irq_generic_chip_ops, NULL); if (!domain) { pr_err("%pOFn: unable to add irq domain\n", np); return -ENOMEM; diff --git a/drivers/irqchip/irq-owl-sirq.c b/drivers/irqchip/irq-owl-sirq.c index 6e4127465094..3d93d21f6732 100644 --- a/drivers/irqchip/irq-owl-sirq.c +++ b/drivers/irqchip/irq-owl-sirq.c @@ -323,8 +323,8 @@ static int __init owl_sirq_init(const struct owl_sirq_params *params, owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i); } - domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_SIRQ, node, - &owl_sirq_domain_ops, chip_data); + domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_SIRQ, of_fwnode_handle(node), + &owl_sirq_domain_ops, chip_data); if (!domain) { pr_err("%pOF: failed to add domain\n", node); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c index b546b1036e12..5dfda8e8df10 100644 --- a/drivers/irqchip/irq-pic32-evic.c +++ b/drivers/irqchip/irq-pic32-evic.c @@ -227,9 +227,9 @@ static int __init pic32_of_init(struct device_node *node, goto err_iounmap; } - evic_irq_domain = irq_domain_add_linear(node, nchips * 32, - &pic32_irq_domain_ops, - priv); + evic_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32, + &pic32_irq_domain_ops, + priv); if (!evic_irq_domain) { ret = -ENOMEM; goto err_free_priv; diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c index bee01980b463..87a5813fd835 100644 --- a/drivers/irqchip/irq-pruss-intc.c +++ b/drivers/irqchip/irq-pruss-intc.c @@ -555,8 +555,8 @@ static int pruss_intc_probe(struct platform_device *pdev) mutex_init(&intc->lock); - intc->domain = irq_domain_add_linear(dev->of_node, max_system_events, - &pruss_intc_irq_domain_ops, intc); + intc->domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), max_system_events, + &pruss_intc_irq_domain_ops, intc); if (!intc->domain) return -ENOMEM; @@ -581,8 +581,7 @@ static int pruss_intc_probe(struct platform_device *pdev) host_data->intc = intc; host_data->host_irq = i; - irq_set_handler_data(irq, host_data); - irq_set_chained_handler(irq, pruss_intc_irq_handler); + irq_set_chained_handler_and_data(irq, pruss_intc_irq_handler, host_data); } return 0; diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c index f772deb9cba5..8d569f7c5a7a 100644 --- a/drivers/irqchip/irq-qcom-mpm.c +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -450,7 +450,7 @@ 
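Most of the remaining irqchip hunks are mechanical conversions from the device_node based irq_domain_add_{linear,legacy,simple,tree,hierarchy}() helpers and of_node_to_fwnode() to the fwnode based irq_domain_create_*() calls with of_fwnode_handle(). A minimal sketch of the before/after shape for a linear domain, using an invented my_intc_* driver:

    #include <linux/irqdomain.h>
    #include <linux/of.h>

    /* Hypothetical ops; a real driver supplies .map/.xlate as needed. */
    static const struct irq_domain_ops my_intc_domain_ops;

    static int __init my_intc_init(struct device_node *node,
                                   struct device_node *parent)
    {
            struct irq_domain *d;

            /* Old: d = irq_domain_add_linear(node, 32, &my_intc_domain_ops, NULL); */
            d = irq_domain_create_linear(of_fwnode_handle(node), 32,
                                         &my_intc_domain_ops, NULL);
            if (!d)
                    return -ENOMEM;

            return 0;
    }

of_fwnode_handle(node) evaluates to &node->fwnode (or NULL), so the behaviour is unchanged; only the argument type moves from struct device_node * to struct fwnode_handle *.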
static int qcom_mpm_init(struct device_node *np, struct device_node *parent) priv->domain = irq_domain_create_hierarchy(parent_domain, IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt, - of_node_to_fwnode(np), &qcom_mpm_ops, priv); + of_fwnode_handle(np), &qcom_mpm_ops, priv); if (!priv->domain) { dev_err(dev, "failed to create MPM domain\n"); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c index 2a349082af81..942c1f8c363d 100644 --- a/drivers/irqchip/irq-realtek-rtl.c +++ b/drivers/irqchip/irq-realtek-rtl.c @@ -162,7 +162,7 @@ static int __init realtek_rtl_of_init(struct device_node *node, struct device_no else if (!parent_irq) return -ENODEV; - domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL); if (!domain) return -ENOMEM; diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 954419f2460d..0959ed43b1a9 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -513,8 +513,10 @@ static int intc_irqpin_probe(struct platform_device *pdev) irq_chip->irq_set_wake = intc_irqpin_irq_set_wake; irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; - p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0, - &intc_irqpin_irq_domain_ops, p); + p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node), + nirqs, 0, + &intc_irqpin_irq_domain_ops, + p); if (!p->irq_domain) { ret = -ENXIO; dev_err(dev, "cannot initialize irq domain\n"); diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index cbce8ffc7de4..5c3196e5a437 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -168,8 +168,8 @@ static int irqc_probe(struct platform_device *pdev) p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ - p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs, - &irq_generic_chip_ops, p); + p->irq_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), p->number_of_irqs, + &irq_generic_chip_ops, p); if (!p->irq_domain) { ret = -ENXIO; dev_err(dev, "cannot initialize irq domain\n"); diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c index d4e6a68889ec..0a9640ba0adb 100644 --- a/drivers/irqchip/irq-renesas-rza1.c +++ b/drivers/irqchip/irq-renesas-rza1.c @@ -231,9 +231,9 @@ static int rza1_irqc_probe(struct platform_device *pdev) priv->chip.irq_set_type = rza1_irqc_set_type; priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; - priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ, - np, &rza1_irqc_domain_ops, - priv); + priv->irq_domain = irq_domain_create_hierarchy(parent, 0, IRQC_NUM_IRQ, + of_fwnode_handle(np), &rza1_irqc_domain_ops, + priv); if (!priv->irq_domain) { dev_err(dev, "cannot initialize irq domain\n"); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index 6a2e41f02446..1e861bd64f97 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -574,9 +574,9 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node * raw_spin_lock_init(&rzg2l_irqc_data->lock); - irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ, - node, &rzg2l_irqc_domain_ops, - rzg2l_irqc_data); + irq_domain = irq_domain_create_hierarchy(parent_domain, 0, 
IRQC_NUM_IRQ, + of_fwnode_handle(node), &rzg2l_irqc_domain_ops, + rzg2l_irqc_data); if (!irq_domain) { pm_runtime_put(dev); return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n"); diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c index 0f0fd7d4dfdf..1c12e6ec1370 100644 --- a/drivers/irqchip/irq-renesas-rzv2h.c +++ b/drivers/irqchip/irq-renesas-rzv2h.c @@ -522,8 +522,9 @@ static int rzv2h_icu_init_common(struct device_node *node, struct device_node *p raw_spin_lock_init(&rzv2h_icu_data->lock); - irq_domain = irq_domain_add_hierarchy(parent_domain, 0, ICU_NUM_IRQ, node, - &rzv2h_icu_domain_ops, rzv2h_icu_data); + irq_domain = irq_domain_create_hierarchy(parent_domain, 0, ICU_NUM_IRQ, + of_fwnode_handle(node), &rzv2h_icu_domain_ops, + rzv2h_icu_data); if (!irq_domain) { dev_err(&pdev->dev, "failed to add irq domain\n"); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c index b8ae67c25b37..1b9fbfce9581 100644 --- a/drivers/irqchip/irq-riscv-imsic-platform.c +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -20,7 +20,7 @@ #include <linux/spinlock.h> #include <linux/smp.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> #include "irq-riscv-imsic-state.h" static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index, diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index f653c13de62b..e5805885394e 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -242,7 +242,7 @@ static int __init riscv_intc_init(struct device_node *node, chip = &andes_intc_chip; } - return riscv_intc_init_common(of_node_to_fwnode(node), chip); + return riscv_intc_init_common(of_fwnode_handle(node), chip); } IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init); diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c index 9d0b80271949..d8d4dff16276 100644 --- a/drivers/irqchip/irq-sa11x0.c +++ b/drivers/irqchip/irq-sa11x0.c @@ -162,7 +162,7 @@ void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start) */ writel_relaxed(1, iobase + ICCR); - sa1100_normal_irqdomain = irq_domain_add_simple(NULL, + sa1100_normal_irqdomain = irq_domain_create_simple(NULL, 32, irq_start, &sa1100_normal_irqdomain_ops, NULL); diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c index 375b55aa0acd..af16bc5a3c8b 100644 --- a/drivers/irqchip/irq-sg2042-msi.c +++ b/drivers/irqchip/irq-sg2042-msi.c @@ -17,23 +17,38 @@ #include <linux/property.h> #include <linux/slab.h> -#include "irq-msi-lib.h" +#include <linux/irqchip/irq-msi-lib.h> -#define SG2042_MAX_MSI_VECTOR 32 +struct sg204x_msi_chip_info { + const struct irq_chip *irqchip; + const struct msi_parent_ops *parent_ops; +}; + +/** + * struct sg204x_msi_chipdata - chip data for the SG204x MSI IRQ controller + * @reg_clr: clear reg, see TRM, 10.1.33, GP_INTR0_CLR + * @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET + * @irq_first: First vectors number that MSIs starts + * @num_irqs: Number of vectors for MSIs + * @msi_map: mapping for allocated MSI vectors. 
+ * @msi_map_lock: Lock for msi_map + * @chip_info: chip specific infomations + */ +struct sg204x_msi_chipdata { + void __iomem *reg_clr; -struct sg2042_msi_chipdata { - void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR + phys_addr_t doorbell_addr; - phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET + u32 irq_first; + u32 num_irqs; - u32 irq_first; // The vector number that MSIs starts - u32 num_irqs; // The number of vectors for MSIs + unsigned long *msi_map; + struct mutex msi_map_lock; - DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR); - struct mutex msi_map_lock; // lock for msi_map + const struct sg204x_msi_chip_info *chip_info; }; -static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req) +static int sg204x_msi_allocate_hwirq(struct sg204x_msi_chipdata *data, int num_req) { int first; @@ -43,7 +58,7 @@ static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_r return first >= 0 ? first : -ENOSPC; } -static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req) +static void sg204x_msi_free_hwirq(struct sg204x_msi_chipdata *data, int hwirq, int num_req) { guard(mutex)(&data->msi_map_lock); bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req)); @@ -51,7 +66,7 @@ static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, i static void sg2042_msi_irq_ack(struct irq_data *d) { - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); int bit_off = d->hwirq; writel(1 << bit_off, data->reg_clr); @@ -61,7 +76,7 @@ static void sg2042_msi_irq_ack(struct irq_data *d) static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) { - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); msg->address_hi = upper_32_bits(data->doorbell_addr); msg->address_lo = lower_32_bits(data->doorbell_addr); @@ -79,9 +94,38 @@ static const struct irq_chip sg2042_msi_middle_irq_chip = { .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg, }; -static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) +static void sg2044_msi_irq_ack(struct irq_data *d) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + + writel(0, (u32 __iomem *)data->reg_clr + d->hwirq); + irq_chip_ack_parent(d); +} + +static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); + phys_addr_t doorbell = data->doorbell_addr + 4 * (d->hwirq / 32); + + msg->address_lo = lower_32_bits(doorbell); + msg->address_hi = upper_32_bits(doorbell); + msg->data = d->hwirq % 32; +} + +static struct irq_chip sg2044_msi_middle_irq_chip = { + .name = "SG2044 MSI", + .irq_ack = sg2044_msi_irq_ack, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_compose_msi_msg = sg2044_msi_irq_compose_msi_msg, +}; + +static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) { - struct sg2042_msi_chipdata *data = domain->host_data; + struct sg204x_msi_chipdata *data = domain->host_data; struct irq_fwspec fwspec; struct irq_data *d; int ret; @@ -99,47 +143,45 @@ static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in return 
d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); } -static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, +static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { - struct sg2042_msi_chipdata *data = domain->host_data; + struct sg204x_msi_chipdata *data = domain->host_data; int hwirq, err, i; - hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs); + hwirq = sg204x_msi_allocate_hwirq(data, nr_irqs); if (hwirq < 0) return hwirq; for (i = 0; i < nr_irqs; i++) { - err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i); + err = sg204x_msi_parent_domain_alloc(domain, virq + i, hwirq + i); if (err) goto err_hwirq; irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, - &sg2042_msi_middle_irq_chip, data); + data->chip_info->irqchip, data); } - return 0; err_hwirq: - sg2042_msi_free_hwirq(data, hwirq, nr_irqs); + sg204x_msi_free_hwirq(data, hwirq, nr_irqs); irq_domain_free_irqs_parent(domain, virq, i); - return err; } -static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq, +static void sg204x_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d); + struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d); irq_domain_free_irqs_parent(domain, virq, nr_irqs); - sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs); + sg204x_msi_free_hwirq(data, d->hwirq, nr_irqs); } -static const struct irq_domain_ops sg2042_msi_middle_domain_ops = { - .alloc = sg2042_msi_middle_domain_alloc, - .free = sg2042_msi_middle_domain_free, +static const struct irq_domain_ops sg204x_msi_middle_domain_ops = { + .alloc = sg204x_msi_middle_domain_alloc, + .free = sg204x_msi_middle_domain_free, .select = msi_lib_irq_domain_select, }; @@ -158,14 +200,30 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = { .init_dev_msi_info = msi_lib_init_dev_msi_info, }; -static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data, +#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) + +#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) + +static const struct msi_parent_ops sg2044_msi_parent_ops = { + .required_flags = SG2044_MSI_FLAGS_REQUIRED, + .supported_flags = SG2044_MSI_FLAGS_SUPPORTED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .bus_select_mask = MATCH_PCI_MSI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .prefix = "SG2044-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static int sg204x_msi_init_domains(struct sg204x_msi_chipdata *data, struct irq_domain *plic_domain, struct device *dev) { struct fwnode_handle *fwnode = dev_fwnode(dev); struct irq_domain *middle_domain; middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode, - &sg2042_msi_middle_domain_ops, data); + &sg204x_msi_middle_domain_ops, data); if (!middle_domain) { pr_err("Failed to create the MSI middle domain\n"); return -ENOMEM; @@ -174,24 +232,29 @@ static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data, irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS); middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; - middle_domain->msi_parent_ops = &sg2042_msi_parent_ops; - + middle_domain->msi_parent_ops = data->chip_info->parent_ops; return 0; } static int sg2042_msi_probe(struct platform_device *pdev) 
{ struct fwnode_reference_args args = { }; - struct sg2042_msi_chipdata *data; + struct sg204x_msi_chipdata *data; struct device *dev = &pdev->dev; struct irq_domain *plic_domain; struct resource *res; int ret; - data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(struct sg204x_msi_chipdata), GFP_KERNEL); if (!data) return -ENOMEM; + data->chip_info = device_get_match_data(&pdev->dev); + if (!data->chip_info) { + dev_err(&pdev->dev, "Failed to get irqchip\n"); + return -EINVAL; + } + data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr"); if (IS_ERR(data->reg_clr)) { dev_err(dev, "Failed to map clear register\n"); @@ -232,11 +295,28 @@ static int sg2042_msi_probe(struct platform_device *pdev) mutex_init(&data->msi_map_lock); - return sg2042_msi_init_domains(data, plic_domain, dev); + data->msi_map = devm_bitmap_zalloc(&pdev->dev, data->num_irqs, GFP_KERNEL); + if (!data->msi_map) { + dev_err(&pdev->dev, "Unable to allocate msi mapping\n"); + return -ENOMEM; + } + + return sg204x_msi_init_domains(data, plic_domain, dev); } +static const struct sg204x_msi_chip_info sg2042_chip_info = { + .irqchip = &sg2042_msi_middle_irq_chip, + .parent_ops = &sg2042_msi_parent_ops, +}; + +static const struct sg204x_msi_chip_info sg2044_chip_info = { + .irqchip = &sg2044_msi_middle_irq_chip, + .parent_ops = &sg2044_msi_parent_ops, +}; + static const struct of_device_id sg2042_msi_of_match[] = { - { .compatible = "sophgo,sg2042-msi" }, + { .compatible = "sophgo,sg2042-msi", .data = &sg2042_chip_info }, + { .compatible = "sophgo,sg2044-msi", .data = &sg2044_chip_info }, { } }; diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c index c7db617e1a2f..0cad68aa8388 100644 --- a/drivers/irqchip/irq-sni-exiu.c +++ b/drivers/irqchip/irq-sni-exiu.c @@ -249,12 +249,12 @@ static int __init exiu_dt_init(struct device_node *node, return -ENXIO; } - data = exiu_init(of_node_to_fwnode(node), &res); + data = exiu_init(of_fwnode_handle(node), &res); if (IS_ERR(data)) return PTR_ERR(data); - domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node, - &exiu_domain_ops, data); + domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_IRQS, of_fwnode_handle(node), + &exiu_domain_ops, data); if (!domain) { pr_err("%pOF: failed to allocate domain\n", node); goto out_unmap; diff --git a/drivers/irqchip/irq-sp7021-intc.c b/drivers/irqchip/irq-sp7021-intc.c index bed78d1def3d..2a6eda9ab62e 100644 --- a/drivers/irqchip/irq-sp7021-intc.c +++ b/drivers/irqchip/irq-sp7021-intc.c @@ -256,8 +256,8 @@ static int __init sp_intc_init_dt(struct device_node *node, struct device_node * writel_relaxed(~0, REG_INTR_CLEAR + i * 4); } - sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS, - &sp_intc_dm_ops, &sp_intc); + sp_intc.domain = irq_domain_create_linear(of_fwnode_handle(node), SP_INTC_NR_IRQS, + &sp_intc_dm_ops, &sp_intc); if (!sp_intc.domain) { ret = -ENOMEM; goto out_unmap1; diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c index 0f5837176e53..2460798ec158 100644 --- a/drivers/irqchip/irq-starfive-jh8100-intc.c +++ b/drivers/irqchip/irq-starfive-jh8100-intc.c @@ -158,8 +158,8 @@ static int __init starfive_intc_init(struct device_node *intc, raw_spin_lock_init(&irqc->lock); - irqc->domain = irq_domain_add_linear(intc, STARFIVE_INTC_SRC_IRQ_NUM, - &starfive_intc_domain_ops, irqc); + irqc->domain = irq_domain_create_linear(of_fwnode_handle(intc), STARFIVE_INTC_SRC_IRQ_NUM, + 
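The sg2042-msi rework above turns the driver into a shared sg204x_* core with per-SoC match data: each compatible in the of_device_id table points at a struct sg204x_msi_chip_info that selects the irq_chip and msi_parent_ops, and probe() retrieves it via device_get_match_data(). A stripped-down sketch of that pattern; the my_* names and compatibles are placeholders:

    #include <linux/irq.h>
    #include <linux/msi.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>
    #include <linux/property.h>

    struct my_chip_info {
            const struct irq_chip *irqchip;
            const struct msi_parent_ops *parent_ops;
    };

    static int my_msi_probe(struct platform_device *pdev)
    {
            const struct my_chip_info *info = device_get_match_data(&pdev->dev);

            if (!info)
                    return -EINVAL;

            /* info->irqchip / info->parent_ops select the SoC variant */
            return 0;
    }

    static const struct my_chip_info chip_a = { /* SoC A specifics */ };
    static const struct my_chip_info chip_b = { /* SoC B specifics */ };

    static const struct of_device_id my_msi_of_match[] = {
            { .compatible = "vendor,chip-a-msi", .data = &chip_a },
            { .compatible = "vendor,chip-b-msi", .data = &chip_b },
            { }
    };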
&starfive_intc_domain_ops, irqc); if (!irqc->domain) { pr_err("Unable to create IRQ domain\n"); ret = -EINVAL; diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 7c6a0080c330..978811f2abe8 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -169,22 +169,18 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type) u32 rtsr, ftsr; int err; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst); ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst); err = stm32_exti_set_type(d, type, &rtsr, &ftsr); if (err) - goto unlock; + return err; irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst); irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst); - -unlock: - irq_gc_unlock(gc); - - return err; + return 0; } static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data, @@ -217,18 +213,16 @@ static void stm32_irq_suspend(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); stm32_chip_suspend(chip_data, gc->wake_active); - irq_gc_unlock(gc); } static void stm32_irq_resume(struct irq_chip_generic *gc) { struct stm32_exti_chip_data *chip_data = gc->private; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); stm32_chip_resume(chip_data, gc->mask_cache); - irq_gc_unlock(gc); } static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq, @@ -265,11 +259,8 @@ static void stm32_irq_ack(struct irq_data *d) struct stm32_exti_chip_data *chip_data = gc->private; const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank; - irq_gc_lock(gc); - + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst); - - irq_gc_unlock(gc); } static struct @@ -344,8 +335,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data, if (!host_data) return -ENOMEM; - domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK, - &irq_exti_domain_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), drv_data->bank_nr * IRQS_PER_BANK, + &irq_exti_domain_ops, NULL); if (!domain) { pr_err("%pOFn: Could not register interrupt domain.\n", node); diff --git a/drivers/irqchip/irq-stm32mp-exti.c b/drivers/irqchip/irq-stm32mp-exti.c index cb83d6cc6113..c6b4407d05f9 100644 --- a/drivers/irqchip/irq-stm32mp-exti.c +++ b/drivers/irqchip/irq-stm32mp-exti.c @@ -531,7 +531,7 @@ static int stm32mp_exti_domain_alloc(struct irq_domain *dm, if (ret) return ret; /* we only support one parent, so far */ - if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode) + if (of_fwnode_handle(out_irq.np) != dm->parent->fwnode) return -EINVAL; of_phandle_args_to_fwspec(out_irq.np, out_irq.args, @@ -682,10 +682,9 @@ static int stm32mp_exti_probe(struct platform_device *pdev) return -EINVAL; } - domain = irq_domain_add_hierarchy(parent_domain, 0, - drv_data->bank_nr * IRQS_PER_BANK, - np, &stm32mp_exti_domain_ops, - host_data); + domain = irq_domain_create_hierarchy(parent_domain, 0, drv_data->bank_nr * IRQS_PER_BANK, + of_fwnode_handle(np), &stm32mp_exti_domain_ops, + host_data); if (!domain) { dev_err(dev, "Could not register exti domain\n"); diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index dd506ebfdacb..9c2c9caeca2a 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c @@ -133,7 +133,7 @@ static int __init sun4i_of_init(struct device_node *node, /* Configure the external interrupt source type */ writel(0x00, 
irq_ic_data->irq_base + SUN4I_IRQ_NMI_CTRL_REG); - irq_ic_data->irq_domain = irq_domain_add_linear(node, 3 * 32, + irq_ic_data->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), 3 * 32, &sun4i_irq_ops, NULL); if (!irq_ic_data->irq_domain) panic("%pOF: unable to create IRQ domain\n", node); diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c index 99958d470d62..37d4b29763bc 100644 --- a/drivers/irqchip/irq-sun6i-r.c +++ b/drivers/irqchip/irq-sun6i-r.c @@ -338,8 +338,8 @@ static int __init sun6i_r_intc_init(struct device_node *node, return PTR_ERR(base); } - domain = irq_domain_add_hierarchy(parent_domain, 0, 0, node, - &sun6i_r_intc_domain_ops, NULL); + domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node), + &sun6i_r_intc_domain_ops, NULL); if (!domain) { pr_err("%pOF: Failed to allocate domain\n", node); iounmap(base); diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 01b0d8321728..fe32dfdc2dd0 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -111,7 +111,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) unsigned int src_type; unsigned int i; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); switch (flow_type & IRQF_TRIGGER_MASK) { case IRQ_TYPE_EDGE_FALLING: @@ -128,9 +128,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) src_type = SUNXI_SRC_TYPE_LEVEL_LOW; break; default: - irq_gc_unlock(gc); - pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", - data->irq); + pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq); return -EBADR; } @@ -145,9 +143,6 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK; src_type_reg |= src_type; sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg); - - irq_gc_unlock(gc); - return IRQ_SET_MASK_OK; } @@ -159,7 +154,7 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node, struct irq_domain *domain; int ret; - domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(node), 1, &irq_generic_chip_ops, NULL); if (!domain) { pr_err("Could not register interrupt domain.\n"); return -ENOMEM; diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c index d59bfbe8c6d0..94cbc5111d7e 100644 --- a/drivers/irqchip/irq-tb10x.c +++ b/drivers/irqchip/irq-tb10x.c @@ -41,11 +41,9 @@ static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - uint32_t im, mod, pol; + uint32_t mod, pol, im = data->mask; - im = data->mask; - - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im; pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im; @@ -67,9 +65,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) case IRQ_TYPE_EDGE_RISING: break; default: - irq_gc_unlock(gc); - pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", - __func__, data->irq); + pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", __func__, data->irq); return -EBADR; } @@ -79,9 +75,6 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod); ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol); 
ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im); - - irq_gc_unlock(gc); - return IRQ_SET_MASK_OK; } @@ -121,13 +114,13 @@ static int __init of_tb10x_init_irq(struct device_node *ictl, goto ioremap_fail; } - domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(ictl), AB_IRQCTL_MAXIRQ, + &irq_generic_chip_ops, NULL); if (!domain) { ret = -ENOMEM; pr_err("%pOFn: Could not register interrupt domain.\n", ictl); - goto irq_domain_add_fail; + goto irq_domain_create_fail; } ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ, @@ -174,7 +167,7 @@ static int __init of_tb10x_init_irq(struct device_node *ictl, gc_alloc_fail: irq_domain_remove(domain); -irq_domain_add_fail: +irq_domain_create_fail: iounmap(reg_base); ioremap_fail: release_mem_region(mem.start, resource_size(&mem)); diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c index ad3e2c1b3c87..66cbb9f77ff3 100644 --- a/drivers/irqchip/irq-tegra.c +++ b/drivers/irqchip/irq-tegra.c @@ -330,9 +330,8 @@ static int __init tegra_ictlr_init(struct device_node *node, node, num_ictlrs, soc->num_ictlrs); - domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, - node, &tegra_ictlr_domain_ops, - lic); + domain = irq_domain_create_hierarchy(parent_domain, 0, num_ictlrs * 32, + of_fwnode_handle(node), &tegra_ictlr_domain_ops, lic); if (!domain) { pr_err("%pOF: failed to allocated domain\n", node); err = -ENOMEM; diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index a887efba262c..7de59238e6b0 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -233,7 +233,7 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom INIT_LIST_HEAD(&vint_desc->list); parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev)); - parent_fwspec.fwnode = of_node_to_fwnode(parent_node); + parent_fwspec.fwnode = of_fwnode_handle(parent_node); if (of_device_is_compatible(parent_node, "arm,gic-v3")) { /* Parent is GIC */ @@ -701,15 +701,15 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) if (ret) return ret; - domain = irq_domain_add_linear(dev_of_node(dev), - ti_sci_get_num_resources(inta->vint), - &ti_sci_inta_irq_domain_ops, inta); + domain = irq_domain_create_linear(of_fwnode_handle(dev_of_node(dev)), + ti_sci_get_num_resources(inta->vint), + &ti_sci_inta_irq_domain_ops, inta); if (!domain) { dev_err(dev, "Failed to allocate IRQ domain\n"); return -ENOMEM; } - msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node), + msi_domain = ti_sci_inta_msi_create_irq_domain(of_fwnode_handle(node), &ti_sci_inta_msi_domain_info, domain); if (!msi_domain) { diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c index b49a73106c69..07fff5ae5ce0 100644 --- a/drivers/irqchip/irq-ti-sci-intr.c +++ b/drivers/irqchip/irq-ti-sci-intr.c @@ -149,7 +149,7 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain, goto err_irqs; parent_node = of_irq_find_parent(dev_of_node(intr->dev)); - fwspec.fwnode = of_node_to_fwnode(parent_node); + fwspec.fwnode = of_fwnode_handle(parent_node); if (of_device_is_compatible(parent_node, "arm,gic-v3")) { /* Parent is GIC */ @@ -274,8 +274,9 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev) return PTR_ERR(intr->out_irqs); } - domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev), - &ti_sci_intr_irq_domain_ops, intr); + 
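The stm32-exti, sunxi-nmi and tb10x hunks above drop the irq_gc_lock()/irq_gc_unlock() pairs, and the error-path gotos they forced, in favour of the scope-based guard(raw_spinlock)(&gc->lock), which releases the lock automatically on every return path. A minimal sketch of an irq_set_type callback in the new style; the register programming is elided and the my_* name is invented:

    #include <linux/cleanup.h>
    #include <linux/irq.h>

    static int my_irq_set_type(struct irq_data *d, unsigned int flow_type)
    {
            struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

            guard(raw_spinlock)(&gc->lock);

            if (flow_type & ~IRQ_TYPE_EDGE_BOTH)
                    return -EINVAL;        /* lock released automatically */

            /* ... program trigger registers with irq_reg_writel(gc, ...) ... */
            return IRQ_SET_MASK_OK;
    }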
domain = irq_domain_create_hierarchy(parent_domain, 0, 0, + of_fwnode_handle(dev_of_node(dev)), + &ti_sci_intr_irq_domain_ops, intr); if (!domain) { dev_err(dev, "Failed to allocate IRQ domain\n"); return -ENOMEM; diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c index 960c343d5781..e625f4fb2bb8 100644 --- a/drivers/irqchip/irq-ts4800.c +++ b/drivers/irqchip/irq-ts4800.c @@ -125,7 +125,7 @@ static int ts4800_ic_probe(struct platform_device *pdev) return -EINVAL; } - data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data); + data->domain = irq_domain_create_linear(of_fwnode_handle(node), 8, &ts4800_ic_ops, data); if (!data->domain) { dev_err(&pdev->dev, "cannot add IRQ domain\n"); return -ENOMEM; diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c index 601f9343d5b3..6005c2d28dd9 100644 --- a/drivers/irqchip/irq-uniphier-aidet.c +++ b/drivers/irqchip/irq-uniphier-aidet.c @@ -188,7 +188,7 @@ static int uniphier_aidet_probe(struct platform_device *pdev) priv->domain = irq_domain_create_hierarchy( parent_domain, 0, UNIPHIER_AIDET_NR_IRQS, - of_node_to_fwnode(dev->of_node), + of_fwnode_handle(dev->of_node), &uniphier_aidet_domain_ops, priv); if (!priv->domain) return -ENOMEM; diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 0abc8934c2ee..034ce6afe170 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -176,8 +176,8 @@ static void __init fpga_irq_init(void __iomem *base, int parent_irq, f); } - f->domain = irq_domain_add_linear(node, fls(valid), - &fpga_irqdomain_ops, f); + f->domain = irq_domain_create_linear(of_fwnode_handle(node), fls(valid), + &fpga_irqdomain_ops, f); /* This will allocate all valid descriptors in the linear case */ for (i = 0; i < fls(valid); i++) diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c index 2b9a8ba58e26..5d9c7503aa7f 100644 --- a/drivers/irqchip/irq-vf610-mscm-ir.c +++ b/drivers/irqchip/irq-vf610-mscm-ir.c @@ -209,9 +209,9 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node, regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid); mscm_ir_data->cpu_mask = 0x1 << cpuid; - domain = irq_domain_add_hierarchy(domain_parent, 0, - MSCM_IRSPRC_NUM, node, - &mscm_irq_domain_ops, mscm_ir_data); + domain = irq_domain_create_hierarchy(domain_parent, 0, MSCM_IRSPRC_NUM, + of_fwnode_handle(node), &mscm_irq_domain_ops, + mscm_ir_data); if (!domain) { ret = -ENOMEM; goto out_unmap; diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index ea93e7236c4a..2bcdf216a000 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -289,8 +289,9 @@ static void __init vic_register(void __iomem *base, unsigned int parent_irq, vic_handle_irq_cascaded, v); } - v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, - &vic_irqdomain_ops, v); + v->domain = irq_domain_create_simple(of_fwnode_handle(node), + fls(valid_sources), irq, + &vic_irqdomain_ops, v); /* create an IRQ mapping for each valid IRQ */ for (i = 0; i < fls(valid_sources); i++) if (valid_sources & (1 << i)) diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c index e17dd3a8c2d5..3b742590aec8 100644 --- a/drivers/irqchip/irq-vt8500.c +++ b/drivers/irqchip/irq-vt8500.c @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> #include 
<linux/bitops.h> @@ -63,29 +64,28 @@ struct vt8500_irq_data { struct irq_domain *domain; /* Domain for this controller */ }; -/* Global variable for accessing io-mem addresses */ -static struct vt8500_irq_data intc[VT8500_INTC_MAX]; -static u32 active_cnt = 0; +/* Primary interrupt controller data */ +static struct vt8500_irq_data *primary_intc; -static void vt8500_irq_mask(struct irq_data *d) +static void vt8500_irq_ack(struct irq_data *d) { struct vt8500_irq_data *priv = d->domain->host_data; void __iomem *base = priv->base; void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4); - u8 edge, dctr; - u32 status; + u32 status = (1 << (d->hwirq & 0x1f)); - edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE; - if (edge) { - status = readl(stat_reg); + writel(status, stat_reg); +} - status |= (1 << (d->hwirq & 0x1f)); - writel(status, stat_reg); - } else { - dctr = readb(base + VT8500_ICDC + d->hwirq); - dctr &= ~VT8500_INT_ENABLE; - writeb(dctr, base + VT8500_ICDC + d->hwirq); - } +static void vt8500_irq_mask(struct irq_data *d) +{ + struct vt8500_irq_data *priv = d->domain->host_data; + void __iomem *base = priv->base; + u8 dctr; + + dctr = readb(base + VT8500_ICDC + d->hwirq); + dctr &= ~VT8500_INT_ENABLE; + writeb(dctr, base + VT8500_ICDC + d->hwirq); } static void vt8500_irq_unmask(struct irq_data *d) @@ -130,11 +130,11 @@ static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) } static struct irq_chip vt8500_irq_chip = { - .name = "vt8500", - .irq_ack = vt8500_irq_mask, - .irq_mask = vt8500_irq_mask, - .irq_unmask = vt8500_irq_unmask, - .irq_set_type = vt8500_irq_set_type, + .name = "vt8500", + .irq_ack = vt8500_irq_ack, + .irq_mask = vt8500_irq_mask, + .irq_unmask = vt8500_irq_unmask, + .irq_set_type = vt8500_irq_set_type, }; static void __init vt8500_init_irq_hw(void __iomem *base) @@ -163,82 +163,89 @@ static const struct irq_domain_ops vt8500_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; +static inline void vt8500_handle_irq_common(struct vt8500_irq_data *intc) +{ + unsigned long irqnr = readl_relaxed(intc->base) & 0x3F; + unsigned long stat; + + /* + * Highest Priority register default = 63, so check that this + * is a real interrupt by checking the status register + */ + if (irqnr == 63) { + stat = readl_relaxed(intc->base + VT8500_ICIS + 4); + if (!(stat & BIT(31))) + return; + } + + generic_handle_domain_irq(intc->domain, irqnr); +} + static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs) { - u32 stat, i; - int irqnr; - void __iomem *base; - - /* Loop through each active controller */ - for (i=0; i<active_cnt; i++) { - base = intc[i].base; - irqnr = readl_relaxed(base) & 0x3F; - /* - Highest Priority register default = 63, so check that this - is a real interrupt by checking the status register - */ - if (irqnr == 63) { - stat = readl_relaxed(base + VT8500_ICIS + 4); - if (!(stat & BIT(31))) - continue; - } + vt8500_handle_irq_common(primary_intc); +} - generic_handle_domain_irq(intc[i].domain, irqnr); - } +static void vt8500_handle_irq_chained(struct irq_desc *desc) +{ + struct irq_domain *d = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct vt8500_irq_data *intc = d->host_data; + + chained_irq_enter(chip, desc); + vt8500_handle_irq_common(intc); + chained_irq_exit(chip, desc); } static int __init vt8500_irq_init(struct device_node *node, struct device_node *parent) { - int irq, i; - struct device_node *np = node; + struct vt8500_irq_data *intc; + int irq, i, ret = 
0; - if (active_cnt == VT8500_INTC_MAX) { - pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n", - __func__); - goto out; - } + intc = kzalloc(sizeof(*intc), GFP_KERNEL); + if (!intc) + return -ENOMEM; - intc[active_cnt].base = of_iomap(np, 0); - intc[active_cnt].domain = irq_domain_add_linear(node, 64, - &vt8500_irq_domain_ops, &intc[active_cnt]); - - if (!intc[active_cnt].base) { + intc->base = of_iomap(node, 0); + if (!intc->base) { pr_err("%s: Unable to map IO memory\n", __func__); - goto out; + ret = -ENOMEM; + goto err_free; } - if (!intc[active_cnt].domain) { + intc->domain = irq_domain_create_linear(of_fwnode_handle(node), 64, + &vt8500_irq_domain_ops, intc); + if (!intc->domain) { pr_err("%s: Unable to add irq domain!\n", __func__); - goto out; + ret = -ENOMEM; + goto err_unmap; } - set_handle_irq(vt8500_handle_irq); - - vt8500_init_irq_hw(intc[active_cnt].base); + vt8500_init_irq_hw(intc->base); pr_info("vt8500-irq: Added interrupt controller\n"); - active_cnt++; - - /* check if this is a slaved controller */ - if (of_irq_count(np) != 0) { - /* check that we have the correct number of interrupts */ - if (of_irq_count(np) != 8) { - pr_err("%s: Incorrect IRQ map for slaved controller\n", - __func__); - return -EINVAL; - } - - for (i = 0; i < 8; i++) { - irq = irq_of_parse_and_map(np, i); - enable_irq(irq); + /* check if this is a chained controller */ + if (of_irq_count(node) != 0) { + for (i = 0; i < of_irq_count(node); i++) { + irq = irq_of_parse_and_map(node, i); + irq_set_chained_handler_and_data(irq, vt8500_handle_irq_chained, + intc); } pr_info("vt8500-irq: Enabled slave->parent interrupts\n"); + } else { + primary_intc = intc; + set_handle_irq(vt8500_handle_irq); } -out: return 0; + +err_unmap: + iounmap(intc->base); +err_free: + kfree(intc); + return ret; } IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init); diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c index 91df62a64cd9..a8ed4894d29e 100644 --- a/drivers/irqchip/irq-wpcm450-aic.c +++ b/drivers/irqchip/irq-wpcm450-aic.c @@ -154,7 +154,7 @@ static int __init wpcm450_aic_of_init(struct device_node *node, set_handle_irq(wpcm450_aic_handle_irq); - aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic); + aic->domain = irq_domain_create_linear(of_fwnode_handle(node), AIC_NUM_IRQS, &wpcm450_aic_ops, aic); return 0; } diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index 38727e9cc713..92dcb9fdcb25 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -212,8 +212,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc, xintc_write(irqc, MER, MER_HIE | MER_ME); } - irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq, - &xintc_irq_domain_ops, irqc); + irqc->root_domain = irq_domain_create_linear(of_fwnode_handle(intc), irqc->nr_irq, + &xintc_irq_domain_ops, irqc); if (!irqc->root_domain) { pr_err("irq-xilinx: Unable to create IRQ domain\n"); ret = -EINVAL; diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index 9b441d180299..9fdacbd89a63 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -167,8 +167,7 @@ static void __init xtensa_mx_init_common(struct irq_domain *root_domain) int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, - &xtensa_mx_irq_domain_ops, + irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 
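The vt8500 rework above replaces the fixed intc[] array and the loop in the root handler with one allocated controller per node: the primary controller keeps set_handle_irq(), while secondary controllers are wired up as chained handlers with irq_set_chained_handler_and_data() (the pruss-intc hunk earlier makes the same simplification). A reduced sketch of the chained-handler half, with my_* names standing in for the driver's own:

    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/irqdesc.h>
    #include <linux/irqdomain.h>
    #include <linux/irqchip/chained_irq.h>

    struct my_intc {
            void __iomem *base;
            struct irq_domain *domain;
    };

    static void my_handle_cascade(struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            struct my_intc *intc = irq_desc_get_handler_data(desc);
            unsigned long hwirq = readl_relaxed(intc->base) & 0x3f;

            chained_irq_enter(chip, desc);
            generic_handle_domain_irq(intc->domain, hwirq);
            chained_irq_exit(chip, desc);
    }

    /* At probe time, for each parent interrupt of a secondary controller:
     *      irq_set_chained_handler_and_data(parent_irq, my_handle_cascade, intc);
     */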
0, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); xtensa_mx_init_common(root_domain); return 0; @@ -178,7 +177,7 @@ static int __init xtensa_mx_init(struct device_node *np, struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops, + irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); xtensa_mx_init_common(root_domain); return 0; diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index 9be7b7c5cd23..44e7be051a2e 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -85,7 +85,7 @@ static struct irq_chip xtensa_irq_chip = { int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, + irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_irq_domain_ops, &xtensa_irq_chip); irq_set_default_domain(root_domain); return 0; @@ -95,7 +95,7 @@ static int __init xtensa_pic_init(struct device_node *np, struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops, + irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_irq_domain_ops, &xtensa_irq_chip); irq_set_default_domain(root_domain); return 0; diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c index 7a72620fc478..22d46c246594 100644 --- a/drivers/irqchip/irq-zevio.c +++ b/drivers/irqchip/irq-zevio.c @@ -92,8 +92,8 @@ static int __init zevio_of_init(struct device_node *node, zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE); zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE); - zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS, - &irq_generic_chip_ops, NULL); + zevio_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), MAX_INTRS, + &irq_generic_chip_ops, NULL); BUG_ON(!zevio_irq_domain); ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1, diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 7c17a6f643ef..576e55569d77 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -239,7 +239,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr, goto err_unmap; } - shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0, + shirq_domain = irq_domain_create_legacy(of_fwnode_handle(np), nr_irqs, virq_base, 0, &irq_domain_simple_ops, NULL); if (WARN_ON(!shirq_domain)) { pr_warn("%s: irq domain init failed\n", __func__); diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c index 0b17a38ea6bf..ea44ffb5ce1a 100644 --- a/drivers/mailbox/qcom-ipcc.c +++ b/drivers/mailbox/qcom-ipcc.c @@ -312,8 +312,8 @@ static int qcom_ipcc_probe(struct platform_device *pdev) if (!name) return -ENOMEM; - ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node, - &qcom_ipcc_irq_ops, ipcc); + ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node), + &qcom_ipcc_irq_ops, ipcc); if (!ipcc->irq_domain) return -ENOMEM; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 813b38aec3e4..c40db9c161c1 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -293,8 +293,7 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META; bio->bi_iter.bi_sector = SB_SECTOR; - __bio_add_page(bio, virt_to_page(out), SB_SIZE, - offset_in_page(out)); + 
bio_add_virt_nofail(bio, out, SB_SIZE); out->offset = cpu_to_le64(sb->offset); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index f0b5a6931161..d098e75e3461 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1364,7 +1364,7 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector, ptr = (char *)b->data + offset; len = n_sectors << SECTOR_SHIFT; - __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr)); + bio_add_virt_nofail(bio, ptr, len); submit_bio(bio); } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index cc3d3897ef42..1f626066e8cc 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2557,14 +2557,8 @@ static void dm_integrity_inline_recheck(struct work_struct *w) char *mem; outgoing_bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recheck_bios); - - r = bio_add_page(outgoing_bio, virt_to_page(outgoing_data), ic->sectors_per_block << SECTOR_SHIFT, 0); - if (unlikely(r != (ic->sectors_per_block << SECTOR_SHIFT))) { - bio_put(outgoing_bio); - bio->bi_status = BLK_STS_RESOURCE; - bio_endio(bio); - return; - } + bio_add_virt_nofail(outgoing_bio, outgoing_data, + ic->sectors_per_block << SECTOR_SHIFT); bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1); if (IS_ERR(bip)) { @@ -3211,7 +3205,8 @@ next_chunk: bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios); bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; - __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer)); + bio_add_virt_nofail(bio, recalc_buffer, + range.n_sectors << SECTOR_SHIFT); r = submit_bio_wait(bio); bio_put(bio); if (unlikely(r)) { @@ -3228,7 +3223,8 @@ next_chunk: bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios); bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector; - __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer)); + bio_add_virt_nofail(bio, recalc_buffer, + range.n_sectors << SECTOR_SHIFT); bip = bio_integrity_alloc(bio, GFP_NOIO, 1); if (unlikely(IS_ERR(bip))) { diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 6adc55fd90d3..127138c61be5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -14,6 +14,7 @@ #include "raid5.h" #include "raid10.h" #include "md-bitmap.h" +#include "dm-core.h" #include <linux/device-mapper.h> @@ -3308,6 +3309,7 @@ size_check: /* Disable/enable discard support on raid set. */ configure_discard_support(rs); + rs->md.dm_gendisk = ti->table->md->disk; mddev_unlock(&rs->md); return 0; @@ -3327,6 +3329,7 @@ static void raid_dtr(struct dm_target *ti) mddev_lock_nointr(&rs->md); md_stop(&rs->md); + rs->md.dm_gendisk = NULL; mddev_unlock(&rs->md); if (work_pending(&rs->md.event_work)) diff --git a/drivers/md/md.c b/drivers/md/md.c index 9daa78c5fe33..0fde115e921f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -111,32 +111,48 @@ static void md_wakeup_thread_directly(struct md_thread __rcu *thread); /* Default safemode delay: 200 msec */ #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1) /* - * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' - * is 1000 KB/sec, so the extra system load does not show up that much. - * Increase it if you want to have more _guaranteed_ speed. Note that - * the RAID driver will use the maximum available bandwidth if the IO - * subsystem is idle. 
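The bcache, dm-bufio and dm-integrity hunks above replace the __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr)) idiom with bio_add_virt_nofail(), which takes the kernel virtual address directly and, like __bio_add_page(), requires that the bio has room for the data. A hedged sketch of the calling convention; the helper below and its synchronous-write shape are illustrative, not code from these drivers:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int write_metadata_block(struct block_device *bdev, void *buf,
                                    unsigned int len, sector_t sector)
    {
            struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOIO);
            int ret;

            if (!bio)
                    return -ENOMEM;

            bio->bi_iter.bi_sector = sector;
            /* Old: __bio_add_page(bio, virt_to_page(buf), len, offset_in_page(buf)); */
            bio_add_virt_nofail(bio, buf, len);

            ret = submit_bio_wait(bio);
            bio_put(bio);
            return ret;
    }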
There is also an 'absolute maximum' reconstruction - * speed limit - in case reconstruction slows down your system despite - * idle IO detection. + * Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit' + * is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load + * does not show up that much. Increase it if you want to have more guaranteed + * speed. Note that the RAID driver will use the maximum bandwidth + * sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle. * - * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. - * or /sys/block/mdX/md/sync_speed_{min,max} + * Background sync IO speed control: + * + * - below speed min: + * no limit; + * - above speed min and below speed max: + * a) if mddev is idle, then no limit; + * b) if mddev is busy handling normal IO, then limit inflight sync IO + * to sync_io_depth; + * - above speed max: + * sync IO can't be issued; + * + * Following configurations can be changed via /proc/sys/dev/raid/ for system + * or /sys/block/mdX/md/ for one array. */ - static int sysctl_speed_limit_min = 1000; static int sysctl_speed_limit_max = 200000; -static inline int speed_min(struct mddev *mddev) +static int sysctl_sync_io_depth = 32; + +static int speed_min(struct mddev *mddev) { return mddev->sync_speed_min ? mddev->sync_speed_min : sysctl_speed_limit_min; } -static inline int speed_max(struct mddev *mddev) +static int speed_max(struct mddev *mddev) { return mddev->sync_speed_max ? mddev->sync_speed_max : sysctl_speed_limit_max; } +static int sync_io_depth(struct mddev *mddev) +{ + return mddev->sync_io_depth ? + mddev->sync_io_depth : sysctl_sync_io_depth; +} + static void rdev_uninit_serial(struct md_rdev *rdev) { if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) @@ -293,14 +309,21 @@ static const struct ctl_table raid_table[] = { .procname = "speed_limit_min", .data = &sysctl_speed_limit_min, .maxlen = sizeof(int), - .mode = S_IRUGO|S_IWUSR, + .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "speed_limit_max", .data = &sysctl_speed_limit_max, .maxlen = sizeof(int), - .mode = S_IRUGO|S_IWUSR, + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sync_io_depth", + .data = &sysctl_sync_io_depth, + .maxlen = sizeof(int), + .mode = 0644, .proc_handler = proc_dointvec, }, }; @@ -5091,7 +5114,7 @@ static ssize_t sync_min_show(struct mddev *mddev, char *page) { return sprintf(page, "%d (%s)\n", speed_min(mddev), - mddev->sync_speed_min ? "local": "system"); + mddev->sync_speed_min ? "local" : "system"); } static ssize_t @@ -5100,7 +5123,7 @@ sync_min_store(struct mddev *mddev, const char *buf, size_t len) unsigned int min; int rv; - if (strncmp(buf, "system", 6)==0) { + if (strncmp(buf, "system", 6) == 0) { min = 0; } else { rv = kstrtouint(buf, 10, &min); @@ -5120,7 +5143,7 @@ static ssize_t sync_max_show(struct mddev *mddev, char *page) { return sprintf(page, "%d (%s)\n", speed_max(mddev), - mddev->sync_speed_max ? "local": "system"); + mddev->sync_speed_max ? 
"local" : "system"); } static ssize_t @@ -5129,7 +5152,7 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len) unsigned int max; int rv; - if (strncmp(buf, "system", 6)==0) { + if (strncmp(buf, "system", 6) == 0) { max = 0; } else { rv = kstrtouint(buf, 10, &max); @@ -5146,6 +5169,35 @@ static struct md_sysfs_entry md_sync_max = __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); static ssize_t +sync_io_depth_show(struct mddev *mddev, char *page) +{ + return sprintf(page, "%d (%s)\n", sync_io_depth(mddev), + mddev->sync_io_depth ? "local" : "system"); +} + +static ssize_t +sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len) +{ + unsigned int max; + int rv; + + if (strncmp(buf, "system", 6) == 0) { + max = 0; + } else { + rv = kstrtouint(buf, 10, &max); + if (rv < 0) + return rv; + if (max == 0) + return -EINVAL; + } + mddev->sync_io_depth = max; + return len; +} + +static struct md_sysfs_entry md_sync_io_depth = +__ATTR_RW(sync_io_depth); + +static ssize_t degraded_show(struct mddev *mddev, char *page) { return sprintf(page, "%d\n", mddev->degraded); @@ -5671,6 +5723,7 @@ static struct attribute *md_redundancy_attrs[] = { &md_mismatches.attr, &md_sync_min.attr, &md_sync_max.attr, + &md_sync_io_depth.attr, &md_sync_speed.attr, &md_sync_force_parallel.attr, &md_sync_completed.attr, @@ -8572,50 +8625,55 @@ void md_cluster_stop(struct mddev *mddev) put_cluster_ops(mddev); } -static int is_mddev_idle(struct mddev *mddev, int init) +static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init) { + unsigned long last_events = rdev->last_events; + + if (!bdev_is_partition(rdev->bdev)) + return true; + + /* + * If rdev is partition, and user doesn't issue IO to the array, the + * array is still not idle if user issues IO to other partitions. + */ + rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0, + sectors) - + part_stat_read_accum(rdev->bdev, sectors); + + return init || rdev->last_events <= last_events; +} + +/* + * mddev is idle if following conditions are matched since last check: + * 1) mddev doesn't have normal IO completed; + * 2) mddev doesn't have inflight normal IO; + * 3) if any member disk is partition, and other partitions don't have IO + * completed; + * + * Noted this checking rely on IO accounting is enabled. + */ +static bool is_mddev_idle(struct mddev *mddev, int init) +{ + unsigned long last_events = mddev->normal_io_events; + struct gendisk *disk; struct md_rdev *rdev; - int idle; - int curr_events; + bool idle = true; - idle = 1; - rcu_read_lock(); - rdev_for_each_rcu(rdev, mddev) { - struct gendisk *disk = rdev->bdev->bd_disk; + disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk; + if (!disk) + return true; - if (!init && !blk_queue_io_stat(disk->queue)) - continue; + mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors); + if (!init && (mddev->normal_io_events > last_events || + bdev_count_inflight(disk->part0))) + idle = false; - curr_events = (int)part_stat_read_accum(disk->part0, sectors) - - atomic_read(&disk->sync_io); - /* sync IO will cause sync_io to increase before the disk_stats - * as sync_io is counted when a request starts, and - * disk_stats is counted when it completes. - * So resync activity will cause curr_events to be smaller than - * when there was no such activity. - * non-sync IO will cause disk_stat to increase without - * increasing sync_io so curr_events will (eventually) - * be larger than it was before. 
Once it becomes - * substantially larger, the test below will cause - * the array to appear non-idle, and resync will slow - * down. - * If there is a lot of outstanding resync activity when - * we set last_event to curr_events, then all that activity - * completing might cause the array to appear non-idle - * and resync will be slowed down even though there might - * not have been non-resync activity. This will only - * happen once though. 'last_events' will soon reflect - * the state where there is little or no outstanding - * resync requests, and further resync activity will - * always make curr_events less than last_events. - * - */ - if (init || curr_events - rdev->last_events > 64) { - rdev->last_events = curr_events; - idle = 0; - } - } + rcu_read_lock(); + rdev_for_each_rcu(rdev, mddev) + if (!is_rdev_holder_idle(rdev, init)) + idle = false; rcu_read_unlock(); + return idle; } @@ -8927,6 +8985,23 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action) } } +static bool sync_io_within_limit(struct mddev *mddev) +{ + int io_sectors; + + /* + * For raid456, sync IO is stripe(4k) per IO, for other levels, it's + * RESYNC_PAGES(64k) per IO. + */ + if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6) + io_sectors = 8; + else + io_sectors = 128; + + return atomic_read(&mddev->recovery_active) < + io_sectors * sync_io_depth(mddev); +} + #define SYNC_MARKS 10 #define SYNC_MARK_STEP (3*HZ) #define UPDATE_FREQUENCY (5*60*HZ) @@ -9195,7 +9270,8 @@ void md_do_sync(struct md_thread *thread) msleep(500); goto repeat; } - if (!is_mddev_idle(mddev, 0)) { + if (!sync_io_within_limit(mddev) && + !is_mddev_idle(mddev, 0)) { /* * Give other IO more of a chance. * The faster the devices, the less we wait. diff --git a/drivers/md/md.h b/drivers/md/md.h index 1cf00a04bcdd..d45a9e6ead80 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -132,7 +132,7 @@ struct md_rdev { sector_t sectors; /* Device size (in 512bytes sectors) */ struct mddev *mddev; /* RAID array if running */ - int last_events; /* IO event timestamp */ + unsigned long last_events; /* IO event timestamp */ /* * If meta_bdev is non-NULL, it means that a separate device is @@ -404,7 +404,8 @@ struct mddev { * are happening, so run/ * takeover/stop are not safe */ - struct gendisk *gendisk; + struct gendisk *gendisk; /* mdraid gendisk */ + struct gendisk *dm_gendisk; /* dm-raid gendisk */ struct kobject kobj; int hold_active; @@ -483,6 +484,7 @@ struct mddev { /* if zero, use the system-wide default */ int sync_speed_min; int sync_speed_max; + int sync_io_depth; /* resync even though the same disks are shared among md-devices */ int parallel_resync; @@ -518,6 +520,7 @@ struct mddev { * adding a spare */ + unsigned long normal_io_events; /* IO event timestamp */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; sector_t recovery_cp; @@ -714,17 +717,6 @@ static inline int mddev_trylock(struct mddev *mddev) } extern void mddev_unlock(struct mddev *mddev); -static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) -{ - if (blk_queue_io_stat(bdev->bd_disk->queue)) - atomic_add(nr_sectors, &bdev->bd_disk->sync_io); -} - -static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) -{ - md_sync_acct(bio->bi_bdev, nr_sectors); -} - struct md_personality { struct md_submodule_head head; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index de9bccbe7337..657d481525be 100644 --- a/drivers/md/raid1.c +++ 
b/drivers/md/raid1.c @@ -2382,7 +2382,6 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) wbio->bi_end_io = end_sync_write; atomic_inc(&r1_bio->remaining); - md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); submit_bio_noacct(wbio); } @@ -3055,7 +3054,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r1_bio->bios[i]; if (bio->bi_end_io == end_sync_read) { read_targets--; - md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; submit_bio_noacct(bio); @@ -3064,7 +3062,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, } else { atomic_set(&r1_bio->remaining, 1); bio = r1_bio->bios[r1_bio->read_disk]; - md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; submit_bio_noacct(bio); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ba32bac975b8..dce06bf65016 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2426,7 +2426,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) atomic_inc(&conf->mirrors[d].rdev->nr_pending); atomic_inc(&r10_bio->remaining); - md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) tbio->bi_opf |= MD_FAILFAST; @@ -2448,8 +2447,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) bio_copy_data(tbio, fbio); d = r10_bio->devs[i].devnum; atomic_inc(&r10_bio->remaining); - md_sync_acct(conf->mirrors[d].replacement->bdev, - bio_sectors(tbio)); submit_bio_noacct(tbio); } @@ -2583,13 +2580,10 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) d = r10_bio->devs[1].devnum; if (wbio->bi_end_io) { atomic_inc(&conf->mirrors[d].rdev->nr_pending); - md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); submit_bio_noacct(wbio); } if (wbio2) { atomic_inc(&conf->mirrors[d].replacement->nr_pending); - md_sync_acct(conf->mirrors[d].replacement->bdev, - bio_sectors(wbio2)); submit_bio_noacct(wbio2); } } @@ -3757,7 +3751,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, r10_bio->sectors = nr_sectors; if (bio->bi_end_io == end_sync_read) { - md_sync_acct_bio(bio, nr_sectors); bio->bi_status = 0; submit_bio_noacct(bio); } @@ -4880,7 +4873,6 @@ read_more: r10_bio->sectors = nr_sectors; /* Now submit the read */ - md_sync_acct_bio(read_bio, r10_bio->sectors); atomic_inc(&r10_bio->remaining); read_bio->bi_next = NULL; submit_bio_noacct(read_bio); @@ -4940,7 +4932,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) continue; atomic_inc(&rdev->nr_pending); - md_sync_acct_bio(b, r10_bio->sectors); atomic_inc(&r10_bio->remaining); b->bi_next = NULL; submit_bio_noacct(b); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6389383166c0..ca5b0e8ba707 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1240,10 +1240,6 @@ again: } if (rdev) { - if (s->syncing || s->expanding || s->expanded - || s->replacing) - md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf)); - set_bit(STRIPE_IO_STARTED, &sh->state); bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags); @@ -1300,10 +1296,6 @@ again: submit_bio_noacct(bi); } if (rrdev) { - if (s->syncing || s->expanding || s->expanded - || s->replacing) - md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf)); - set_bit(STRIPE_IO_STARTED, &sh->state); bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags); diff --git 
a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 53f1888cc84f..d5bf3243fe78 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -1455,10 +1455,8 @@ static int gpmc_setup_irq(struct gpmc_device *gpmc) gpmc->irq_chip.irq_unmask = gpmc_irq_unmask; gpmc->irq_chip.irq_set_type = gpmc_irq_set_type; - gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node, - gpmc->nirqs, - &gpmc_irq_domain_ops, - gpmc); + gpmc_irq_domain = irq_domain_create_linear(of_fwnode_handle(gpmc->dev->of_node), + gpmc->nirqs, &gpmc_irq_domain_ops, gpmc); if (!gpmc_irq_domain) { dev_err(gpmc->dev, "IRQ domain add failed\n"); return -ENODEV; diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 8e68b64bd7f8..488e346047c1 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -624,8 +624,8 @@ static int device_irq_init(struct pm860x_chip *chip, ret = -EBUSY; goto out; } - irq_domain_add_legacy(node, nr_irqs, chip->irq_base, 0, - &pm860x_irq_domain_ops, chip); + irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, chip->irq_base, 0, + &pm860x_irq_domain_ops, chip); chip->core_irq = i2c->irq; if (!chip->core_irq) goto out; diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 15c95828b09a..049abcbd71ce 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -580,9 +580,9 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np) num_irqs = AB8500_NR_IRQS; /* If ->irq_base is zero this will give a linear mapping */ - ab8500->domain = irq_domain_add_simple(ab8500->dev->of_node, - num_irqs, 0, - &ab8500_irq_ops, ab8500); + ab8500->domain = irq_domain_create_simple(of_fwnode_handle(ab8500->dev->of_node), + num_irqs, 0, + &ab8500_irq_ops, ab8500); if (!ab8500->domain) { dev_err(ab8500->dev, "Failed to create irqdomain\n"); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index d919ae9691e2..ac2139597fab 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -312,8 +312,7 @@ int arizona_irq_init(struct arizona *arizona) flags |= arizona->pdata.irq_flags; /* Allocate a virtual IRQ domain to distribute to the regmap domains */ - arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops, - arizona); + arizona->virq = irq_domain_create_linear(NULL, 2, &arizona_domain_ops, arizona); if (!arizona->virq) { dev_err(arizona->dev, "Failed to add core IRQ domain\n"); ret = -EINVAL; diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5b3e355e78f6..21e68a382b11 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -2607,9 +2607,9 @@ static int db8500_irq_init(struct device_node *np) { int i; - db8500_irq_domain = irq_domain_add_simple( - np, NUM_PRCMU_WAKEUPS, 0, - &db8500_irq_ops, NULL); + db8500_irq_domain = irq_domain_create_simple(of_fwnode_handle(np), + NUM_PRCMU_WAKEUPS, 0, + &db8500_irq_ops, NULL); if (!db8500_irq_domain) { pr_err("Failed to create irqdomain\n"); diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index 6fe388da6fb6..d47152467951 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -65,15 +65,14 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev, struct mx25_tsadc *tsadc) { struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; int irq; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops, - tsadc); + tsadc->domain = 
irq_domain_create_simple(of_fwnode_handle(dev->of_node), 2, 0, + &mx25_tsadc_domain_ops, tsadc); if (!tsadc->domain) { dev_err(dev, "Failed to add irq domain\n"); return -ENOMEM; diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c index 39006297f3d2..ea0fdf7a4b6e 100644 --- a/drivers/mfd/lp8788-irq.c +++ b/drivers/mfd/lp8788-irq.c @@ -161,7 +161,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq) return -ENOMEM; irqd->lp = lp; - irqd->domain = irq_domain_add_linear(lp->dev->of_node, LP8788_INT_MAX, + irqd->domain = irq_domain_create_linear(of_fwnode_handle(lp->dev->of_node), LP8788_INT_MAX, &lp8788_domain_ops, irqd); if (!irqd->domain) { dev_err(lp->dev, "failed to add irq domain err\n"); diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 105d79b91493..78b16c67a5fc 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -682,8 +682,8 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, return -EBUSY; } - irq_domain_add_legacy(node, MAX8925_NR_IRQS, chip->irq_base, 0, - &max8925_irq_domain_ops, chip); + irq_domain_create_legacy(of_fwnode_handle(node), MAX8925_NR_IRQS, chip->irq_base, 0, + &max8925_irq_domain_ops, chip); /* request irq handler for pmic main irq*/ chip->core_irq = irq; diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c index 92e348df03d1..cc87571c9af5 100644 --- a/drivers/mfd/max8997-irq.c +++ b/drivers/mfd/max8997-irq.c @@ -327,8 +327,8 @@ int max8997_irq_init(struct max8997_dev *max8997) true : false; } - domain = irq_domain_add_linear(NULL, MAX8997_IRQ_NR, - &max8997_irq_domain_ops, max8997); + domain = irq_domain_create_linear(NULL, MAX8997_IRQ_NR, + &max8997_irq_domain_ops, max8997); if (!domain) { dev_err(max8997->dev, "could not create irq domain\n"); return -ENODEV; diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c index 83b6f510bc05..b0773fa6e07f 100644 --- a/drivers/mfd/max8998-irq.c +++ b/drivers/mfd/max8998-irq.c @@ -230,7 +230,7 @@ int max8998_irq_init(struct max8998_dev *max8998) max8998_write_reg(max8998->i2c, MAX8998_REG_STATUSM1, 0xff); max8998_write_reg(max8998->i2c, MAX8998_REG_STATUSM2, 0xff); - domain = irq_domain_add_simple(NULL, MAX8998_IRQ_NR, + domain = irq_domain_create_simple(NULL, MAX8998_IRQ_NR, max8998->irq_base, &max8998_irq_domain_ops, max8998); if (!domain) { dev_err(max8998->dev, "could not create irq domain\n"); diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c index 49830b526ee8..9f0bcc3ad7a1 100644 --- a/drivers/mfd/mt6358-irq.c +++ b/drivers/mfd/mt6358-irq.c @@ -272,9 +272,9 @@ int mt6358_irq_init(struct mt6397_chip *chip) irqd->pmic_ints[i].en_reg_shift * j, 0); } - chip->irq_domain = irq_domain_add_linear(chip->dev->of_node, - irqd->num_pmic_irqs, - &mt6358_irq_domain_ops, chip); + chip->irq_domain = irq_domain_create_linear(of_fwnode_handle(chip->dev->of_node), + irqd->num_pmic_irqs, + &mt6358_irq_domain_ops, chip); if (!chip->irq_domain) { dev_err(chip->dev, "Could not create IRQ domain\n"); return -ENODEV; diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c index 1310665200ed..badc614b4345 100644 --- a/drivers/mfd/mt6397-irq.c +++ b/drivers/mfd/mt6397-irq.c @@ -216,10 +216,8 @@ int mt6397_irq_init(struct mt6397_chip *chip) regmap_write(chip->regmap, chip->int_con[2], 0x0); chip->pm_nb.notifier_call = mt6397_irq_pm_notifier; - chip->irq_domain = irq_domain_add_linear(chip->dev->of_node, - MT6397_IRQ_NR, - &mt6397_irq_domain_ops, - chip); + chip->irq_domain = 
irq_domain_create_linear(of_fwnode_handle(chip->dev->of_node), + MT6397_IRQ_NR, &mt6397_irq_domain_ops, chip); if (!chip->irq_domain) { dev_err(chip->dev, "could not create irq domain\n"); return -ENOMEM; diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c index f9ebdf5845b8..c96ea6fbede8 100644 --- a/drivers/mfd/qcom-pm8xxx.c +++ b/drivers/mfd/qcom-pm8xxx.c @@ -559,10 +559,8 @@ static int pm8xxx_probe(struct platform_device *pdev) chip->pm_irq_data = data; spin_lock_init(&chip->pm_irq_lock); - chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node, - data->num_irqs, - &pm8xxx_irq_domain_ops, - chip); + chip->irqdomain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node), + data->num_irqs, &pm8xxx_irq_domain_ops, chip); if (!chip->irqdomain) return -ENODEV; diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c index f391c2ccaa72..823b1d29389e 100644 --- a/drivers/mfd/stmfx.c +++ b/drivers/mfd/stmfx.c @@ -269,7 +269,7 @@ static int stmfx_irq_init(struct i2c_client *client) u32 irqoutpin = 0, irqtrigger; int ret; - stmfx->irq_domain = irq_domain_add_simple(stmfx->dev->of_node, + stmfx->irq_domain = irq_domain_create_simple(of_fwnode_handle(stmfx->dev->of_node), STMFX_REG_IRQ_SRC_MAX, 0, &stmfx_irq_ops, stmfx); if (!stmfx->irq_domain) { diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 9c3cf58457a7..819d19dc9b4a 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -1219,8 +1219,8 @@ static int stmpe_irq_init(struct stmpe *stmpe, struct device_node *np) int base = 0; int num_irqs = stmpe->variant->num_irqs; - stmpe->domain = irq_domain_add_simple(np, num_irqs, base, - &stmpe_irq_ops, stmpe); + stmpe->domain = irq_domain_create_simple(of_fwnode_handle(np), num_irqs, + base, &stmpe_irq_ops, stmpe); if (!stmpe->domain) { dev_err(stmpe->dev, "Failed to create irqdomain\n"); return -ENOSYS; diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index ef953ee73145..2d4eb771e230 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ -234,9 +234,9 @@ static const struct irq_domain_ops tc3589x_irq_ops = { static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np) { - tc3589x->domain = irq_domain_add_simple( - np, TC3589x_NR_INTERNAL_IRQS, 0, - &tc3589x_irq_ops, tc3589x); + tc3589x->domain = irq_domain_create_simple(of_fwnode_handle(np), + TC3589x_NR_INTERNAL_IRQS, 0, + &tc3589x_irq_ops, tc3589x); if (!tc3589x->domain) { dev_err(tc3589x->dev, "Failed to create irqdomain\n"); diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c index 029ecc32f078..4e9669d327b4 100644 --- a/drivers/mfd/tps65217.c +++ b/drivers/mfd/tps65217.c @@ -158,7 +158,7 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq) tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK, TPS65217_INT_MASK, TPS65217_PROTECT_NONE); - tps->irq_domain = irq_domain_add_linear(tps->dev->of_node, + tps->irq_domain = irq_domain_create_linear(of_fwnode_handle(tps->dev->of_node), TPS65217_NUM_IRQ, &tps65217_irq_domain_ops, tps); if (!tps->irq_domain) { dev_err(tps->dev, "Could not create IRQ domain\n"); diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index 82714899efb2..853c48286071 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -363,7 +363,7 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq, new_irq_base = 0; } - tps6586x->irq_domain = irq_domain_add_simple(tps6586x->dev->of_node, + tps6586x->irq_domain = irq_domain_create_simple(of_fwnode_handle(tps6586x->dev->of_node), irq_num, new_irq_base, 
&tps6586x_domain_ops, tps6586x); if (!tps6586x->irq_domain) { diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 87496c1cb8bc..232c2bfe8c18 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -691,8 +691,8 @@ int twl4030_init_irq(struct device *dev, int irq_num) return irq_base; } - irq_domain_add_legacy(node, nr_irqs, irq_base, 0, - &irq_domain_simple_ops, NULL); + irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0, + &irq_domain_simple_ops, NULL); irq_end = irq_base + TWL4030_CORE_NR_IRQS; diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 3c03681c124c..00b14cef1dfb 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c @@ -364,7 +364,6 @@ static const struct of_device_id twl6030_of_match[] __maybe_unused = { int twl6030_init_irq(struct device *dev, int irq_num) { - struct device_node *node = dev->of_node; int nr_irqs; int status; u8 mask[3]; @@ -412,8 +411,8 @@ int twl6030_init_irq(struct device *dev, int irq_num) twl6030_irq->irq_mapping_tbl = of_id->data; twl6030_irq->irq_domain = - irq_domain_add_linear(node, nr_irqs, - &twl6030_irq_domain_ops, twl6030_irq); + irq_domain_create_linear(of_fwnode_handle(dev->of_node), nr_irqs, + &twl6030_irq_domain_ops, twl6030_irq); if (!twl6030_irq->irq_domain) { dev_err(dev, "Can't add irq_domain\n"); return -ENOMEM; diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index f1f58e3149ae..b3883fa5dd9f 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -587,16 +587,13 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) } if (irq_base) - domain = irq_domain_add_legacy(wm831x->dev->of_node, - ARRAY_SIZE(wm831x_irqs), - irq_base, 0, - &wm831x_irq_domain_ops, - wm831x); + domain = irq_domain_create_legacy(of_fwnode_handle(wm831x->dev->of_node), + ARRAY_SIZE(wm831x_irqs), irq_base, 0, + &wm831x_irq_domain_ops, wm831x); else - domain = irq_domain_add_linear(wm831x->dev->of_node, - ARRAY_SIZE(wm831x_irqs), - &wm831x_irq_domain_ops, - wm831x); + domain = irq_domain_create_linear(of_fwnode_handle(wm831x->dev->of_node), + ARRAY_SIZE(wm831x_irqs), &wm831x_irq_domain_ops, + wm831x); if (!domain) { dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n"); diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c index 651a028bc519..1475b1ac6983 100644 --- a/drivers/mfd/wm8994-irq.c +++ b/drivers/mfd/wm8994-irq.c @@ -213,9 +213,7 @@ int wm8994_irq_init(struct wm8994 *wm8994) return ret; } - wm8994->edge_irq = irq_domain_add_linear(NULL, 1, - &wm8994_edge_irq_ops, - wm8994); + wm8994->edge_irq = irq_domain_create_linear(NULL, 1, &wm8994_edge_irq_ops, wm8994); ret = regmap_add_irq_chip(wm8994->regmap, irq_create_mapping(wm8994->edge_irq, diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c index 18fc1aaa5cdd..2b6778d8d166 100644 --- a/drivers/misc/cs5535-mfgpt.c +++ b/drivers/misc/cs5535-mfgpt.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/cs5535.h> #include <linux/slab.h> +#include <asm/msr.h> #define DRV_NAME "cs5535-mfgpt" diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c index 69ee4f39af2a..187c5bc91e31 100644 --- a/drivers/misc/hi6421v600-irq.c +++ b/drivers/misc/hi6421v600-irq.c @@ -254,8 +254,9 @@ static int hi6421v600_irq_probe(struct platform_device *pdev) if (!priv->irqs) return -ENOMEM; - priv->domain = irq_domain_add_simple(np, PMIC_IRQ_LIST_MAX, 0, - &hi6421v600_domain_ops, priv); + priv->domain = irq_domain_create_simple(of_fwnode_handle(np), + 
PMIC_IRQ_LIST_MAX, 0, + &hi6421v600_domain_ops, priv); if (!priv->domain) { dev_err(dev, "Failed to create IRQ domain\n"); return -ENODEV; diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c index 09b9ab15e499..a20d03fdd6a9 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/sizes.h> @@ -745,6 +746,29 @@ static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv } } +static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv) +{ + struct device *dev = mmc_dev(host->mmc); + int ret; + + /* + * This works around the design of the RK3576's power domains, which + * makes the PD_NVM power domain, which the sdhci controller on the + * RK3576 is in, never come back the same way once it's run-time + * suspended once. This can happen during early kernel boot if no driver + * is using either PD_NVM or its child power domain PD_SDGMAC for a + * short moment, leading to it being turned off to save power. By + * keeping it on, sdhci suspending won't lead to PD_NVM becoming a + * candidate for getting turned off. + */ + ret = dev_pm_genpd_rpm_always_on(dev, true); + if (ret && ret != -EOPNOTSUPP) + dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n", + ERR_PTR(ret)); + + dwcmshc_rk35xx_postinit(host, dwc_priv); +} + static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -1176,6 +1200,18 @@ static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = { .postinit = dwcmshc_rk35xx_postinit, }; +static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = { + .pdata = { + .ops = &sdhci_dwcmshc_rk35xx_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN, + }, + .init = dwcmshc_rk35xx_init, + .postinit = dwcmshc_rk3576_postinit, +}; + static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = { .pdata = { .ops = &sdhci_dwcmshc_th1520_ops, @@ -1275,6 +1311,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = { .data = &sdhci_dwcmshc_rk35xx_pdata, }, { + .compatible = "rockchip,rk3576-dwcmshc", + .data = &sdhci_dwcmshc_rk3576_pdata, + }, + { .compatible = "rockchip,rk3568-dwcmshc", .data = &sdhci_dwcmshc_rk35xx_pdata, }, diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index f75c31815ab0..73385ff4c0f3 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -155,6 +155,7 @@ struct sdhci_am654_data { u32 tuning_loop; #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) +#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1) }; struct window { @@ -166,6 +167,7 @@ struct window { struct sdhci_am654_driver_data { const struct sdhci_pltfm_data *pdata; u32 flags; + u32 quirks; #define IOMUX_PRESENT (1 << 0) #define FREQSEL_2_BIT (1 << 1) #define STRBSEL_4_BIT (1 << 2) @@ -356,6 +358,29 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, sdhci_set_clock(host, clock); } +static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data 
*sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + int ret; + + if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) && + ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = mmc_regulator_set_vqmmc(mmc, ios); + if (ret < 0) { + pr_err("%s: Switching to 1.8V signalling voltage failed,\n", + mmc_hostname(mmc)); + return -EIO; + } + } + return 0; + } + + return sdhci_start_signal_voltage_switch(mmc, ios); +} + static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg) { writeb(val, host->ioaddr + reg); @@ -650,6 +675,12 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = { .flags = IOMUX_PRESENT, }; +static const struct sdhci_am654_driver_data sdhci_am62_4bit_drvdata = { + .pdata = &sdhci_j721e_4bit_pdata, + .flags = IOMUX_PRESENT, + .quirks = SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA, +}; + static const struct soc_device_attribute sdhci_am654_devices[] = { { .family = "AM65X", .revision = "SR1.0", @@ -872,7 +903,7 @@ static const struct of_device_id sdhci_am654_of_match[] = { }, { .compatible = "ti,am62-sdhci", - .data = &sdhci_j721e_4bit_drvdata, + .data = &sdhci_am62_4bit_drvdata, }, { /* sentinel */ } }; @@ -906,6 +937,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); sdhci_am654 = sdhci_pltfm_priv(pltfm_host); sdhci_am654->flags = drvdata->flags; + sdhci_am654->quirks = drvdata->quirks; clk_xin = devm_clk_get(dev, "clk_xin"); if (IS_ERR(clk_xin)) { @@ -940,6 +972,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto err_pltfm_free; } + host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch; host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; pm_runtime_get_noresume(dev); diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 341318024a19..ec95d787001b 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -351,20 +351,20 @@ static int __init cs553x_init(void) return -ENXIO; /* If it doesn't have the CS553[56], abort */ - rdmsrl(MSR_DIVIL_GLD_CAP, val); + rdmsrq(MSR_DIVIL_GLD_CAP, val); val &= ~0xFFULL; if (val != CAP_CS5535 && val != CAP_CS5536) return -ENXIO; /* If it doesn't have the NAND controller enabled, abort */ - rdmsrl(MSR_DIVIL_BALL_OPTS, val); + rdmsrq(MSR_DIVIL_BALL_OPTS, val); if (val & PIN_OPT_IDE) { pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n"); return -ENXIO; } for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val); + rdmsrq(MSR_DIVIL_LBAR_FLSH0 + i, val); if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND)) err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF); diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index cf0d51805272..f6921368cd14 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/timer.h> +#include <net/netdev_queues.h> MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); @@ -410,10 +411,13 @@ struct kvaser_pciefd_can { void __iomem *reg_base; struct can_berr_counter bec; u8 cmd_seq; + u8 tx_max_count; + u8 tx_idx; + u8 ack_idx; int err_rep_cnt; - int echo_idx; + unsigned int completed_tx_pkts; + unsigned int completed_tx_bytes; spinlock_t lock; /* Locks sensitive registers (e.g. 
MODE) */ - spinlock_t echo_lock; /* Locks the message echo buffer */ struct timer_list bec_poll_timer; struct completion start_comp, flush_comp; }; @@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev) int ret; struct kvaser_pciefd_can *can = netdev_priv(netdev); + can->tx_idx = 0; + can->ack_idx = 0; + ret = open_candev(netdev); if (ret) return ret; @@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev) timer_delete(&can->bec_poll_timer); } can->can.state = CAN_STATE_STOPPED; + netdev_reset_queue(netdev); close_candev(netdev); return ret; } +static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can) +{ + return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx)); +} + static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, - struct kvaser_pciefd_can *can, + struct can_priv *can, u8 seq, struct sk_buff *skb) { struct canfd_frame *cf = (struct canfd_frame *)skb->data; int packet_size; - int seq = can->echo_idx; memset(p, 0, sizeof(*p)); - if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT) p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; if (cf->can_id & CAN_RTR_FLAG) @@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, } else { p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, - can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode)); + can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode)); } p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); @@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct kvaser_pciefd_can *can = netdev_priv(netdev); - unsigned long irq_flags; struct kvaser_pciefd_tx_packet packet; + unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1); + unsigned int frame_len; int nr_words; - u8 count; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; + if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1)) + return NETDEV_TX_BUSY; - nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); + nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb); - spin_lock_irqsave(&can->echo_lock, irq_flags); /* Prepare and save echo skb in internal slot */ - can_put_echo_skb(skb, netdev, can->echo_idx, 0); - - /* Move echo index to the next slot */ - can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + WRITE_ONCE(can->can.echo_skb[seq], NULL); + frame_len = can_skb_get_frame_len(skb); + can_put_echo_skb(skb, netdev, seq, frame_len); + netdev_sent_queue(netdev, frame_len); + WRITE_ONCE(can->tx_idx, can->tx_idx + 1); /* Write header to fifo */ iowrite32(packet.header[0], @@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, KVASER_PCIEFD_KCAN_FIFO_LAST_REG); } - count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); - /* No room for a new message, stop the queue until at least one - * successful transmit - */ - if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx]) - netif_stop_queue(netdev); - spin_unlock_irqrestore(&can->echo_lock, irq_flags); + netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1); return NETDEV_TX_OK; } @@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) can->kv_pcie = pcie; can->cmd_seq = 0; can->err_rep_cnt = 0; + can->completed_tx_pkts = 0; + 
can->completed_tx_bytes = 0; can->bec.txerr = 0; can->bec.rxerr = 0; @@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) tx_nr_packets_max = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); can->can.clock.freq = pcie->freq; - can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); - can->echo_idx = 0; - spin_lock_init(&can->echo_lock); + can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count); spin_lock_init(&can->lock); can->can.bittiming_const = &kvaser_pciefd_bittiming_const; @@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_canfd_skb(priv->dev, &cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; } cf->len = can_fd_dlc2len(dlc); @@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; } can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); } @@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, priv->dev->stats.rx_packets++; kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); - return netif_rx(skb); + netif_rx(skb); + + return 0; } static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, @@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, netdev_dbg(can->can.dev, "Packet was flushed\n"); } else { int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]); - int len; - u8 count; + unsigned int len, frame_len = 0; struct sk_buff *skb; + if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1))) + return 0; skb = can->can.echo_skb[echo_idx]; - if (skb) - kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); - len = can_get_echo_skb(can->can.dev, echo_idx, NULL); - count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + if (!skb) + return 0; + kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); + len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len); - if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev)) - netif_wake_queue(can->can.dev); + /* Pairs with barrier in kvaser_pciefd_start_xmit() */ + smp_store_release(&can->ack_idx, can->ack_idx + 1); + can->completed_tx_pkts++; + can->completed_tx_bytes += frame_len; if (!one_shot_fail) { can->can.dev->stats.tx_bytes += len; @@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) { int pos = 0; int res = 0; + unsigned int i; do { res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); + /* Report ACKs in this buffer to BQL en masse for correct periods */ + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; + + if (!can->completed_tx_pkts) + continue; + netif_subqueue_completed_wake(can->can.dev, 0, + can->completed_tx_pkts, + can->completed_tx_bytes, + kvaser_pciefd_tx_avail(can), 1); + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; + } + return res; } -static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { + void 
__iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG; u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) + iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { kvaser_pciefd_read_buffer(pcie, 0); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */ + } - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { kvaser_pciefd_read_buffer(pcie, 1); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */ + } if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || irq & KVASER_PCIEFD_SRB_IRQ_DUF1)) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); - - iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - return irq; } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) @@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); - u32 srb_irq = 0; - u32 srb_release = 0; int i; if (!(pci_irq & irq_mask->all)) return IRQ_NONE; + iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + if (pci_irq & irq_mask->kcan_rx0) - srb_irq = kvaser_pciefd_receive_irq(pcie); + kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { if (pci_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0; - - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1; - - if (srb_release) - iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); return IRQ_HANDLED; } @@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) } } +static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie) +{ + unsigned int i; + + /* Masking PCI_IRQ is insufficient as running ISR will unmask it */ + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); + for (i = 0; i < pcie->nr_channels; ++i) + iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); +} + static int kvaser_pciefd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; struct kvaser_pciefd *pcie; const struct kvaser_pciefd_irq_mask *irq_mask; - void __iomem *irq_en_base; pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); /* Enable PCI interrupts */ - irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); - iowrite32(irq_mask->all, irq_en_base); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); /* Ready the DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); @@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, return 0; err_free_irq: - /* Disable PCI interrupts */ - iowrite32(0, irq_en_base); + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); err_pci_free_irq_vectors: @@ -1844,35 +1875,26 @@ err_disable_pci: return ret; } -static void 
kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) -{ - int i; - - for (i = 0; i < pcie->nr_channels; i++) { - struct kvaser_pciefd_can *can = pcie->can[i]; - - if (can) { - iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); - unregister_candev(can->can.dev); - timer_delete(&can->bec_poll_timer); - kvaser_pciefd_pwm_stop(can); - free_candev(can->can.dev); - } - } -} - static void kvaser_pciefd_remove(struct pci_dev *pdev) { struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + unsigned int i; - kvaser_pciefd_remove_all_ctrls(pcie); + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; - /* Disable interrupts */ - iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); - iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + unregister_candev(can->can.dev); + timer_delete(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + } + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); pci_free_irq_vectors(pcie->pci); + + for (i = 0; i < pcie->nr_channels; ++i) + free_candev(pcie->can[i]->can.dev); + pci_iounmap(pdev, pcie->reg_base); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index 24c6622d36bd..58ff2ec1d975 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>"); #define SLCAN_CMD_LEN 1 #define SLCAN_SFF_ID_LEN 3 #define SLCAN_EFF_ID_LEN 8 +#define SLCAN_DATA_LENGTH_LEN 1 +#define SLCAN_ERROR_LEN 1 #define SLCAN_STATE_LEN 1 #define SLCAN_STATE_BE_RXCNT_LEN 3 #define SLCAN_STATE_BE_TXCNT_LEN 3 -#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \ - SLCAN_STATE_BE_RXCNT_LEN + \ - SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \ + SLCAN_STATE_LEN + \ + SLCAN_STATE_BE_RXCNT_LEN + \ + SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_ERROR_LEN + \ + SLCAN_DATA_LENGTH_LEN) +#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_SFF_ID_LEN + \ + SLCAN_DATA_LENGTH_LEN) struct slcan { struct can_priv can; @@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; + if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN) + return; + skb = alloc_can_skb(sl->dev, &cf); if (unlikely(!skb)) { sl->dev->stats.rx_dropped++; @@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl) return; } - if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) + if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN) return; cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; @@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl) bool rx_errors = false, tx_errors = false, rx_over_errors = false; int i, len; + if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN) + return; + /* get len from sanitized ASCII value */ len = cmd[1]; if (len >= '0' && len < '9') @@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl) static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ - if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 4) + if (!test_and_clear_bit(SLF_ERROR, &sl->flags)) slcan_bump(sl); sl->rcount = 0; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index f95a9aac56ee..d66da5cbbeb9 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ 
-2767,8 +2767,9 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq) kirq->dev = dev; kirq->masked = ~0; - kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0, - &ksz_irq_domain_ops, kirq); + kirq->domain = irq_domain_create_simple(of_fwnode_handle(dev->dev->of_node), + kirq->nirqs, 0, + &ksz_irq_domain_ops, kirq); if (!kirq->domain) return -ENOMEM; diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c index 22fb9ef4645c..992101e4bdee 100644 --- a/drivers/net/dsa/microchip/ksz_ptp.c +++ b/drivers/net/dsa/microchip/ksz_ptp.c @@ -1136,8 +1136,8 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p) init_completion(&port->tstamp_msg_comp); - ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs, - &ksz_ptp_irq_domain_ops, ptpirq); + ptpirq->domain = irq_domain_create_linear(of_fwnode_handle(dev->dev->of_node), + ptpirq->nirqs, &ksz_ptp_irq_domain_ops, ptpirq); if (!ptpirq->domain) return -ENOMEM; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 08db846cda8d..2281d6ab8c9a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -297,7 +297,7 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) u16 reg, mask; chip->g1_irq.nirqs = chip->info->g1_irqs; - chip->g1_irq.domain = irq_domain_add_simple( + chip->g1_irq.domain = irq_domain_create_simple( NULL, chip->g1_irq.nirqs, 0, &mv88e6xxx_g1_irq_domain_ops, chip); if (!chip->g1_irq.domain) diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index b2b5f6ba438f..aaf97c1e3167 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1154,8 +1154,10 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) if (err) return err; - chip->g2_irq.domain = irq_domain_add_simple( - chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip); + chip->g2_irq.domain = irq_domain_create_simple(of_fwnode_handle(chip->dev->of_node), + 16, 0, + &mv88e6xxx_g2_irq_domain_ops, + chip); if (!chip->g2_irq.domain) return -ENOMEM; diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index e9f2c67bc15f..79a29676ca6f 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -821,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv) return ret; } - priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops, - priv); + priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(np), 1, + &ar9331_sw_irqdomain_ops, priv); if (!priv->irqdomain) { dev_err(dev, "failed to create IRQ domain\n"); return -EINVAL; diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 7e96355c28bd..964a56ee16cc 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -1719,8 +1719,8 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv) goto out_put_node; } - priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports, - &rtl8365mb_irqdomain_ops, priv); + priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), priv->num_ports, + &rtl8365mb_irqdomain_ops, priv); if (!priv->irqdomain) { dev_err(priv->dev, "failed to add irq domain\n"); ret = -ENOMEM; diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index f54771cab56d..8bdb52b5fdcb 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -550,10 +550,8 @@ static 
int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv) dev_err(priv->dev, "unable to request irq: %d\n", ret); goto out_put_node; } - priv->irqdomain = irq_domain_add_linear(intc, - RTL8366RB_NUM_INTERRUPT, - &rtl8366rb_irqdomain_ops, - priv); + priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), RTL8366RB_NUM_INTERRUPT, + &rtl8366rb_irqdomain_ops, priv); if (!priv->irqdomain) { dev_err(priv->dev, "failed to create IRQ domain\n"); ret = -EINVAL; diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index d748dc6de923..1e9ab65218ff 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -614,7 +614,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) struct airoha_queue_entry *e = &q->entry[q->tail]; struct airoha_qdma_desc *desc = &q->desc[q->tail]; u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); - dma_addr_t dma_addr = le32_to_cpu(desc->addr); struct page *page = virt_to_head_page(e->buf); u32 desc_ctrl = le32_to_cpu(desc->ctrl); struct airoha_gdm_port *port; @@ -623,22 +622,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) break; - if (!dma_addr) - break; - - len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); - if (!len) - break; - q->tail = (q->tail + 1) % q->ndesc; q->queued--; - dma_sync_single_for_cpu(eth->dev, dma_addr, + dma_sync_single_for_cpu(eth->dev, e->dma_addr, SKB_WITH_OVERHEAD(q->buf_size), dir); + len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); data_len = q->skb ? q->buf_size : SKB_WITH_OVERHEAD(q->buf_size); - if (data_len < len) + if (!len || data_len < len) goto free_frag; p = airoha_qdma_get_gdm_port(eth, desc); @@ -701,9 +694,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) q->skb = NULL; continue; free_frag: - page_pool_put_full_page(q->page_pool, page, true); - dev_kfree_skb(q->skb); - q->skb = NULL; + if (q->skb) { + dev_kfree_skb(q->skb); + q->skb = NULL; + } else { + page_pool_put_full_page(q->page_pool, page, true); + } } airoha_qdma_fill_rx_queue(q); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index a8e930d5dbb0..7564705d6478 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -20,6 +20,7 @@ #include <asm/byteorder.h> #include <linux/bitmap.h> #include <linux/auxiliary_bus.h> +#include <net/netdev_lock.h> #include "bnxt_hsi.h" #include "bnxt.h" @@ -309,14 +310,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp) if (!ulp->msix_requested) return; - netdev_lock(bp->dev); - ops = rcu_dereference(ulp->ulp_ops); + ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev); if (!ops || !ops->ulp_irq_stop) return; if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) reset = true; ops->ulp_irq_stop(ulp->handle, reset); - netdev_unlock(bp->dev); } } @@ -335,8 +334,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) if (!ulp->msix_requested) return; - netdev_lock(bp->dev); - ops = rcu_dereference(ulp->ulp_ops); + ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev); if (!ops || !ops->ulp_irq_restart) return; @@ -348,7 +346,6 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) bnxt_fill_msix_vecs(bp, ent); } ops->ulp_irq_restart(ulp->handle, ent); - netdev_unlock(bp->dev); kfree(ent); } } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index a0bcfb5a713d..ff3295b60a69 
100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -61,6 +61,8 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) return -EBUSY; } + netif_device_detach(priv->netdev); + priv->reset_type = type; set_bit(HBG_NIC_STATE_RESETTING, &priv->state); clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); @@ -91,6 +93,8 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type) return ret; } + netif_device_attach(priv->netdev); + dev_info(&priv->pdev->dev, "reset done\n"); return ret; } @@ -117,16 +121,13 @@ void hbg_err_reset(struct hbg_priv *priv) if (running) dev_close(priv->netdev); - hbg_reset(priv); - - /* in hbg_pci_err_detected(), we will detach first, - * so we need to attach before open - */ - if (!netif_device_present(priv->netdev)) - netif_device_attach(priv->netdev); + if (hbg_reset(priv)) + goto err_unlock; if (running) dev_open(priv->netdev, NULL); + +err_unlock: rtnl_unlock(); } @@ -160,7 +161,6 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); hbg_err_reset(priv); - netif_device_attach(netdev); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c index 8f1107b85fbb..55520053270a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -317,6 +317,9 @@ static void hbg_update_stats_by_info(struct hbg_priv *priv, const struct hbg_ethtool_stats *stats; u32 i; + if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state)) + return; + for (i = 0; i < info_len; i++) { stats = &info[i]; if (!stats->reg) diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 22371011c249..2410aee59fb2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) */ if (!primary_lag) { lag->primary = true; + if (!ice_is_switchdev_running(lag->pf)) + return; + /* Configure primary's SWID to be shared */ ice_lag_primary_swid(lag, true); primary_lag = lag; } else { u16 swid; + if (!ice_is_switchdev_running(primary_lag->pf)) + return; + swid = primary_lag->pf->hw.port_info->sw_id; ice_lag_set_swid(swid, lag, true); ice_lag_add_prune_list(primary_lag, lag->pf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 7c3006eb68dd..6446d0fcc052 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -4275,7 +4275,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) } ice_vfhw_mac_add(vf, &al->list[i]); - vf->num_mac++; break; } diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index aef0e9775a33..70dbf80f3bb7 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -143,6 +143,7 @@ enum idpf_vport_state { * @vport_id: Vport identifier * @link_speed_mbps: Link speed in mbps * @vport_idx: Relative vport index + * @max_tx_hdr_size: Max header length hardware can support * @state: See enum idpf_vport_state * @netstats: Packet and byte stats * @stats_lock: Lock to protect stats update @@ -153,6 +154,7 @@ struct idpf_netdev_priv { u32 vport_id; u32 link_speed_mbps; u16 vport_idx; + u16 max_tx_hdr_size; enum 
idpf_vport_state state; struct rtnl_link_stats64 netstats; spinlock_t stats_lock; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 82f09b4030bc..3a033ce19cda 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -723,6 +723,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->vport = vport; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); vport->netdev = netdev; return idpf_init_mac_addr(vport, netdev); @@ -740,6 +741,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->adapter = adapter; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); spin_lock_init(&np->stats_lock); @@ -2203,8 +2205,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_adapter *adapter = vport->adapter; + struct idpf_netdev_priv *np = netdev_priv(netdev); + u16 max_tx_hdr_size = np->max_tx_hdr_size; size_t len; /* No point in doing any of this if neither checksum nor GSO are @@ -2227,7 +2229,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, goto unsupported; len = skb_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; if (!skb->encapsulation) @@ -2240,7 +2242,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, /* IPLEN can support at most 127 dwords */ len = skb_inner_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; /* No need to validate L4LEN as TCP is the only protocol with a diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index bdf52cef3891..2d5f5c9f91ce 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -4025,6 +4025,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) return budget; } + /* Switch to poll mode in the tear-down path after sending disable + * queues virtchnl message, as the interrupts will be disabled after + * that. 
+ */ + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) + return budget; + work_done = min_t(int, work_done, budget - 1); /* Exit the polling mode, but don't re-enable interrupts if stack might @@ -4035,15 +4043,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) else idpf_vport_intr_set_wb_on_itr(q_vector); - /* Switch to poll mode in the tear-down path after sending disable - * queues virtchnl message, as the interrupts will be disabled after - * that - */ - if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, - q_vector->tx[0]))) - return budget; - else - return work_done; + return work_done; } /** diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7fa98aeb3663..4a3370a40dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -13,19 +13,26 @@ /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 -#define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 +#define LMT_MAX_VFS 256 + +#define LMT_MAP_ENTRY_ENA BIT_ULL(20) +#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; - u64 tbl_base; + u64 tbl_base, cfg; + int pfs, vfs; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + vfs = 1 << (cfg & 0xF); + pfs = 1 << ((cfg >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; @@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); + + cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); + /* 2048 LMTLINES */ + cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); + + writeq(cfg, (lmt_map_base + (index + 8))); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * changes effective. Write 1 for flush and read is being used as a * barrier and sets up a data dependency. 
Write to 0 after a write @@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & 0x1F; + pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index a1f9ec03c2ce..c827da626471 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; + int apr_pfs, apr_vfs; int buf_size = 10240; size_t off = 0; int index = 0; @@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + apr_vfs = 1 << (val & 0xF); + apr_pfs = 1 << ((val >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * + LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); @@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); - index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); @@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 0; vf < num_vfs; vf++) { - index = (pf * rvu->hw->total_vfs * 16) + + index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 7ef3ba477d49..9b28be4c4a5d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -729,9 +729,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } #ifdef CONFIG_DCB - err = otx2_dcbnl_set_ops(netdev); - if (err) - goto err_free_zc_bmap; + /* Priority flow control is not supported for LBK and SDP vf(s) */ + if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) { + err = otx2_dcbnl_set_ops(netdev); + if (err) + goto err_free_zc_bmap; + } #endif otx2_qos_init(vf, qos_txqs); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e2d6bfb5d693..a70b88037a20 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ 
b/drivers/net/ethernet/microchip/lan743x_main.c @@ -3495,6 +3495,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, struct pci_dev *pdev) { struct lan743x_tx *tx; + u32 sgmii_ctl; int index; int ret; @@ -3507,6 +3508,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, spin_lock_init(&adapter->eth_syslock_spinlock); mutex_init(&adapter->sgmii_rw_lock); pci11x1x_set_rfe_rd_fifo_threshold(adapter); + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + if (adapter->is_sgmii_en) { + sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; + } else { + sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; + } + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); } else { adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; @@ -3558,7 +3568,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) { - u32 sgmii_ctl; int ret; adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); @@ -3570,10 +3579,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) adapter->mdiobus->priv = (void *)adapter; if (adapter->is_pci11x1x) { if (adapter->is_sgmii_en) { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "SGMII operation\n"); adapter->mdiobus->read = lan743x_mdiobus_read_c22; @@ -3584,10 +3589,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n"); } else { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "RGMII operation\n"); // Only C22 support when RGMII I/F diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 85723a78793a..6c7e8655a7eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY * address. No need to mask it again. */ - reg |= 1 << H3_EPHY_ADDR_SHIFT; + reg |= ret << H3_EPHY_ADDR_SHIFT; } else { /* For SoCs without internal PHY the PHY selection bit should be * set to 0 (external PHY). 
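Editorial note on the dwmac-sun8i hunk just above: the EPHY address field written into the syscon register must carry the PHY address parsed from the device tree (of_mdio_parse_addr() returns a value in 0-31), not a hardcoded 1; otherwise only a PHY at MDIO address 1 would ever be selected. A minimal sketch of the field composition follows, assuming the shift value shown (the real H3_EPHY_ADDR_SHIFT constant is defined in the driver and may differ); this is an illustration, not the driver's full sun8i_dwmac_set_syscon() path.

    #include <stdint.h>

    /* Assumed value for illustration only; the driver defines H3_EPHY_ADDR_SHIFT. */
    #define H3_EPHY_ADDR_SHIFT 20

    /* Fold a device-tree-parsed MDIO address (already validated to 0..31)
     * into the EPHY selection field of the syscon register value, as the
     * hunk above does with "reg |= ret << H3_EPHY_ADDR_SHIFT". */
    static uint32_t ephy_syscon_with_addr(uint32_t reg, unsigned int phy_addr)
    {
            return reg | ((uint32_t)phy_addr << H3_EPHY_ADDR_SHIFT);
    }
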
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 1e6d2335293d..30665ffe78cf 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2685,7 +2685,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) port->slave.mac_addr); if (!is_valid_ether_addr(port->slave.mac_addr)) { eth_random_addr(port->slave.mac_addr); - dev_err(dev, "Use random MAC address\n"); + dev_info(dev, "Use random MAC address\n"); } } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index 8658a51ee810..f2c2bd257e39 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -184,8 +184,8 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) goto skip_sp_irq; txgbe->misc.nirqs = 1; - txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0, - &txgbe_misc_irq_domain_ops, txgbe); + txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0, + &txgbe_misc_irq_domain_ops, txgbe); if (!txgbe->misc.domain) return -ENOMEM; diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index d8fc0c79745d..b75ceb90359f 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -1778,8 +1778,8 @@ static void team_change_rx_flags(struct net_device *dev, int change) struct team_port *port; int inc; - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) { + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) { if (change & IFF_PROMISC) { inc = dev->flags & IFF_PROMISC ? 1 : -1; dev_set_promiscuity(port->dev, inc); @@ -1789,7 +1789,7 @@ static void team_change_rx_flags(struct net_device *dev, int change) dev_set_allmulti(port->dev, inc); } } - rcu_read_unlock(); + mutex_unlock(&team->lock); } static void team_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index e4f1663b6204..3e8025a71fcb 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2456,14 +2456,11 @@ static struct irq_chip lan78xx_irqchip = { static int lan78xx_setup_irq_domain(struct lan78xx_net *dev) { - struct device_node *of_node; struct irq_domain *irqdomain; unsigned int irqmap = 0; u32 buf; int ret = 0; - of_node = dev->udev->dev.parent->of_node; - mutex_init(&dev->domain_data.irq_lock); ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); @@ -2475,8 +2472,10 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev) dev->domain_data.irqchip = &lan78xx_irqchip; dev->domain_data.irq_handler = handle_simple_irq; - irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0, - &chip_domain_ops, &dev->domain_data); + irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node), + MAX_INT_EP, 0, + &chip_domain_ops, + &dev->domain_data); if (irqdomain) { /* create mapping for PHY interrupt */ irqmap = irq_create_mapping(irqdomain, INT_EP_PHY); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 3df6aabc7e33..2440e30c5bd1 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -27,6 +27,10 @@ #include <linux/module.h> #include <net/ip6_checksum.h> +#ifdef CONFIG_X86 +#include <asm/msr.h> +#endif + #include "vmxnet3_int.h" #include "vmxnet3_xdp.h" @@ -3607,8 +3611,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) struct vmxnet3_adapter *adapter = 
netdev_priv(netdev); int err = 0; - WRITE_ONCE(netdev->mtu, new_mtu); - /* * Reset_work may be in the middle of resetting the device, wait for its * completion. @@ -3622,6 +3624,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) /* we need to re-create the rx queue based on the new mtu */ vmxnet3_rq_destroy_all(adapter); + WRITE_ONCE(netdev->mtu, new_mtu); vmxnet3_adjust_rx_ring_size(adapter); err = vmxnet3_rq_create_all(adapter); if (err) { @@ -3638,6 +3641,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) "Closing it\n", err); goto out; } + } else { + WRITE_ONCE(netdev->mtu, new_mtu); } out: diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c index 6295e55ef85e..368f6d894bba 100644 --- a/drivers/ntb/msi.c +++ b/drivers/ntb/msi.c @@ -106,10 +106,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb) if (!ntb->msi) return -EINVAL; - msi_lock_descs(&ntb->pdev->dev); - desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED); - addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32); - msi_unlock_descs(&ntb->pdev->dev); + scoped_guard (msi_descs_lock, &ntb->pdev->dev) { + desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED); + addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32); + } for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) { peer_widx = ntb_peer_highest_mw_idx(ntb, peer); @@ -289,7 +289,7 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, if (!ntb->msi) return -EINVAL; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) { if (irq_has_action(entry->irq)) continue; @@ -307,17 +307,11 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, ret = ntbm_msi_setup_callback(ntb, entry, msi_desc); if (ret) { devm_free_irq(&ntb->dev, entry->irq, dev_id); - goto unlock; + return ret; } - - ret = entry->irq; - goto unlock; + return entry->irq; } - ret = -ENODEV; - -unlock: - msi_unlock_descs(dev); - return ret; + return -ENODEV; } EXPORT_SYMBOL(ntbm_msi_request_threaded_irq); diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index 2c092ec8c0a9..3b6d759bcdf2 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -242,7 +242,7 @@ struct nvme_dhchap_key *nvme_auth_transform_key( { const char *hmac_name; struct crypto_shash *key_tfm; - struct shash_desc *shash; + SHASH_DESC_ON_STACK(shash, key_tfm); struct nvme_dhchap_key *transformed_key; int ret, key_len; @@ -267,19 +267,11 @@ struct nvme_dhchap_key *nvme_auth_transform_key( if (IS_ERR(key_tfm)) return ERR_CAST(key_tfm); - shash = kmalloc(sizeof(struct shash_desc) + - crypto_shash_descsize(key_tfm), - GFP_KERNEL); - if (!shash) { - ret = -ENOMEM; - goto out_free_key; - } - key_len = crypto_shash_digestsize(key_tfm); transformed_key = nvme_auth_alloc_key(key_len, key->hash); if (!transformed_key) { ret = -ENOMEM; - goto out_free_shash; + goto out_free_key; } shash->tfm = key_tfm; @@ -299,15 +291,12 @@ struct nvme_dhchap_key *nvme_auth_transform_key( if (ret < 0) goto out_free_transformed_key; - kfree(shash); crypto_free_shash(key_tfm); return transformed_key; out_free_transformed_key: nvme_auth_free_key(transformed_key); -out_free_shash: - kfree(shash); out_free_key: crypto_free_shash(key_tfm); diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 6115fef74c1e..f6ddbe553289 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -31,6 +31,7 @@ struct nvme_dhchap_queue_context { u32 s1; u32 s2; bool 
bi_directional; + bool authenticated; u16 transaction; u8 status; u8 dhgroup_id; @@ -682,6 +683,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap) { nvme_auth_reset_dhchap(chap); + chap->authenticated = false; if (chap->shash_tfm) crypto_free_shash(chap->shash_tfm); if (chap->dh_tfm) @@ -930,12 +932,14 @@ static void nvme_queue_auth_work(struct work_struct *work) } if (!ret) { chap->error = 0; + chap->authenticated = true; if (ctrl->opts->concat && (ret = nvme_auth_secure_concat(ctrl, chap))) { dev_warn(ctrl->device, "%s: qid %d failed to enable secure concatenation\n", __func__, chap->qid); chap->error = ret; + chap->authenticated = false; } return; } @@ -1023,13 +1027,16 @@ static void nvme_ctrl_auth_work(struct work_struct *work) return; for (q = 1; q < ctrl->queue_count; q++) { - ret = nvme_auth_negotiate(ctrl, q); - if (ret) { - dev_warn(ctrl->device, - "qid %d: error %d setting up authentication\n", - q, ret); - break; - } + struct nvme_dhchap_queue_context *chap = + &ctrl->dhchap_ctxs[q]; + /* + * Skip re-authentication if the queue had + * not been authenticated initially. + */ + if (!chap->authenticated) + continue; + cancel_work_sync(&chap->auth_work); + queue_work(nvme_auth_wq, &chap->auth_work); } /* @@ -1037,7 +1044,13 @@ static void nvme_ctrl_auth_work(struct work_struct *work) * the controller terminates the connection. */ for (q = 1; q < ctrl->queue_count; q++) { - ret = nvme_auth_wait(ctrl, q); + struct nvme_dhchap_queue_context *chap = + &ctrl->dhchap_ctxs[q]; + if (!chap->authenticated) + continue; + flush_work(&chap->auth_work); + ret = chap->error; + nvme_auth_reset_dhchap(chap); if (ret) dev_warn(ctrl->device, "qid %d: authentication failed\n", q); @@ -1076,6 +1089,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) chap = &ctrl->dhchap_ctxs[i]; chap->qid = i; chap->ctrl = ctrl; + chap->authenticated = false; INIT_WORK(&chap->auth_work, nvme_queue_auth_work); } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 6b04473c0ab7..f69a232a000a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -38,6 +38,8 @@ struct nvme_ns_info { u32 nsid; __le32 anagrpid; u8 pi_offset; + u16 endgid; + u64 runs; bool is_shared; bool is_readonly; bool is_ready; @@ -150,6 +152,8 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid); static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, struct nvme_command *cmd); +static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, + u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi); void nvme_queue_scan(struct nvme_ctrl *ctrl) { @@ -664,10 +668,11 @@ static void nvme_free_ns_head(struct kref *ref) struct nvme_ns_head *head = container_of(ref, struct nvme_ns_head, ref); - nvme_mpath_remove_disk(head); + nvme_mpath_put_disk(head); ida_free(&head->subsys->ns_ida, head->instance); cleanup_srcu_struct(&head->srcu); nvme_put_subsystem(head->subsys); + kfree(head->plids); kfree(head); } @@ -991,6 +996,18 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + if (op == nvme_cmd_write && ns->head->nr_plids) { + u16 write_stream = req->bio->bi_write_stream; + + if (WARN_ON_ONCE(write_stream > ns->head->nr_plids)) + return BLK_STS_INVAL; + + if (write_stream) { + dsmgmt |= ns->head->plids[write_stream - 1] << 16; + control |= NVME_RW_DTYPE_DPLCMT; + } + } + if (req->cmd_flags & 
REQ_ATOMIC && !nvme_valid_atomic_write(req)) return BLK_STS_INVAL; @@ -1157,7 +1174,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, req->cmd_flags &= ~REQ_FAILFAST_DRIVER; if (buffer && bufflen) { - ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_KERNEL); if (ret) goto out; } @@ -1609,6 +1626,7 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; info->is_ready = true; + info->endgid = le16_to_cpu(id->endgid); if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { dev_info(ctrl->device, "Ignoring bogus Namespace Identifiers\n"); @@ -1649,6 +1667,7 @@ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, info->is_ready = id->nstat & NVME_NSTAT_NRDY; info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL; info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT; + info->endgid = le16_to_cpu(id->endgid); } kfree(id); return ret; @@ -1674,7 +1693,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result) + void *result) { return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, buflen, result); @@ -1683,7 +1702,7 @@ EXPORT_SYMBOL_GPL(nvme_set_features); int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result) + void *result) { return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, buflen, result); @@ -2167,6 +2186,148 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns, return ret; } +static int nvme_query_fdp_granularity(struct nvme_ctrl *ctrl, + struct nvme_ns_info *info, u8 fdp_idx) +{ + struct nvme_fdp_config_log hdr, *h; + struct nvme_fdp_config_desc *desc; + size_t size = sizeof(hdr); + void *log, *end; + int i, n, ret; + + ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0, + NVME_CSI_NVM, &hdr, size, 0, info->endgid); + if (ret) { + dev_warn(ctrl->device, + "FDP configs log header status:0x%x endgid:%d\n", ret, + info->endgid); + return ret; + } + + size = le32_to_cpu(hdr.sze); + if (size > PAGE_SIZE * MAX_ORDER_NR_PAGES) { + dev_warn(ctrl->device, "FDP config size too large:%zu\n", + size); + return 0; + } + + h = kvmalloc(size, GFP_KERNEL); + if (!h) + return -ENOMEM; + + ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0, + NVME_CSI_NVM, h, size, 0, info->endgid); + if (ret) { + dev_warn(ctrl->device, + "FDP configs log status:0x%x endgid:%d\n", ret, + info->endgid); + goto out; + } + + n = le16_to_cpu(h->numfdpc) + 1; + if (fdp_idx > n) { + dev_warn(ctrl->device, "FDP index:%d out of range:%d\n", + fdp_idx, n); + /* Proceed without registering FDP streams */ + ret = 0; + goto out; + } + + log = h + 1; + desc = log; + end = log + size - sizeof(*h); + for (i = 0; i < fdp_idx; i++) { + log += le16_to_cpu(desc->dsze); + desc = log; + if (log >= end) { + dev_warn(ctrl->device, + "FDP invalid config descriptor list\n"); + ret = 0; + goto out; + } + } + + if (le32_to_cpu(desc->nrg) > 1) { + dev_warn(ctrl->device, "FDP NRG > 1 not supported\n"); + ret = 0; + goto out; + } + + info->runs = le64_to_cpu(desc->runs); +out: + kvfree(h); + return ret; +} + +static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info) +{ + struct nvme_ns_head *head = ns->head; + struct nvme_ctrl *ctrl = ns->ctrl; + struct 
nvme_fdp_ruh_status *ruhs; + struct nvme_fdp_config fdp; + struct nvme_command c = {}; + size_t size; + int i, ret; + + /* + * The FDP configuration is static for the lifetime of the namespace, + * so return immediately if we've already registered this namespace's + * streams. + */ + if (head->nr_plids) + return 0; + + ret = nvme_get_features(ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0, + &fdp); + if (ret) { + dev_warn(ctrl->device, "FDP get feature status:0x%x\n", ret); + return ret; + } + + if (!(fdp.flags & FDPCFG_FDPE)) + return 0; + + ret = nvme_query_fdp_granularity(ctrl, info, fdp.fdpcidx); + if (!info->runs) + return ret; + + size = struct_size(ruhs, ruhsd, S8_MAX - 1); + ruhs = kzalloc(size, GFP_KERNEL); + if (!ruhs) + return -ENOMEM; + + c.imr.opcode = nvme_cmd_io_mgmt_recv; + c.imr.nsid = cpu_to_le32(head->ns_id); + c.imr.mo = NVME_IO_MGMT_RECV_MO_RUHS; + c.imr.numd = cpu_to_le32(nvme_bytes_to_numd(size)); + ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size); + if (ret) { + dev_warn(ctrl->device, "FDP io-mgmt status:0x%x\n", ret); + goto free; + } + + head->nr_plids = le16_to_cpu(ruhs->nruhsd); + if (!head->nr_plids) + goto free; + + head->plids = kcalloc(head->nr_plids, sizeof(*head->plids), + GFP_KERNEL); + if (!head->plids) { + dev_warn(ctrl->device, + "failed to allocate %u FDP placement IDs\n", + head->nr_plids); + head->nr_plids = 0; + ret = -ENOMEM; + goto free; + } + + for (i = 0; i < head->nr_plids; i++) + head->plids[i] = le16_to_cpu(ruhs->ruhsd[i].pid); +free: + kfree(ruhs); + return ret; +} + static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { @@ -2204,6 +2365,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, goto out; } + if (ns->ctrl->ctratt & NVME_CTRL_ATTR_FDPS) { + ret = nvme_query_fdp_info(ns, info); + if (ret < 0) + goto out; + } + lim = queue_limits_start_update(ns->disk->queue); memflags = blk_mq_freeze_queue(ns->disk->queue); @@ -2248,6 +2415,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, if (!nvme_init_integrity(ns->head, &lim, info)) capacity = 0; + lim.max_write_streams = ns->head->nr_plids; + if (lim.max_write_streams) + lim.write_stream_granularity = min(info->runs, U32_MAX); + else + lim.write_stream_granularity = 0; + ret = queue_limits_commit_update(ns->disk->queue, &lim); if (ret) { blk_mq_unfreeze_queue(ns->disk->queue, memflags); @@ -2351,6 +2524,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) ns->head->disk->flags |= GENHD_FL_HIDDEN; else nvme_init_integrity(ns->head, &lim, info); + lim.max_write_streams = ns_lim->max_write_streams; + lim.write_stream_granularity = ns_lim->write_stream_granularity; ret = queue_limits_commit_update(ns->head->disk->queue, &lim); set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); @@ -3108,8 +3283,8 @@ out_unlock: return ret; } -int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, - void *log, size_t size, u64 offset) +static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, + u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi) { struct nvme_command c = { }; u32 dwlen = nvme_bytes_to_numd(size); @@ -3123,10 +3298,18 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); c.get_log_page.csi = csi; + c.get_log_page.lsi = cpu_to_le16(lsi); return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); } +int 
nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, + void *log, size_t size, u64 offset) +{ + return nvme_get_log_lsi(ctrl, nsid, log_page, lsp, csi, log, size, + offset, 0); +} + static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, struct nvme_effects_log **log) { @@ -3584,7 +3767,7 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, */ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) continue; - if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) + if (nvme_tryget_ns_head(h)) return h; } @@ -3828,7 +4011,8 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) } } else { ret = -EINVAL; - if (!info->is_shared || !head->shared) { + if ((!info->is_shared || !head->shared) && + !list_empty(&head->list)) { dev_err(ctrl->device, "Duplicate unshared namespace %d\n", info->nsid); @@ -4032,7 +4216,8 @@ static void nvme_ns_remove(struct nvme_ns *ns) mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); if (list_empty(&ns->head->list)) { - list_del_init(&ns->head->entry); + if (!nvme_mpath_queue_if_no_path(ns->head)) + list_del_init(&ns->head->entry); last_path = true; } mutex_unlock(&ns->ctrl->subsys->lock); @@ -4053,7 +4238,7 @@ static void nvme_ns_remove(struct nvme_ns *ns) synchronize_srcu(&ns->ctrl->srcu); if (last_path) - nvme_mpath_shutdown_disk(ns->head); + nvme_mpath_remove_disk(ns->head); nvme_put_ns(ns); } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 2257c3c96dd2..fdafa3e9e66f 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1410,9 +1410,8 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) } static void -nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) +nvme_fc_xmt_ls_rsp_free(struct nvmefc_ls_rcv_op *lsop) { - struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; struct nvme_fc_rport *rport = lsop->rport; struct nvme_fc_lport *lport = rport->lport; unsigned long flags; @@ -1434,6 +1433,14 @@ nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) } static void +nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) +{ + struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; + + nvme_fc_xmt_ls_rsp_free(lsop); +} + +static void nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) { struct nvme_fc_rport *rport = lsop->rport; @@ -1450,7 +1457,7 @@ nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) dev_warn(lport->dev, "LLDD rejected LS RSP xmt: LS %d status %d\n", w0->ls_cmd, ret); - nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); + nvme_fc_xmt_ls_rsp_free(lsop); return; } } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index cf0ef4745564..878ea8b1a0ac 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -10,10 +10,61 @@ #include "nvme.h" bool multipath = true; -module_param(multipath, bool, 0444); +static bool multipath_always_on; + +static int multipath_param_set(const char *val, const struct kernel_param *kp) +{ + int ret; + bool *arg = kp->arg; + + ret = param_set_bool(val, kp); + if (ret) + return ret; + + if (multipath_always_on && !*arg) { + pr_err("Can't disable multipath when multipath_always_on is configured.\n"); + *arg = true; + return -EINVAL; + } + + return 0; +} + +static const struct kernel_param_ops multipath_param_ops = { + .set = multipath_param_set, + .get = param_get_bool, +}; + +module_param_cb(multipath, &multipath_param_ops, &multipath, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); +static int 
multipath_always_on_set(const char *val, + const struct kernel_param *kp) +{ + int ret; + bool *arg = kp->arg; + + ret = param_set_bool(val, kp); + if (ret < 0) + return ret; + + if (*arg) + multipath = true; + + return 0; +} + +static const struct kernel_param_ops multipath_always_on_ops = { + .set = multipath_always_on_set, + .get = param_get_bool, +}; + +module_param_cb(multipath_always_on, &multipath_always_on_ops, + &multipath_always_on, 0444); +MODULE_PARM_DESC(multipath_always_on, + "create multipath node always except for private namespace with non-unique nsid; note that this also implicitly enables native multipath support"); + static const char *nvme_iopolicy_names[] = { [NVME_IOPOLICY_NUMA] = "numa", [NVME_IOPOLICY_RR] = "round-robin", @@ -442,7 +493,17 @@ static bool nvme_available_path(struct nvme_ns_head *head) break; } } - return false; + + /* + * If "head->delayed_removal_secs" is configured (i.e., non-zero), do + * not immediately fail I/O. Instead, requeue the I/O for the configured + * duration, anticipating that if there's a transient link failure then + * it may recover within this time window. This parameter is exported to + * userspace via sysfs, and its default value is zero. It is internally + * mapped to NVME_NSHEAD_QUEUE_IF_NO_PATH. When delayed_removal_secs is + * non-zero, this flag is set to true. When zero, the flag is cleared. + */ + return nvme_mpath_queue_if_no_path(head); } static void nvme_ns_head_submit_bio(struct bio *bio) @@ -617,6 +678,40 @@ static void nvme_requeue_work(struct work_struct *work) } } +static void nvme_remove_head(struct nvme_ns_head *head) +{ + if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared + * to allow multipath to fail all I/O. + */ + kblockd_schedule_work(&head->requeue_work); + + nvme_cdev_del(&head->cdev, &head->cdev_device); + synchronize_srcu(&head->srcu); + del_gendisk(head->disk); + nvme_put_ns_head(head); + } +} + +static void nvme_remove_head_work(struct work_struct *work) +{ + struct nvme_ns_head *head = container_of(to_delayed_work(work), + struct nvme_ns_head, remove_work); + bool remove = false; + + mutex_lock(&head->subsys->lock); + if (list_empty(&head->list)) { + list_del_init(&head->entry); + remove = true; + } + mutex_unlock(&head->subsys->lock); + if (remove) + nvme_remove_head(head); + + module_put(THIS_MODULE); +} + int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) { struct queue_limits lim; @@ -626,14 +721,25 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) spin_lock_init(&head->requeue_lock); INIT_WORK(&head->requeue_work, nvme_requeue_work); INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work); + INIT_DELAYED_WORK(&head->remove_work, nvme_remove_head_work); + head->delayed_removal_secs = 0; /* - * Add a multipath node if the subsystems supports multiple controllers. - * We also do this for private namespaces as the namespace sharing flag - * could change after a rescan. + * If "multipath_always_on" is enabled, a multipath node is added + * regardless of whether the disk is single/multi ported, and whether + * the namespace is shared or private. If "multipath_always_on" is not + * enabled, a multipath node is added only if the subsystem supports + * multiple controllers and the "multipath" option is configured. In + * either case, for private namespaces, we ensure that the NSID is + * unique. 
*/ - if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || - !nvme_is_unique_nsid(ctrl, head) || !multipath) + if (!multipath_always_on) { + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || + !multipath) + return 0; + } + + if (!nvme_is_unique_nsid(ctrl, head)) return 0; blk_set_stacking_limits(&lim); @@ -660,6 +766,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state); sprintf(head->disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, head->instance); + nvme_tryget_ns_head(head); return 0; } @@ -1016,6 +1123,49 @@ static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr } DEVICE_ATTR_RO(numa_nodes); +static ssize_t delayed_removal_secs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct nvme_ns_head *head = disk->private_data; + int ret; + + mutex_lock(&head->subsys->lock); + ret = sysfs_emit(buf, "%u\n", head->delayed_removal_secs); + mutex_unlock(&head->subsys->lock); + return ret; +} + +static ssize_t delayed_removal_secs_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct gendisk *disk = dev_to_disk(dev); + struct nvme_ns_head *head = disk->private_data; + unsigned int sec; + int ret; + + ret = kstrtouint(buf, 0, &sec); + if (ret < 0) + return ret; + + mutex_lock(&head->subsys->lock); + head->delayed_removal_secs = sec; + if (sec) + set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); + else + clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); + mutex_unlock(&head->subsys->lock); + /* + * Ensure that update to NVME_NSHEAD_QUEUE_IF_NO_PATH is seen + * by its reader. + */ + synchronize_srcu(&head->srcu); + + return count; +} + +DEVICE_ATTR_RW(delayed_removal_secs); + static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { @@ -1137,23 +1287,43 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) #endif } -void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +void nvme_mpath_remove_disk(struct nvme_ns_head *head) { - if (!head->disk) - return; - if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { - nvme_cdev_del(&head->cdev, &head->cdev_device); + bool remove = false; + + mutex_lock(&head->subsys->lock); + /* + * We are called when all paths have been removed, and at that point + * head->list is expected to be empty. However, nvme_remove_ns() and + * nvme_init_ns_head() can run concurrently and so if head->delayed_ + * removal_secs is configured, it is possible that by the time we reach + * this point, head->list may no longer be empty. Therefore, we recheck + * head->list here. If it is no longer empty then we skip enqueuing the + * delayed head removal work. + */ + if (!list_empty(&head->list)) + goto out; + + if (head->delayed_removal_secs) { /* - * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared - * to allow multipath to fail all I/O. + * Ensure that no one could remove this module while the head + * remove work is pending. 
*/ - synchronize_srcu(&head->srcu); - kblockd_schedule_work(&head->requeue_work); - del_gendisk(head->disk); + if (!try_module_get(THIS_MODULE)) + goto out; + queue_delayed_work(nvme_wq, &head->remove_work, + head->delayed_removal_secs * HZ); + } else { + list_del_init(&head->entry); + remove = true; } +out: + mutex_unlock(&head->subsys->lock); + if (remove) + nvme_remove_head(head); } -void nvme_mpath_remove_disk(struct nvme_ns_head *head) +void nvme_mpath_put_disk(struct nvme_ns_head *head) { if (!head->disk) return; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 8fc4683418a3..ad0c1f834f09 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -497,6 +497,9 @@ struct nvme_ns_head { struct device cdev_device; struct gendisk *disk; + + u16 nr_plids; + u16 *plids; #ifdef CONFIG_NVME_MULTIPATH struct bio_list requeue_list; spinlock_t requeue_lock; @@ -504,7 +507,10 @@ struct nvme_ns_head { struct work_struct partition_scan_work; struct mutex lock; unsigned long flags; -#define NVME_NSHEAD_DISK_LIVE 0 + struct delayed_work remove_work; + unsigned int delayed_removal_secs; +#define NVME_NSHEAD_DISK_LIVE 0 +#define NVME_NSHEAD_QUEUE_IF_NO_PATH 1 struct nvme_ns __rcu *current_path[]; #endif }; @@ -897,10 +903,10 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, int qid, nvme_submit_flags_t flags); int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result); + void *result); int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result); + void *result); int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); @@ -961,7 +967,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns); void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns); void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid); -void nvme_mpath_remove_disk(struct nvme_ns_head *head); +void nvme_mpath_put_disk(struct nvme_ns_head *head); int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); void nvme_mpath_update(struct nvme_ctrl *ctrl); @@ -970,7 +976,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_revalidate_paths(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); -void nvme_mpath_shutdown_disk(struct nvme_ns_head *head); +void nvme_mpath_remove_disk(struct nvme_ns_head *head); void nvme_mpath_start_request(struct request *rq); void nvme_mpath_end_request(struct request *rq); @@ -987,12 +993,19 @@ extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; extern struct device_attribute dev_attr_queue_depth; extern struct device_attribute dev_attr_numa_nodes; +extern struct device_attribute dev_attr_delayed_removal_secs; extern struct device_attribute subsys_attr_iopolicy; static inline bool nvme_disk_is_ns_head(struct gendisk *disk) { return disk->fops == &nvme_ns_head_ops; } +static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head) +{ + if (test_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags)) + return true; + return false; +} #else #define multipath false static inline bool nvme_ctrl_use_ana(struct nvme_ctrl 
*ctrl) @@ -1013,7 +1026,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) { } -static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) +static inline void nvme_mpath_put_disk(struct nvme_ns_head *head) { } static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns) @@ -1032,7 +1045,7 @@ static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns) static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) { } -static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) { } static inline void nvme_trace_bio_complete(struct request *req) @@ -1080,6 +1093,10 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk) { return false; } +static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head) +{ + return false; +} #endif /* CONFIG_NVME_MULTIPATH */ int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index f1dd804151b1..e0bfe04a2bc2 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/nodemask.h> #include <linux/once.h> #include <linux/pci.h> #include <linux/suspend.h> @@ -34,16 +35,31 @@ #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) -#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) +/* Optimisation for I/Os between 4k and 128k */ +#define NVME_SMALL_POOL_SIZE 256 /* * These can be higher, but we need to ensure that any command doesn't * require an sg allocation that needs more than a page of data. */ #define NVME_MAX_KB_SZ 8192 -#define NVME_MAX_SEGS 128 -#define NVME_MAX_META_SEGS 15 -#define NVME_MAX_NR_ALLOCATIONS 5 +#define NVME_MAX_NR_DESCRIPTORS 5 + +/* + * For data SGLs we support a single descriptors worth of SGL entries, but for + * now we also limit it to avoid an allocation larger than PAGE_SIZE for the + * scatterlist. + */ +#define NVME_MAX_SEGS \ + min(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc), \ + (PAGE_SIZE / sizeof(struct scatterlist))) + +/* + * For metadata SGLs, only the small descriptor is supported, and the first + * entry is the segment descriptor, which for the data pointer sits in the SQE. + */ +#define NVME_MAX_META_SEGS \ + ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1) static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); @@ -112,6 +128,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); static void nvme_delete_io_queues(struct nvme_dev *dev); static void nvme_update_attrs(struct nvme_dev *dev); +struct nvme_descriptor_pools { + struct dma_pool *large; + struct dma_pool *small; +}; + /* * Represents an NVM Express device. Each nvme_dev is a PCI function. 
*/ @@ -121,8 +142,6 @@ struct nvme_dev { struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; struct device *dev; - struct dma_pool *prp_page_pool; - struct dma_pool *prp_small_pool; unsigned online_queues; unsigned max_qid; unsigned io_queues[HCTX_MAX_TYPES]; @@ -162,6 +181,7 @@ struct nvme_dev { unsigned int nr_allocated_queues; unsigned int nr_write_queues; unsigned int nr_poll_queues; + struct nvme_descriptor_pools descriptor_pools[]; }; static int io_queue_depth_set(const char *val, const struct kernel_param *kp) @@ -191,6 +211,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) */ struct nvme_queue { struct nvme_dev *dev; + struct nvme_descriptor_pools descriptor_pools; spinlock_t sq_lock; void *sq_cmds; /* only used for poll queues: */ @@ -219,30 +240,30 @@ struct nvme_queue { struct completion delete_done; }; -union nvme_descriptor { - struct nvme_sgl_desc *sg_list; - __le64 *prp_list; +/* bits for iod->flags */ +enum nvme_iod_flags { + /* this command has been aborted by the timeout handler */ + IOD_ABORTED = 1U << 0, + + /* uses the small descriptor pool */ + IOD_SMALL_DESCRIPTOR = 1U << 1, }; /* * The nvme_iod describes the data in an I/O. - * - * The sg pointer contains the list of PRP/SGL chunk allocations in addition - * to the actual struct scatterlist. */ struct nvme_iod { struct nvme_request req; struct nvme_command cmd; - bool aborted; - s8 nr_allocations; /* PRP list pool allocations. 0 means small - pool in use */ + u8 flags; + u8 nr_descriptors; unsigned int dma_len; /* length of single DMA segment mapping */ dma_addr_t first_dma; dma_addr_t meta_dma; struct sg_table sgt; struct sg_table meta_sgt; - union nvme_descriptor meta_list; - union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; + struct nvme_sgl_desc *meta_descriptor; + void *descriptors[NVME_MAX_NR_DESCRIPTORS]; }; static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) @@ -397,30 +418,78 @@ static __always_inline int nvme_pci_npages_prp(void) return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); } -static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) +static struct nvme_descriptor_pools * +nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node) { - struct nvme_dev *dev = to_nvme_dev(data); - struct nvme_queue *nvmeq = &dev->queues[0]; + struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node]; + size_t small_align = NVME_SMALL_POOL_SIZE; - WARN_ON(hctx_idx != 0); - WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); + if (pools->small) + return pools; /* already initialized */ - hctx->driver_data = nvmeq; - return 0; + pools->large = dma_pool_create_node("nvme descriptor page", dev->dev, + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node); + if (!pools->large) + return ERR_PTR(-ENOMEM); + + if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) + small_align = 512; + + pools->small = dma_pool_create_node("nvme descriptor small", dev->dev, + NVME_SMALL_POOL_SIZE, small_align, 0, numa_node); + if (!pools->small) { + dma_pool_destroy(pools->large); + pools->large = NULL; + return ERR_PTR(-ENOMEM); + } + + return pools; } -static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) +static void nvme_release_descriptor_pools(struct nvme_dev *dev) +{ + unsigned i; + + for (i = 0; i < nr_node_ids; i++) { + struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i]; + + dma_pool_destroy(pools->large); + dma_pool_destroy(pools->small); + } +} + +static int 
nvme_init_hctx_common(struct blk_mq_hw_ctx *hctx, void *data, + unsigned qid) { struct nvme_dev *dev = to_nvme_dev(data); - struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; + struct nvme_queue *nvmeq = &dev->queues[qid]; + struct nvme_descriptor_pools *pools; + struct blk_mq_tags *tags; + + tags = qid ? dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0]; + WARN_ON(tags != hctx->tags); + pools = nvme_setup_descriptor_pools(dev, hctx->numa_node); + if (IS_ERR(pools)) + return PTR_ERR(pools); - WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); + nvmeq->descriptor_pools = *pools; hctx->driver_data = nvmeq; return 0; } +static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + WARN_ON(hctx_idx != 0); + return nvme_init_hctx_common(hctx, data, 0); +} + +static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + return nvme_init_hctx_common(hctx, data, hctx_idx + 1); +} + static int nvme_pci_init_request(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) @@ -537,23 +606,39 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, return true; } -static void nvme_free_prps(struct nvme_dev *dev, struct request *req) +static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq, + struct nvme_iod *iod) +{ + if (iod->flags & IOD_SMALL_DESCRIPTOR) + return nvmeq->descriptor_pools.small; + return nvmeq->descriptor_pools.large; +} + +static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req) { const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_addr_t dma_addr = iod->first_dma; int i; - for (i = 0; i < iod->nr_allocations; i++) { - __le64 *prp_list = iod->list[i].prp_list; + if (iod->nr_descriptors == 1) { + dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0], + dma_addr); + return; + } + + for (i = 0; i < iod->nr_descriptors; i++) { + __le64 *prp_list = iod->descriptors[i]; dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); - dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); + dma_pool_free(nvmeq->descriptor_pools.large, prp_list, + dma_addr); dma_addr = next_dma_addr; } } -static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq, + struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -566,15 +651,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) WARN_ON_ONCE(!iod->sgt.nents); dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); - - if (iod->nr_allocations == 0) - dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, - iod->first_dma); - else if (iod->nr_allocations == 1) - dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, - iod->first_dma); - else - nvme_free_prps(dev, req); + nvme_free_descriptors(nvmeq, req); mempool_free(iod->sgt.sgl, dev->iod_mempool); } @@ -592,11 +669,10 @@ static void nvme_print_sgl(struct scatterlist *sgl, int nents) } } -static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, +static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq, struct request *req, struct nvme_rw_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct dma_pool *pool; int length = blk_rq_payload_bytes(req); struct scatterlist *sg = iod->sgt.sgl; int dma_len = sg_dma_len(sg); @@ -604,7 +680,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, 
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); __le64 *prp_list; dma_addr_t prp_dma; - int nprps, i; + int i; length -= (NVME_CTRL_PAGE_SIZE - offset); if (length <= 0) { @@ -626,30 +702,26 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, goto done; } - nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); - if (nprps <= (256 / 8)) { - pool = dev->prp_small_pool; - iod->nr_allocations = 0; - } else { - pool = dev->prp_page_pool; - iod->nr_allocations = 1; - } + if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <= + NVME_SMALL_POOL_SIZE / sizeof(__le64)) + iod->flags |= IOD_SMALL_DESCRIPTOR; - prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); - if (!prp_list) { - iod->nr_allocations = -1; + prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, + &prp_dma); + if (!prp_list) return BLK_STS_RESOURCE; - } - iod->list[0].prp_list = prp_list; + iod->descriptors[iod->nr_descriptors++] = prp_list; iod->first_dma = prp_dma; i = 0; for (;;) { if (i == NVME_CTRL_PAGE_SIZE >> 3) { __le64 *old_prp_list = prp_list; - prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + + prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large, + GFP_ATOMIC, &prp_dma); if (!prp_list) goto free_prps; - iod->list[iod->nr_allocations++].prp_list = prp_list; + iod->descriptors[iod->nr_descriptors++] = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); i = 1; @@ -673,7 +745,7 @@ done: cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); return BLK_STS_OK; free_prps: - nvme_free_prps(dev, req); + nvme_free_descriptors(nvmeq, req); return BLK_STS_RESOURCE; bad_sgl: WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), @@ -698,11 +770,10 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } -static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, +static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq, struct request *req, struct nvme_rw_command *cmd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct dma_pool *pool; struct nvme_sgl_desc *sg_list; struct scatterlist *sg = iod->sgt.sgl; unsigned int entries = iod->sgt.nents; @@ -717,21 +788,14 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, return BLK_STS_OK; } - if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { - pool = dev->prp_small_pool; - iod->nr_allocations = 0; - } else { - pool = dev->prp_page_pool; - iod->nr_allocations = 1; - } + if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list)) + iod->flags |= IOD_SMALL_DESCRIPTOR; - sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); - if (!sg_list) { - iod->nr_allocations = -1; + sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, + &sgl_dma); + if (!sg_list) return BLK_STS_RESOURCE; - } - - iod->list[0].sg_list = sg_list; + iod->descriptors[iod->nr_descriptors++] = sg_list; iod->first_dma = sgl_dma; nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); @@ -785,12 +849,12 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret = BLK_STS_RESOURCE; int rc; if (blk_rq_nr_phys_segments(req) == 1) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct bio_vec bv = req_bvec(req); if (!is_pci_p2pdma_page(bv.bv_page)) { @@ -825,9 +889,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request 
*req, } if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) - ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); + ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw); else - ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); + ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw); if (ret != BLK_STS_OK) goto out_unmap_sg; return BLK_STS_OK; @@ -842,6 +906,7 @@ out_free_sg: static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev, struct request *req) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_rw_command *cmnd = &iod->cmd.rw; struct nvme_sgl_desc *sg_list; @@ -865,12 +930,13 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev, if (rc) goto out_free_sg; - sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma); + sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC, + &sgl_dma); if (!sg_list) goto out_unmap_sg; entries = iod->meta_sgt.nents; - iod->meta_list.sg_list = sg_list; + iod->meta_descriptor = sg_list; iod->meta_dma = sgl_dma; cmnd->flags = NVME_CMD_SGL_METASEG; @@ -912,7 +978,10 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev, static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req) { - if (nvme_pci_metadata_use_sgls(dev, req)) + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && + nvme_pci_metadata_use_sgls(dev, req)) return nvme_pci_setup_meta_sgls(dev, req); return nvme_pci_setup_meta_mptr(dev, req); } @@ -922,8 +991,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; - iod->aborted = false; - iod->nr_allocations = -1; + iod->flags = 0; + iod->nr_descriptors = 0; iod->sgt.nents = 0; iod->meta_sgt.nents = 0; @@ -947,7 +1016,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) return BLK_STS_OK; out_unmap_data: if (blk_rq_nr_phys_segments(req)) - nvme_unmap_data(dev, req); + nvme_unmap_data(dev, req->mq_hctx->driver_data, req); out_free_cmd: nvme_cleanup_cmd(req); return ret; @@ -1037,6 +1106,7 @@ static void nvme_queue_rqs(struct rq_list *rqlist) } static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev, + struct nvme_queue *nvmeq, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); @@ -1048,8 +1118,8 @@ static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev, return; } - dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list, - iod->meta_dma); + dma_pool_free(nvmeq->descriptor_pools.small, iod->meta_descriptor, + iod->meta_dma); dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0); mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool); } @@ -1060,10 +1130,10 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req) struct nvme_dev *dev = nvmeq->dev; if (blk_integrity_rq(req)) - nvme_unmap_metadata(dev, req); + nvme_unmap_metadata(dev, nvmeq, req); if (blk_rq_nr_phys_segments(req)) - nvme_unmap_data(dev, req); + nvme_unmap_data(dev, nvmeq, req); } static void nvme_pci_complete_rq(struct request *req) @@ -1490,7 +1560,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * returned to the driver, or if this is the admin queue. 
*/ opcode = nvme_req(req)->cmd->common.opcode; - if (!nvmeq->qid || iod->aborted) { + if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) { dev_warn(dev->ctrl.device, "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n", req->tag, nvme_cid(req), opcode, @@ -1503,7 +1573,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) atomic_inc(&dev->ctrl.abort_limit); return BLK_EH_RESET_TIMER; } - iod->aborted = true; + iod->flags |= IOD_ABORTED; cmd.abort.opcode = nvme_admin_abort_cmd; cmd.abort.cid = nvme_cid(req); @@ -2842,35 +2912,6 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) return 0; } -static int nvme_setup_prp_pools(struct nvme_dev *dev) -{ - size_t small_align = 256; - - dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, - NVME_CTRL_PAGE_SIZE, - NVME_CTRL_PAGE_SIZE, 0); - if (!dev->prp_page_pool) - return -ENOMEM; - - if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) - small_align = 512; - - /* Optimisation for I/Os between 4k and 128k */ - dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, - 256, small_align, 0); - if (!dev->prp_small_pool) { - dma_pool_destroy(dev->prp_page_pool); - return -ENOMEM; - } - return 0; -} - -static void nvme_release_prp_pools(struct nvme_dev *dev) -{ - dma_pool_destroy(dev->prp_page_pool); - dma_pool_destroy(dev->prp_small_pool); -} - static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) { size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1); @@ -3185,7 +3226,8 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, struct nvme_dev *dev; int ret = -ENOMEM; - dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); + dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids), + GFP_KERNEL, node); if (!dev) return ERR_PTR(-ENOMEM); INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); @@ -3260,13 +3302,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto out_uninit_ctrl; - result = nvme_setup_prp_pools(dev); - if (result) - goto out_dev_unmap; - result = nvme_pci_alloc_iod_mempool(dev); if (result) - goto out_release_prp_pools; + goto out_dev_unmap; dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); @@ -3342,8 +3380,6 @@ out_disable: out_release_iod_mempool: mempool_destroy(dev->iod_mempool); mempool_destroy(dev->iod_meta_mempool); -out_release_prp_pools: - nvme_release_prp_pools(dev); out_dev_unmap: nvme_dev_unmap(dev); out_uninit_ctrl: @@ -3408,7 +3444,7 @@ static void nvme_remove(struct pci_dev *pdev) nvme_free_queues(dev, 0); mempool_destroy(dev->iod_mempool); mempool_destroy(dev->iod_meta_mempool); - nvme_release_prp_pools(dev); + nvme_release_descriptor_pools(dev); nvme_dev_unmap(dev); nvme_uninit_ctrl(&dev->ctrl); } @@ -3809,9 +3845,7 @@ static int __init nvme_init(void) BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); - BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); - BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); - BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); + BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS); return pci_register_driver(&nvme_driver); } diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index 6d31226f7a4f..29430949ce2f 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -260,6 +260,7 @@ static struct attribute *nvme_ns_attrs[] = { &dev_attr_ana_state.attr, 
&dev_attr_queue_depth.attr, &dev_attr_numa_nodes.attr, + &dev_attr_delayed_removal_secs.attr, #endif &dev_attr_io_passthru_err_log_enabled.attr, NULL, @@ -296,6 +297,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, if (nvme_disk_is_ns_head(dev_to_disk(dev))) return 0; } + if (a == &dev_attr_delayed_removal_secs.attr) { + struct gendisk *disk = dev_to_disk(dev); + + if (!nvme_disk_is_ns_head(disk)) + return 0; + } #endif return a->mode; } @@ -306,13 +313,41 @@ static const struct attribute_group nvme_ns_attr_group = { }; #ifdef CONFIG_NVME_MULTIPATH +/* + * NOTE: The dummy attribute does not appear in sysfs. It exists solely to allow + * control over the visibility of the multipath sysfs node. Without at least one + * attribute defined in nvme_ns_mpath_attrs[], the sysfs implementation does not + * invoke the multipath_sysfs_group_visible() method. As a result, we would not + * be able to control the visibility of the multipath sysfs node. + */ +static struct attribute dummy_attr = { + .name = "dummy", +}; + static struct attribute *nvme_ns_mpath_attrs[] = { + &dummy_attr, NULL, }; +static bool multipath_sysfs_group_visible(struct kobject *kobj) +{ + struct device *dev = container_of(kobj, struct device, kobj); + + return nvme_disk_is_ns_head(dev_to_disk(dev)); +} + +static bool multipath_sysfs_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + return false; +} + +DEFINE_SYSFS_GROUP_VISIBLE(multipath_sysfs) + const struct attribute_group nvme_ns_mpath_attr_group = { .name = "multipath", .attrs = nvme_ns_mpath_attrs, + .is_visible = SYSFS_GROUP_VISIBLE(multipath_sysfs), }; #endif diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index aba365f97cf6..853bc67d045c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -403,7 +403,7 @@ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) } static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, - bool sync, bool last) + bool last) { struct nvme_tcp_queue *queue = req->queue; bool empty; @@ -417,7 +417,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, * are on the same cpu, so we don't introduce contention. 
*/ if (queue->io_cpu == raw_smp_processor_id() && - sync && empty && mutex_trylock(&queue->send_mutex)) { + empty && mutex_trylock(&queue->send_mutex)) { nvme_tcp_send_all(queue); mutex_unlock(&queue->send_mutex); } @@ -770,7 +770,9 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, req->ttag = pdu->ttag; nvme_tcp_setup_h2c_data_pdu(req); - nvme_tcp_queue_request(req, false, true); + + llist_add(&req->lentry, &queue->req_list); + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); return 0; } @@ -2385,7 +2387,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) if (ret) return ret; - if (ctrl->opts && ctrl->opts->concat && !ctrl->tls_pskid) { + if (ctrl->opts->concat && !ctrl->tls_pskid) { /* See comments for nvme_tcp_key_revoke_needed() */ dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n"); nvme_stop_keep_alive(ctrl); @@ -2637,7 +2639,7 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) ctrl->async_req.curr_bio = NULL; ctrl->async_req.data_len = 0; - nvme_tcp_queue_request(&ctrl->async_req, true, true); + nvme_tcp_queue_request(&ctrl->async_req, true); } static void nvme_tcp_complete_timed_out(struct request *rq) @@ -2789,7 +2791,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_start_request(rq); - nvme_tcp_queue_request(req, true, bd->last); + nvme_tcp_queue_request(req, bd->last); return BLK_STS_OK; } diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index acc138bbf8f2..c7317299078d 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -63,14 +63,9 @@ static void nvmet_execute_create_sq(struct nvmet_req *req) if (status != NVME_SC_SUCCESS) goto complete; - /* - * Note: The NVMe specification allows multiple SQs to use the same CQ. - * However, the target code does not really support that. So for now, - * prevent this and fail the command if sqid and cqid are different. 
- */ - if (!cqid || cqid != sqid) { - pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid); - status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR; + status = nvmet_check_io_cqid(ctrl, cqid, false); + if (status != NVME_SC_SUCCESS) { + pr_err("SQ %u: Invalid CQID %u\n", sqid, cqid); goto complete; } @@ -79,7 +74,7 @@ static void nvmet_execute_create_sq(struct nvmet_req *req) goto complete; } - status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1); + status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1); complete: nvmet_req_complete(req, status); @@ -96,14 +91,15 @@ static void nvmet_execute_delete_cq(struct nvmet_req *req) goto complete; } - if (!cqid) { - status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + status = nvmet_check_io_cqid(ctrl, cqid, false); + if (status != NVME_SC_SUCCESS) goto complete; - } - status = nvmet_check_cqid(ctrl, cqid); - if (status != NVME_SC_SUCCESS) + if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) { + /* Some SQs are still using this CQ */ + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; goto complete; + } status = ctrl->ops->delete_cq(ctrl, cqid); @@ -127,12 +123,7 @@ static void nvmet_execute_create_cq(struct nvmet_req *req) goto complete; } - if (!cqid) { - status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; - goto complete; - } - - status = nvmet_check_cqid(ctrl, cqid); + status = nvmet_check_io_cqid(ctrl, cqid, true); if (status != NVME_SC_SUCCESS) goto complete; diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index 9429b8218408..b340380f3892 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -280,9 +280,12 @@ void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) bool nvmet_check_auth_status(struct nvmet_req *req) { - if (req->sq->ctrl->host_key && - !req->sq->authenticated) - return false; + if (req->sq->ctrl->host_key) { + if (req->sq->qid > 0) + return true; + if (!req->sq->authenticated) + return false; + } return true; } @@ -290,7 +293,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, unsigned int shash_len) { struct crypto_shash *shash_tfm; - struct shash_desc *shash; + SHASH_DESC_ON_STACK(shash, shash_tfm); struct nvmet_ctrl *ctrl = req->sq->ctrl; const char *hash_name; u8 *challenge = req->sq->dhchap_c1; @@ -342,19 +345,13 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c1, challenge, shash_len); if (ret) - goto out_free_challenge; + goto out; } pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, req->sq->dhchap_tid); - shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), - GFP_KERNEL); - if (!shash) { - ret = -ENOMEM; - goto out_free_challenge; - } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); if (ret) @@ -389,8 +386,6 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: - kfree(shash); -out_free_challenge: if (challenge != req->sq->dhchap_c1) kfree(challenge); out_free_response: diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 245475c43127..db7b17d1094e 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -813,11 +813,43 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status) } EXPORT_SYMBOL_GPL(nvmet_req_complete); +void nvmet_cq_init(struct nvmet_cq *cq) +{ + refcount_set(&cq->ref, 1); +} +EXPORT_SYMBOL_GPL(nvmet_cq_init); + +bool nvmet_cq_get(struct nvmet_cq *cq) +{ + return refcount_inc_not_zero(&cq->ref); +} 
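/*
 * Minimal sketch of the on-stack shash pattern that nvmet_auth_host_hash()
 * in the auth.c hunk above now uses instead of a kzalloc()'d descriptor.
 * Illustrative only; the function name is hypothetical and error handling
 * is trimmed to the essentials.
 */
#include <crypto/hash.h>

static int example_shash_digest(struct crypto_shash *tfm,
				const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);	/* descriptor lives on the stack */
	int ret;

	desc->tfm = tfm;
	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, data, len);
	if (!ret)
		ret = crypto_shash_final(desc, out);
	shash_desc_zero(desc);		/* wipe hash state from the stack */
	return ret;
}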
+EXPORT_SYMBOL_GPL(nvmet_cq_get); + +void nvmet_cq_put(struct nvmet_cq *cq) +{ + if (refcount_dec_and_test(&cq->ref)) + nvmet_cq_destroy(cq); +} +EXPORT_SYMBOL_GPL(nvmet_cq_put); + void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size) { cq->qid = qid; cq->size = size; + + ctrl->cqs[qid] = cq; +} + +void nvmet_cq_destroy(struct nvmet_cq *cq) +{ + struct nvmet_ctrl *ctrl = cq->ctrl; + + if (ctrl) { + ctrl->cqs[cq->qid] = NULL; + nvmet_ctrl_put(cq->ctrl); + cq->ctrl = NULL; + } } void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, @@ -837,37 +869,47 @@ static void nvmet_confirm_sq(struct percpu_ref *ref) complete(&sq->confirm_done); } -u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid) +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) { - if (!ctrl->sqs) + if (!ctrl->cqs) return NVME_SC_INTERNAL | NVME_STATUS_DNR; if (cqid > ctrl->subsys->max_qid) return NVME_SC_QID_INVALID | NVME_STATUS_DNR; - /* - * Note: For PCI controllers, the NVMe specifications allows multiple - * SQs to share a single CQ. However, we do not support this yet, so - * check that there is no SQ defined for a CQ. If one exist, then the - * CQ ID is invalid for creation as well as when the CQ is being - * deleted (as that would mean that the SQ was not deleted before the - * CQ). - */ - if (ctrl->sqs[cqid]) + if ((create && ctrl->cqs[cqid]) || (!create && !ctrl->cqs[cqid])) return NVME_SC_QID_INVALID | NVME_STATUS_DNR; return NVME_SC_SUCCESS; } +u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) +{ + if (!cqid) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + return nvmet_check_cqid(ctrl, cqid, create); +} + +bool nvmet_cq_in_use(struct nvmet_cq *cq) +{ + return refcount_read(&cq->ref) > 1; +} +EXPORT_SYMBOL_GPL(nvmet_cq_in_use); + u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size) { u16 status; - status = nvmet_check_cqid(ctrl, qid); + status = nvmet_check_cqid(ctrl, qid, true); if (status != NVME_SC_SUCCESS) return status; + if (!kref_get_unless_zero(&ctrl->ref)) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + cq->ctrl = ctrl; + + nvmet_cq_init(cq); nvmet_cq_setup(ctrl, cq, qid, size); return NVME_SC_SUCCESS; @@ -891,7 +933,7 @@ u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, } u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, - u16 sqid, u16 size) + struct nvmet_cq *cq, u16 sqid, u16 size) { u16 status; int ret; @@ -903,7 +945,7 @@ u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, if (status != NVME_SC_SUCCESS) return status; - ret = nvmet_sq_init(sq); + ret = nvmet_sq_init(sq, cq); if (ret) { status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto ctrl_put; @@ -935,6 +977,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); nvmet_auth_sq_free(sq); + nvmet_cq_put(sq->cq); /* * we must reference the ctrl again after waiting for inflight IO @@ -967,18 +1010,23 @@ static void nvmet_sq_free(struct percpu_ref *ref) complete(&sq->free_done); } -int nvmet_sq_init(struct nvmet_sq *sq) +int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq) { int ret; + if (!nvmet_cq_get(cq)) + return -EINVAL; + ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); if (ret) { pr_err("percpu_ref init failed!\n"); + nvmet_cq_put(cq); return ret; } init_completion(&sq->free_done); init_completion(&sq->confirm_done); nvmet_auth_sq_init(sq); + sq->cq = cq; return 0; } @@ -1108,13 +1156,13 @@ static u16 
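/*
 * Sketch of the CQ lifetime rules introduced above, as the transport hunks
 * later in this diff (fc, loop, rdma, tcp) apply them: the queue owner holds
 * the initial reference from nvmet_cq_init(), each SQ takes its own reference
 * in nvmet_sq_init() and drops it in nvmet_sq_destroy(), and the owner's
 * final nvmet_cq_put() destroys the CQ. The transport_* helpers below are
 * illustrative, not from the patch.
 */
static int transport_alloc_queue_pair(struct nvmet_cq *cq, struct nvmet_sq *sq)
{
	int ret;

	nvmet_cq_init(cq);		/* refcount = 1, held by the transport */
	ret = nvmet_sq_init(sq, cq);	/* SQ takes its own CQ reference */
	if (ret)
		nvmet_cq_put(cq);	/* drop the initial reference on error */
	return ret;
}

static void transport_free_queue_pair(struct nvmet_cq *cq, struct nvmet_sq *sq)
{
	nvmet_sq_destroy(sq);		/* drops the SQ's CQ reference */
	nvmet_cq_put(cq);		/* last put frees/destroys the CQ */
}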
nvmet_parse_io_cmd(struct nvmet_req *req) return ret; } -bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops) +bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq, + const struct nvmet_fabrics_ops *ops) { u8 flags = req->cmd->common.flags; u16 status; - req->cq = cq; + req->cq = sq->cq; req->sq = sq; req->ops = ops; req->sg = NULL; @@ -1612,12 +1660,17 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args) if (!ctrl->sqs) goto out_free_changed_ns_list; + ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *), + GFP_KERNEL); + if (!ctrl->cqs) + goto out_free_sqs; + ret = ida_alloc_range(&cntlid_ida, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; - goto out_free_sqs; + goto out_free_cqs; } ctrl->cntlid = ret; @@ -1676,6 +1729,8 @@ init_pr_fail: mutex_unlock(&subsys->lock); nvmet_stop_keep_alive_timer(ctrl); ida_free(&cntlid_ida, ctrl->cntlid); +out_free_cqs: + kfree(ctrl->cqs); out_free_sqs: kfree(ctrl->sqs); out_free_changed_ns_list: @@ -1712,6 +1767,7 @@ static void nvmet_ctrl_free(struct kref *ref) nvmet_async_events_free(ctrl); kfree(ctrl->sqs); + kfree(ctrl->cqs); kfree(ctrl->changed_ns_list); kfree(ctrl); diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index df7207640506..c06f3e04296c 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -119,7 +119,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE); memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); - strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); + strscpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); } /* diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index f012bdf89850..7b8d8b397802 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -208,6 +208,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; } + kref_get(&ctrl->ref); + old = cmpxchg(&req->cq->ctrl, NULL, ctrl); + if (old) { + pr_warn("queue already connected!\n"); + req->error_loc = offsetof(struct nvmf_connect_command, opcode); + return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; + } + /* note: convert queue size from 0's-based value to 1's-based value */ nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); @@ -239,8 +247,8 @@ static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq) bool needs_auth = nvmet_has_auth(ctrl, sq); key_serial_t keyid = nvmet_queue_tls_keyid(sq); - /* Do not authenticate I/O queues for secure concatenation */ - if (ctrl->concat && sq->qid) + /* Do not authenticate I/O queues */ + if (sq->qid) needs_auth = false; if (keyid) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 7b50130f10f6..254537b93e63 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -816,7 +816,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) goto out_fail_iodlist; @@ -826,6 +827,7 @@ nvmet_fc_alloc_target_queue(struct 
nvmet_fc_tgt_assoc *assoc, return queue; out_fail_iodlist: + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); destroy_workqueue(queue->work_q); out_free_queue: @@ -934,6 +936,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) flush_workqueue(queue->work_q); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_tgt_q_put(queue); } @@ -1254,6 +1257,7 @@ nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, { lockdep_assert_held(&nvmet_fc_tgtlock); + nvmet_fc_tgtport_get(tgtport); pe->tgtport = tgtport; tgtport->pe = pe; @@ -1273,8 +1277,10 @@ nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); - if (pe->tgtport) + if (pe->tgtport) { + nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport->pe = NULL; + } list_del(&pe->pe_list); spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -1292,8 +1298,10 @@ nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) spin_lock_irqsave(&nvmet_fc_tgtlock, flags); pe = tgtport->pe; - if (pe) + if (pe) { + nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport = NULL; + } tgtport->pe = NULL; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -1316,6 +1324,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { if (tgtport->fc_target_port.node_name == pe->node_name && tgtport->fc_target_port.port_name == pe->port_name) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + WARN_ON(pe->tgtport); tgtport->pe = pe; pe->tgtport = tgtport; @@ -1580,6 +1591,39 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } +static void +nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct nvmet_fc_ls_iod *iod; + int i; + + iod = tgtport->iod; + for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) + cancel_work(&iod->work); + + /* + * After this point the connection is lost and thus any pending + * request can't be processed by the normal completion path. This + * is likely a request from nvmet_fc_send_ls_req_async. + */ + while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, + struct nvmet_fc_ls_req_op, lsreq_list))) { + list_del(&lsop->lsreq_list); + + if (!lsop->req_queued) + continue; + + lsreq = &lsop->ls_req; + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + nvmet_fc_tgtport_put(tgtport); + kfree(lsop); + } +} + /** * nvmet_fc_unregister_targetport - transport entry point called by an * LLDD to deregister/remove a previously @@ -1608,13 +1652,7 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) flush_workqueue(nvmet_wq); - /* - * should terminate LS's as well. However, LS's will be generated - * at the tail end of association termination, so they likely don't - * exist yet. And even if they did, it's worthwhile to just let - * them finish and targetport ref counting will clean things up. 
- */ - + nvmet_fc_free_pending_reqs(tgtport); nvmet_fc_tgtport_put(tgtport); return 0; @@ -2531,10 +2569,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, fod->data_sg = NULL; fod->data_sg_cnt = 0; - ret = nvmet_req_init(&fod->req, - &fod->queue->nvme_cq, - &fod->queue->nvme_sq, - &nvmet_fc_tgt_fcp_ops); + ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq, + &nvmet_fc_tgt_fcp_ops); if (!ret) { /* bad SQE content or invalid ctrl state */ /* nvmet layer has already called op done to send rsp. */ @@ -2860,12 +2896,17 @@ nvmet_fc_add_port(struct nvmet_port *port) list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { if ((tgtport->fc_target_port.node_name == traddr.nn) && (tgtport->fc_target_port.port_name == traddr.pn)) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + /* a FC port can only be 1 nvmet port id */ if (!tgtport->pe) { nvmet_fc_portentry_bind(tgtport, pe, port); ret = 0; } else ret = -EALREADY; + + nvmet_fc_tgtport_put(tgtport); break; } } @@ -2881,11 +2922,21 @@ static void nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_portentry_unbind(pe); - /* terminate any outstanding associations */ - __nvmet_fc_free_assocs(pe->tgtport); + if (tgtport) { + /* terminate any outstanding associations */ + __nvmet_fc_free_assocs(tgtport); + nvmet_fc_tgtport_put(tgtport); + } kfree(pe); } @@ -2894,10 +2945,21 @@ static void nvmet_fc_discovery_chg(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; - struct nvmet_fc_tgtport *tgtport = pe->tgtport; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (!tgtport) + return; if (tgtport && tgtport->ops->discovery_event) tgtport->ops->discovery_event(&tgtport->fc_target_port); + + nvmet_fc_tgtport_put(tgtport); } static ssize_t diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 641201e62c1b..257b497d515a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -207,7 +207,6 @@ static LIST_HEAD(fcloop_nports); struct fcloop_lport { struct nvme_fc_local_port *localport; struct list_head lport_list; - struct completion unreg_done; refcount_t ref; }; @@ -215,6 +214,9 @@ struct fcloop_lport_priv { struct fcloop_lport *lport; }; +/* The port is already being removed, avoid double free */ +#define PORT_DELETED 0 + struct fcloop_rport { struct nvme_fc_remote_port *remoteport; struct nvmet_fc_target_port *targetport; @@ -223,6 +225,7 @@ struct fcloop_rport { spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; + unsigned long flags; }; struct fcloop_tport { @@ -233,6 +236,7 @@ struct fcloop_tport { spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; + unsigned long flags; }; struct fcloop_nport { @@ -288,6 +292,9 @@ struct fcloop_ini_fcpreq { spinlock_t inilock; }; +/* SLAB cache for fcloop_lsreq structures */ +static struct kmem_cache *lsreq_cache; + static inline struct fcloop_lsreq * ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp) { @@ -338,6 +345,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work) * callee may free memory 
containing tls_req. * do not reference lsreq after this. */ + kmem_cache_free(lsreq_cache, tls_req); spin_lock(&rport->lock); } @@ -349,10 +357,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, struct nvmefc_ls_req *lsreq) { - struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_rport *rport = remoteport->private; + struct fcloop_lsreq *tls_req; int ret = 0; + tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL); + if (!tls_req) + return -ENOMEM; tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); @@ -389,14 +400,17 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport, lsrsp->done(lsrsp); - if (remoteport) { - rport = remoteport->private; - spin_lock(&rport->lock); - list_add_tail(&tls_req->ls_list, &rport->ls_list); - spin_unlock(&rport->lock); - queue_work(nvmet_wq, &rport->ls_work); + if (!remoteport) { + kmem_cache_free(lsreq_cache, tls_req); + return 0; } + rport = remoteport->private; + spin_lock(&rport->lock); + list_add_tail(&tls_req->ls_list, &rport->ls_list); + spin_unlock(&rport->lock); + queue_work(nvmet_wq, &rport->ls_work); + return 0; } @@ -422,6 +436,7 @@ fcloop_tport_lsrqst_work(struct work_struct *work) * callee may free memory containing tls_req. * do not reference lsreq after this. */ + kmem_cache_free(lsreq_cache, tls_req); spin_lock(&tport->lock); } @@ -432,8 +447,8 @@ static int fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *lsreq) { - struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_tport *tport = targetport->private; + struct fcloop_lsreq *tls_req; int ret = 0; /* @@ -441,6 +456,10 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, * hosthandle ignored as fcloop currently is * 1:1 tgtport vs remoteport */ + + tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL); + if (!tls_req) + return -ENOMEM; tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); @@ -457,6 +476,9 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp, lsreq->rqstaddr, lsreq->rqstlen); + if (ret) + kmem_cache_free(lsreq_cache, tls_req); + return ret; } @@ -471,18 +493,30 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, struct nvmet_fc_target_port *targetport = rport->targetport; struct fcloop_tport *tport; + if (!targetport) { + /* + * The target port is gone. The target doesn't expect any + * response anymore and the ->done call is not valid + * because the resources have been freed by + * nvmet_fc_free_pending_reqs. + * + * We end up here from delete association exchange: + * nvmet_fc_xmt_disconnect_assoc sends an async request. + */ + kmem_cache_free(lsreq_cache, tls_req); + return 0; + } + memcpy(lsreq->rspaddr, lsrsp->rspbuf, ((lsreq->rsplen < lsrsp->rsplen) ? 
lsreq->rsplen : lsrsp->rsplen)); lsrsp->done(lsrsp); - if (targetport) { - tport = targetport->private; - spin_lock(&tport->lock); - list_add_tail(&tls_req->ls_list, &tport->ls_list); - spin_unlock(&tport->lock); - queue_work(nvmet_wq, &tport->ls_work); - } + tport = targetport->private; + spin_lock(&tport->lock); + list_add_tail(&tls_req->ls_list, &tport->ls_list); + spin_unlock(&tport->lock); + queue_work(nvmet_wq, &tport->ls_work); return 0; } @@ -566,7 +600,8 @@ fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq, } /* release original io reference on tgt struct */ - fcloop_tfcp_req_put(tfcp_req); + if (tfcp_req) + fcloop_tfcp_req_put(tfcp_req); } static bool drop_fabric_opcode; @@ -618,12 +653,13 @@ fcloop_fcp_recv_work(struct work_struct *work) { struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, fcp_rcv_work); - struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; + struct nvmefc_fcp_req *fcpreq; unsigned long flags; int ret = 0; bool aborted = false; spin_lock_irqsave(&tfcp_req->reqlock, flags); + fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_START: tfcp_req->inistate = INI_IO_ACTIVE; @@ -638,16 +674,19 @@ fcloop_fcp_recv_work(struct work_struct *work) } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - if (unlikely(aborted)) - ret = -ECANCELED; - else { - if (likely(!check_for_drop(tfcp_req))) - ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, - &tfcp_req->tgt_fcp_req, - fcpreq->cmdaddr, fcpreq->cmdlen); - else - pr_info("%s: dropped command ********\n", __func__); + if (unlikely(aborted)) { + /* the abort handler will call fcloop_call_host_done */ + return; + } + + if (unlikely(check_for_drop(tfcp_req))) { + pr_info("%s: dropped command ********\n", __func__); + return; } + + ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, + &tfcp_req->tgt_fcp_req, + fcpreq->cmdaddr, fcpreq->cmdlen); if (ret) fcloop_call_host_done(fcpreq, tfcp_req, ret); } @@ -662,15 +701,17 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) unsigned long flags; spin_lock_irqsave(&tfcp_req->reqlock, flags); - fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_ABORTED: + fcpreq = tfcp_req->fcpreq; + tfcp_req->fcpreq = NULL; break; case INI_IO_COMPLETED: completed = true; break; default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); + fcloop_tfcp_req_put(tfcp_req); WARN_ON(1); return; } @@ -686,10 +727,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, &tfcp_req->tgt_fcp_req); - spin_lock_irqsave(&tfcp_req->reqlock, flags); - tfcp_req->fcpreq = NULL; - spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); /* call_host_done releases reference for abort downcall */ } @@ -958,13 +995,16 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, spin_lock(&inireq->inilock); tfcp_req = inireq->tfcp_req; - if (tfcp_req) - fcloop_tfcp_req_get(tfcp_req); + if (tfcp_req) { + if (!fcloop_tfcp_req_get(tfcp_req)) + tfcp_req = NULL; + } spin_unlock(&inireq->inilock); - if (!tfcp_req) + if (!tfcp_req) { /* abort has already been called */ - return; + goto out_host_done; + } /* break initiator/target relationship for io */ spin_lock_irqsave(&tfcp_req->reqlock, flags); @@ -979,7 +1019,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); - return; + goto out_host_done; } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); @@ -993,6 +1033,11 @@ 
fcloop_fcp_abort(struct nvme_fc_local_port *localport, */ fcloop_tfcp_req_put(tfcp_req); } + + return; + +out_host_done: + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); } static void @@ -1019,9 +1064,18 @@ fcloop_lport_get(struct fcloop_lport *lport) static void fcloop_nport_put(struct fcloop_nport *nport) { + unsigned long flags; + if (!refcount_dec_and_test(&nport->ref)) return; + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&nport->nport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (nport->lport) + fcloop_lport_put(nport->lport); + kfree(nport); } @@ -1037,9 +1091,6 @@ fcloop_localport_delete(struct nvme_fc_local_port *localport) struct fcloop_lport_priv *lport_priv = localport->private; struct fcloop_lport *lport = lport_priv->lport; - /* release any threads waiting for the unreg to complete */ - complete(&lport->unreg_done); - fcloop_lport_put(lport); } @@ -1047,18 +1098,38 @@ static void fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct fcloop_rport *rport = remoteport->private; + bool put_port = false; + unsigned long flags; flush_work(&rport->ls_work); - fcloop_nport_put(rport->nport); + + spin_lock_irqsave(&fcloop_lock, flags); + if (!test_and_set_bit(PORT_DELETED, &rport->flags)) + put_port = true; + rport->nport->rport = NULL; + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (put_port) + fcloop_nport_put(rport->nport); } static void fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) { struct fcloop_tport *tport = targetport->private; + bool put_port = false; + unsigned long flags; flush_work(&tport->ls_work); - fcloop_nport_put(tport->nport); + + spin_lock_irqsave(&fcloop_lock, flags); + if (!test_and_set_bit(PORT_DELETED, &tport->flags)) + put_port = true; + tport->nport->tport = NULL; + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (put_port) + fcloop_nport_put(tport->nport); } #define FCLOOP_HW_QUEUES 4 @@ -1082,7 +1153,6 @@ static struct nvme_fc_port_template fctemplate = { /* sizes of additional private data for data structures */ .local_priv_sz = sizeof(struct fcloop_lport_priv), .remote_priv_sz = sizeof(struct fcloop_rport), - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), }; @@ -1105,7 +1175,6 @@ static struct nvmet_fc_target_template tgttemplate = { .target_features = 0, /* sizes of additional private data for data structures */ .target_priv_sz = sizeof(struct fcloop_tport), - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), }; static ssize_t @@ -1170,51 +1239,92 @@ out_free_lport: } static int -__wait_localport_unreg(struct fcloop_lport *lport) +__localport_unreg(struct fcloop_lport *lport) { - int ret; + return nvme_fc_unregister_localport(lport->localport); +} - init_completion(&lport->unreg_done); +static struct fcloop_nport * +__fcloop_nport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_nport *nport; - ret = nvme_fc_unregister_localport(lport->localport); + list_for_each_entry(nport, &fcloop_nports, nport_list) { + if (nport->node_name != node_name || + nport->port_name != port_name) + continue; - if (!ret) - wait_for_completion(&lport->unreg_done); + if (fcloop_nport_get(nport)) + return nport; - return ret; + break; + } + + return NULL; } +static struct fcloop_nport * +fcloop_nport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_nport *nport; + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + nport = __fcloop_nport_lookup(node_name, port_name); + spin_unlock_irqrestore(&fcloop_lock, flags); + + 
return nport; +} + +static struct fcloop_lport * +__fcloop_lport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_lport *lport; + + list_for_each_entry(lport, &fcloop_lports, lport_list) { + if (lport->localport->node_name != node_name || + lport->localport->port_name != port_name) + continue; + + if (fcloop_lport_get(lport)) + return lport; + + break; + } + + return NULL; +} + +static struct fcloop_lport * +fcloop_lport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_lport *lport; + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + lport = __fcloop_lport_lookup(node_name, port_name); + spin_unlock_irqrestore(&fcloop_lock, flags); + + return lport; +} static ssize_t fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_lport *tlport, *lport = NULL; + struct fcloop_lport *lport; u64 nodename, portname; - unsigned long flags; int ret; ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tlport, &fcloop_lports, lport_list) { - if (tlport->localport->node_name == nodename && - tlport->localport->port_name == portname) { - if (!fcloop_lport_get(tlport)) - break; - lport = tlport; - break; - } - } - spin_unlock_irqrestore(&fcloop_lock, flags); - + lport = fcloop_lport_lookup(nodename, portname); if (!lport) return -ENOENT; - ret = __wait_localport_unreg(lport); + ret = __localport_unreg(lport); fcloop_lport_put(lport); return ret ? ret : count; @@ -1223,8 +1333,8 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, static struct fcloop_nport * fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) { - struct fcloop_nport *newnport, *nport = NULL; - struct fcloop_lport *tmplport, *lport = NULL; + struct fcloop_nport *newnport, *nport; + struct fcloop_lport *lport; struct fcloop_ctrl_options *opts; unsigned long flags; u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS; @@ -1239,10 +1349,8 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) goto out_free_opts; /* everything there ? 
*/ - if ((opts->mask & opts_mask) != opts_mask) { - ret = -EINVAL; + if ((opts->mask & opts_mask) != opts_mask) goto out_free_opts; - } newnport = kzalloc(sizeof(*newnport), GFP_KERNEL); if (!newnport) @@ -1258,60 +1366,61 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) refcount_set(&newnport->ref, 1); spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmplport, &fcloop_lports, lport_list) { - if (tmplport->localport->node_name == opts->wwnn && - tmplport->localport->port_name == opts->wwpn) - goto out_invalid_opts; - - if (tmplport->localport->node_name == opts->lpwwnn && - tmplport->localport->port_name == opts->lpwwpn) - lport = tmplport; + lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn); + if (lport) { + /* invalid configuration */ + fcloop_lport_put(lport); + goto out_free_newnport; } if (remoteport) { - if (!lport) - goto out_invalid_opts; - newnport->lport = lport; - } - - list_for_each_entry(nport, &fcloop_nports, nport_list) { - if (nport->node_name == opts->wwnn && - nport->port_name == opts->wwpn) { - if ((remoteport && nport->rport) || - (!remoteport && nport->tport)) { - nport = NULL; - goto out_invalid_opts; - } - - fcloop_nport_get(nport); - - spin_unlock_irqrestore(&fcloop_lock, flags); - - if (remoteport) - nport->lport = lport; - if (opts->mask & NVMF_OPT_ROLES) - nport->port_role = opts->roles; - if (opts->mask & NVMF_OPT_FCADDR) - nport->port_id = opts->fcaddr; + lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn); + if (!lport) { + /* invalid configuration */ goto out_free_newnport; } } - list_add_tail(&newnport->nport_list, &fcloop_nports); + nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn); + if (nport) { + if ((remoteport && nport->rport) || + (!remoteport && nport->tport)) { + /* invalid configuration */ + goto out_put_nport; + } + + /* found existing nport, discard the new nport */ + kfree(newnport); + } else { + list_add_tail(&newnport->nport_list, &fcloop_nports); + nport = newnport; + } + if (opts->mask & NVMF_OPT_ROLES) + nport->port_role = opts->roles; + if (opts->mask & NVMF_OPT_FCADDR) + nport->port_id = opts->fcaddr; + if (lport) { + if (!nport->lport) + nport->lport = lport; + else + fcloop_lport_put(lport); + } spin_unlock_irqrestore(&fcloop_lock, flags); kfree(opts); - return newnport; + return nport; -out_invalid_opts: - spin_unlock_irqrestore(&fcloop_lock, flags); +out_put_nport: + if (lport) + fcloop_lport_put(lport); + fcloop_nport_put(nport); out_free_newnport: + spin_unlock_irqrestore(&fcloop_lock, flags); kfree(newnport); out_free_opts: kfree(opts); - return nport; + return NULL; } static ssize_t @@ -1352,6 +1461,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, rport->nport = nport; rport->lport = nport->lport; nport->rport = rport; + rport->flags = 0; spin_lock_init(&rport->lock); INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work); INIT_LIST_HEAD(&rport->ls_list); @@ -1365,21 +1475,18 @@ __unlink_remote_port(struct fcloop_nport *nport) { struct fcloop_rport *rport = nport->rport; + lockdep_assert_held(&fcloop_lock); + if (rport && nport->tport) nport->tport->remoteport = NULL; nport->rport = NULL; - list_del(&nport->nport_list); - return rport; } static int __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) { - if (!rport) - return -EALREADY; - return nvme_fc_unregister_remoteport(rport->remoteport); } @@ -1387,8 +1494,8 @@ static ssize_t fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, const char *buf, 
size_t count) { - struct fcloop_nport *nport = NULL, *tmpport; - static struct fcloop_rport *rport; + struct fcloop_nport *nport; + struct fcloop_rport *rport; u64 nodename, portname; unsigned long flags; int ret; @@ -1397,24 +1504,24 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmpport, &fcloop_nports, nport_list) { - if (tmpport->node_name == nodename && - tmpport->port_name == portname && tmpport->rport) { - nport = tmpport; - rport = __unlink_remote_port(nport); - break; - } - } + nport = fcloop_nport_lookup(nodename, portname); + if (!nport) + return -ENOENT; + spin_lock_irqsave(&fcloop_lock, flags); + rport = __unlink_remote_port(nport); spin_unlock_irqrestore(&fcloop_lock, flags); - if (!nport) - return -ENOENT; + if (!rport) { + ret = -ENOENT; + goto out_nport_put; + } ret = __remoteport_unreg(nport, rport); +out_nport_put: + fcloop_nport_put(nport); + return ret ? ret : count; } @@ -1452,6 +1559,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr, tport->nport = nport; tport->lport = nport->lport; nport->tport = tport; + tport->flags = 0; spin_lock_init(&tport->lock); INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work); INIT_LIST_HEAD(&tport->ls_list); @@ -1465,6 +1573,8 @@ __unlink_target_port(struct fcloop_nport *nport) { struct fcloop_tport *tport = nport->tport; + lockdep_assert_held(&fcloop_lock); + if (tport && nport->rport) nport->rport->targetport = NULL; nport->tport = NULL; @@ -1475,9 +1585,6 @@ __unlink_target_port(struct fcloop_nport *nport) static int __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) { - if (!tport) - return -EALREADY; - return nvmet_fc_unregister_targetport(tport->targetport); } @@ -1485,8 +1592,8 @@ static ssize_t fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_nport *nport = NULL, *tmpport; - struct fcloop_tport *tport = NULL; + struct fcloop_nport *nport; + struct fcloop_tport *tport; u64 nodename, portname; unsigned long flags; int ret; @@ -1495,24 +1602,24 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmpport, &fcloop_nports, nport_list) { - if (tmpport->node_name == nodename && - tmpport->port_name == portname && tmpport->tport) { - nport = tmpport; - tport = __unlink_target_port(nport); - break; - } - } + nport = fcloop_nport_lookup(nodename, portname); + if (!nport) + return -ENOENT; + spin_lock_irqsave(&fcloop_lock, flags); + tport = __unlink_target_port(nport); spin_unlock_irqrestore(&fcloop_lock, flags); - if (!nport) - return -ENOENT; + if (!tport) { + ret = -ENOENT; + goto out_nport_put; + } ret = __targetport_unreg(nport, tport); +out_nport_put: + fcloop_nport_put(nport); + return ret ? 
ret : count; } @@ -1578,15 +1685,20 @@ static const struct class fcloop_class = { }; static struct device *fcloop_device; - static int __init fcloop_init(void) { int ret; + lsreq_cache = kmem_cache_create("lsreq_cache", + sizeof(struct fcloop_lsreq), 0, + 0, NULL); + if (!lsreq_cache) + return -ENOMEM; + ret = class_register(&fcloop_class); if (ret) { pr_err("couldn't register class fcloop\n"); - return ret; + goto out_destroy_cache; } fcloop_device = device_create_with_groups( @@ -1604,13 +1716,15 @@ static int __init fcloop_init(void) out_destroy_class: class_unregister(&fcloop_class); +out_destroy_cache: + kmem_cache_destroy(lsreq_cache); return ret; } static void __exit fcloop_exit(void) { - struct fcloop_lport *lport = NULL; - struct fcloop_nport *nport = NULL; + struct fcloop_lport *lport; + struct fcloop_nport *nport; struct fcloop_tport *tport; struct fcloop_rport *rport; unsigned long flags; @@ -1621,7 +1735,7 @@ static void __exit fcloop_exit(void) for (;;) { nport = list_first_entry_or_null(&fcloop_nports, typeof(*nport), nport_list); - if (!nport) + if (!nport || !fcloop_nport_get(nport)) break; tport = __unlink_target_port(nport); @@ -1629,13 +1743,21 @@ static void __exit fcloop_exit(void) spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __targetport_unreg(nport, tport); - if (ret) - pr_warn("%s: Failed deleting target port\n", __func__); + if (tport) { + ret = __targetport_unreg(nport, tport); + if (ret) + pr_warn("%s: Failed deleting target port\n", + __func__); + } - ret = __remoteport_unreg(nport, rport); - if (ret) - pr_warn("%s: Failed deleting remote port\n", __func__); + if (rport) { + ret = __remoteport_unreg(nport, rport); + if (ret) + pr_warn("%s: Failed deleting remote port\n", + __func__); + } + + fcloop_nport_put(nport); spin_lock_irqsave(&fcloop_lock, flags); } @@ -1648,7 +1770,7 @@ static void __exit fcloop_exit(void) spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __wait_localport_unreg(lport); + ret = __localport_unreg(lport); if (ret) pr_warn("%s: Failed deleting local port\n", __func__); @@ -1663,6 +1785,7 @@ static void __exit fcloop_exit(void) device_destroy(&fcloop_class, MKDEV(0, 0)); class_unregister(&fcloop_class); + kmem_cache_destroy(lsreq_cache); } module_init(fcloop_init); diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index a5c41144667c..f85a8441bcc6 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -33,10 +33,12 @@ struct nvme_loop_ctrl { struct list_head list; struct blk_mq_tag_set tag_set; - struct nvme_loop_iod async_event_iod; struct nvme_ctrl ctrl; struct nvmet_port *port; + + /* Must be last --ends in a flexible-array member. 
*/ + struct nvme_loop_iod async_event_iod; }; static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) @@ -148,8 +150,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_start_request(req); iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; iod->req.port = queue->ctrl->port; - if (!nvmet_req_init(&iod->req, &queue->nvme_cq, - &queue->nvme_sq, &nvme_loop_ops)) + if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) return BLK_STS_OK; if (blk_rq_nr_phys_segments(req)) { @@ -181,8 +182,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg) iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH; iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; - if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq, - &nvme_loop_ops)) { + if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) { dev_err(ctrl->ctrl.device, "failed async event work\n"); return; } @@ -273,6 +273,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) nvme_unquiesce_admin_queue(&ctrl->ctrl); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + nvmet_cq_put(&ctrl->queues[0].nvme_cq); nvme_remove_admin_tag_set(&ctrl->ctrl); } @@ -302,6 +303,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) for (i = 1; i < ctrl->ctrl.queue_count; i++) { clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + nvmet_cq_put(&ctrl->queues[i].nvme_cq); } ctrl->ctrl.queue_count = 1; /* @@ -327,9 +329,13 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) for (i = 1; i <= nr_io_queues; i++) { ctrl->queues[i].ctrl = ctrl; - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); - if (ret) + nvmet_cq_init(&ctrl->queues[i].nvme_cq); + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq, + &ctrl->queues[i].nvme_cq); + if (ret) { + nvmet_cq_put(&ctrl->queues[i].nvme_cq); goto out_destroy_queues; + } ctrl->ctrl.queue_count++; } @@ -360,9 +366,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) int error; ctrl->queues[0].ctrl = ctrl; - error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); - if (error) + nvmet_cq_init(&ctrl->queues[0].nvme_cq); + error = nvmet_sq_init(&ctrl->queues[0].nvme_sq, + &ctrl->queues[0].nvme_cq); + if (error) { + nvmet_cq_put(&ctrl->queues[0].nvme_cq); return error; + } ctrl->ctrl.queue_count = 1; error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, @@ -401,6 +411,7 @@ out_cleanup_tagset: nvme_remove_admin_tag_set(&ctrl->ctrl); out_free_sq: nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + nvmet_cq_put(&ctrl->queues[0].nvme_cq); return error; } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index b6db8b74dc4a..df69a9dee71c 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -141,13 +141,16 @@ static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns) } struct nvmet_cq { + struct nvmet_ctrl *ctrl; u16 qid; u16 size; + refcount_t ref; }; struct nvmet_sq { struct nvmet_ctrl *ctrl; struct percpu_ref ref; + struct nvmet_cq *cq; u16 qid; u16 size; u32 sqhd; @@ -247,6 +250,7 @@ struct nvmet_pr_log_mgr { struct nvmet_ctrl { struct nvmet_subsys *subsys; struct nvmet_sq **sqs; + struct nvmet_cq **cqs; void *drvdata; @@ -424,7 +428,7 @@ struct nvmet_fabrics_ops { u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl); /* Operations mandatory for PCI target controllers */ - u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags, + u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 cqid, u16 flags, 
u16 qsize, u64 prp1); u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid); u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags, @@ -557,8 +561,8 @@ u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req); u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req); u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req); -bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); +bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq, + const struct nvmet_fabrics_ops *ops); void nvmet_req_uninit(struct nvmet_req *req); size_t nvmet_req_transfer_len(struct nvmet_req *req); bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len); @@ -571,18 +575,24 @@ void nvmet_execute_set_features(struct nvmet_req *req); void nvmet_execute_get_features(struct nvmet_req *req); void nvmet_execute_keep_alive(struct nvmet_req *req); -u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid); +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create); +u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create); +void nvmet_cq_init(struct nvmet_cq *cq); void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size); u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size); +void nvmet_cq_destroy(struct nvmet_cq *cq); +bool nvmet_cq_get(struct nvmet_cq *cq); +void nvmet_cq_put(struct nvmet_cq *cq); +bool nvmet_cq_in_use(struct nvmet_cq *cq); u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create); void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size); -u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, - u16 size); +u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, + struct nvmet_cq *cq, u16 qid, u16 size); void nvmet_sq_destroy(struct nvmet_sq *sq); -int nvmet_sq_init(struct nvmet_sq *sq); +int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq); void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl); diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c index 7123c855b5a6..a4295a5b8d28 100644 --- a/drivers/nvme/target/pci-epf.c +++ b/drivers/nvme/target/pci-epf.c @@ -1354,15 +1354,17 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid) if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); + nvmet_cq_put(&cq->nvme_cq); return NVME_SC_SUCCESS; } static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, - u16 sqid, u16 flags, u16 qsize, u64 pci_addr) + u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr) { struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; + struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; u16 status; if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) @@ -1385,7 +1387,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, sq->qes = ctrl->io_sqes; sq->pci_size = sq->qes * sq->depth; - status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth); + status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid, + sq->depth); if (status != NVME_SC_SUCCESS) return status; @@ -1601,8 +1604,7 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) goto complete; } - if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq, - &nvmet_pci_epf_fabrics_ops)) + if (!nvmet_req_init(req, &iod->sq->nvme_sq, 
&nvmet_pci_epf_fabrics_ops)) goto complete; iod->data_len = nvmet_req_transfer_len(req); @@ -1879,8 +1881,8 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) qsize = aqa & 0x00000fff; pci_addr = asq & GENMASK_ULL(63, 12); - status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG, - qsize, pci_addr); + status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0, + NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr); if (status != NVME_SC_SUCCESS) { dev_err(ctrl->dev, "Failed to create admin submission queue\n"); nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 2a4536ef6184..432bdf7cd49e 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -976,8 +976,7 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, cmd->send_sge.addr, cmd->send_sge.length, DMA_TO_DEVICE); - if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, - &queue->nvme_sq, &nvmet_rdma_ops)) + if (!nvmet_req_init(&cmd->req, &queue->nvme_sq, &nvmet_rdma_ops)) return; status = nvmet_rdma_map_sgl(cmd); @@ -1353,6 +1352,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) pr_debug("freeing queue %d\n", queue->idx); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); nvmet_rdma_destroy_queue_ib(queue); if (!queue->nsrq) { @@ -1436,7 +1436,8 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, goto out_reject; } - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) { ret = NVME_RDMA_CM_NO_RSC; goto out_free_queue; @@ -1517,6 +1518,7 @@ out_ida_remove: out_destroy_sq: nvmet_sq_destroy(&queue->nvme_sq); out_free_queue: + nvmet_cq_put(&queue->nvme_cq); kfree(queue); out_reject: nvmet_rdma_cm_reject(cm_id, ret); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 12a5cb8641ca..c6603bd9c95e 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -7,6 +7,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/crc32c.h> #include <linux/err.h> #include <linux/nvme-tcp.h> #include <linux/nvme-keyring.h> @@ -17,7 +18,6 @@ #include <net/handshake.h> #include <linux/inet.h> #include <linux/llist.h> -#include <crypto/hash.h> #include <trace/events/sock.h> #include "nvmet.h" @@ -172,8 +172,6 @@ struct nvmet_tcp_queue { /* digest state */ bool hdr_digest; bool data_digest; - struct ahash_request *snd_hash; - struct ahash_request *rcv_hash; /* TLS state */ key_serial_t tls_pskid; @@ -294,14 +292,9 @@ static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) return queue->data_digest ? 
NVME_TCP_DIGEST_LENGTH : 0; } -static inline void nvmet_tcp_hdgst(struct ahash_request *hash, - void *pdu, size_t len) +static inline void nvmet_tcp_hdgst(void *pdu, size_t len) { - struct scatterlist sg; - - sg_init_one(&sg, pdu, len); - ahash_request_set_crypt(hash, &sg, pdu + len, len); - crypto_ahash_digest(hash); + put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len); } static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, @@ -318,7 +311,7 @@ static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, } recv_digest = *(__le32 *)(pdu + hdr->hlen); - nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); + nvmet_tcp_hdgst(pdu, len); exp_digest = *(__le32 *)(pdu + hdr->hlen); if (recv_digest != exp_digest) { pr_err("queue %d: header digest error: recv %#x expected %#x\n", @@ -441,12 +434,24 @@ err: return NVME_SC_INTERNAL; } -static void nvmet_tcp_calc_ddgst(struct ahash_request *hash, - struct nvmet_tcp_cmd *cmd) +static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd) { - ahash_request_set_crypt(hash, cmd->req.sg, - (void *)&cmd->exp_ddgst, cmd->req.transfer_len); - crypto_ahash_digest(hash); + size_t total_len = cmd->req.transfer_len; + struct scatterlist *sg = cmd->req.sg; + u32 crc = ~0; + + while (total_len) { + size_t len = min_t(size_t, total_len, sg->length); + + /* + * Note that the scatterlist does not contain any highmem pages, + * as it was allocated by sgl_alloc() with GFP_KERNEL. + */ + crc = crc32c(crc, sg_virt(sg), len); + total_len -= len; + sg = sg_next(sg); + } + cmd->exp_ddgst = cpu_to_le32(~crc); } static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) @@ -473,19 +478,18 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) if (queue->data_digest) { pdu->hdr.flags |= NVME_TCP_F_DDGST; - nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); + nvmet_tcp_calc_ddgst(cmd); } if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; - struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; @@ -503,14 +507,13 @@ static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; - struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; @@ -523,7 +526,7 @@ static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } @@ -857,42 +860,6 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); } -static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); - - ahash_request_free(queue->rcv_hash); - ahash_request_free(queue->snd_hash); - crypto_free_ahash(tfm); -} - -static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) -{ - struct crypto_ahash *tfm; - - tfm = 
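/*
 * Illustration of the digest convention used by the new helpers above:
 * CRC32C seeded with ~0, complemented at the end, stored little-endian.
 * Because crc32c() is a plain incremental update, hashing the payload chunk
 * by chunk (as the scatterlist walk in nvmet_tcp_calc_ddgst() does) yields
 * the same digest as a single pass over a contiguous buffer. The helper
 * below is only a sketch, not part of the patch.
 */
#include <linux/crc32c.h>

static __le32 example_nvme_tcp_ddgst(const void *a, size_t alen,
				     const void *b, size_t blen)
{
	u32 crc = ~0;

	crc = crc32c(crc, a, alen);	/* first chunk */
	crc = crc32c(crc, b, blen);	/* second chunk continues the same CRC */
	return cpu_to_le32(~crc);	/* == ~crc32c(~0, a||b, alen + blen) */
}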
crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->snd_hash) - goto free_tfm; - ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); - - queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->rcv_hash) - goto free_snd_hash; - ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); - - return 0; -free_snd_hash: - ahash_request_free(queue->snd_hash); -free_tfm: - crypto_free_ahash(tfm); - return -ENOMEM; -} - - static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) { struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; @@ -921,11 +888,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); - if (queue->hdr_digest || queue->data_digest) { - ret = nvmet_tcp_alloc_crypto(queue); - if (ret) - return ret; - } memset(icresp, 0, sizeof(*icresp)); icresp->hdr.type = nvme_tcp_icresp; @@ -1077,8 +1039,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) req = &queue->cmd->req; memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); - if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, - &queue->nvme_sq, &nvmet_tcp_ops))) { + if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) { pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n", req->cmd, req->cmd->common.command_id, req->cmd->common.opcode, @@ -1247,7 +1208,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; - nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); + nvmet_tcp_calc_ddgst(cmd); queue->offset = 0; queue->left = NVME_TCP_DIGEST_LENGTH; queue->rcv_state = NVMET_TCP_RECV_DDGST; @@ -1615,13 +1576,12 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) nvmet_sq_put_tls_key(&queue->nvme_sq); nvmet_tcp_uninit_data_in_cmds(queue); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); cancel_work_sync(&queue->io_work); nvmet_tcp_free_cmd_data_in_buffers(queue); /* ->sock will be released by fput() */ fput(queue->sock->file); nvmet_tcp_free_cmds(queue); - if (queue->hdr_digest || queue->data_digest) - nvmet_tcp_free_crypto(queue); ida_free(&nvmet_tcp_queue_ida, queue->idx); page_frag_cache_drain(&queue->pf_cache); kfree(queue); @@ -1950,7 +1910,8 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, if (ret) goto out_ida_remove; - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) goto out_free_connect; @@ -1993,6 +1954,7 @@ out_destroy_sq: mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_sq_destroy(&queue->nvme_sq); out_free_connect: + nvmet_cq_put(&queue->nvme_cq); nvmet_tcp_free_cmd(&queue->connect); out_ida_remove: ida_free(&nvmet_tcp_queue_ida, queue->idx); diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 9800b7681054..eb3cc28d43f8 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -40,6 +40,7 @@ config PCIE_APPLE depends on OF depends on PCI_MSI select PCI_HOST_COMMON + select IRQ_MSI_LIB help Say Y here if you want to enable PCIe controller support on Apple system-on-chips, like the Apple M1. 
This is required for the USB @@ -227,6 +228,7 @@ config PCI_TEGRA bool "NVIDIA Tegra PCIe controller" depends on ARCH_TEGRA || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want support for the PCIe host controller found on NVIDIA Tegra SoCs. @@ -303,6 +305,7 @@ config PCI_XGENE_MSI bool "X-Gene v1 PCIe MSI feature" depends on PCI_XGENE depends on PCI_MSI + select IRQ_MSI_LIB default y help Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 33d6bf460ffe..3219704aba0e 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp) irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler, pp); - dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, pp); + dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + PCI_NUM_INTX, &intx_domain_ops, pp); of_node_put(pcie_intc_node); if (!dra7xx->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 76a37368ae4f..1385d9db7b32 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -761,7 +761,7 @@ static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie) ks_pcie); } - intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX, + intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX, &ks_pcie_intx_irq_domain_ops, NULL); if (!intx_irq_domain) { dev_err(dev, "Failed to add irq domain for INTX irqs\n"); diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c index 4eb2a4e8189d..9f7251a16d32 100644 --- a/drivers/pci/controller/dwc/pcie-amd-mdb.c +++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c @@ -290,8 +290,8 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie, return -ENODEV; } - pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32, - &event_domain_ops, pcie); + pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32, + &event_domain_ops, pcie); if (!pcie->mdb_domain) { err = -ENOMEM; dev_err(dev, "Failed to add MDB domain\n"); @@ -300,8 +300,8 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie, irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS); - pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &amd_intx_domain_ops, pcie); + pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + PCI_NUM_INTX, &amd_intx_domain_ops, pcie); if (!pcie->intx_domain) { err = -ENOMEM; dev_err(dev, "Failed to add INTx domain\n"); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index ecc33f6789e3..d1cd48efad43 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -227,7 +227,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = { int dw_pcie_allocate_domains(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); + struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node); pp->irq_domain = irq_domain_create_linear(fwnode, 
pp->num_vectors, &dw_pcie_msi_domain_ops, pp); diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index c624b7ebd118..678d510a261d 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -144,8 +144,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) return -EINVAL; } - rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &intx_domain_ops, rockchip); + rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX, + &intx_domain_ops, rockchip); of_node_put(intc); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index 5757ca3803c9..43b28f826edd 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -279,7 +279,7 @@ static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp) goto out_put_node; } - pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, + pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX, &uniphier_intx_domain_ops, pp); if (!pcie->intx_irq_domain) { dev_err(pci->dev, "Failed to get INTx domain\n"); diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 0e088e74155d..a600f46ee3c3 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -435,12 +435,12 @@ static const struct irq_domain_ops msi_domain_ops = { static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); + struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node); struct mobiveil_msi *msi = &pcie->rp.msi; mutex_init(&msi->lock); - msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, - &msi_domain_ops, pcie); + msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors, + &msi_domain_ops, pcie); if (!msi->dev_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; @@ -461,12 +461,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; struct mobiveil_root_port *rp = &pcie->rp; /* setup INTx */ - rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, - &intx_domain_ops, pcie); + rp->intx_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX, + &intx_domain_ops, pcie); if (!rp->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index a29796cce420..7bac64533b14 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -1456,9 +1456,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) raw_spin_lock_init(&pcie->msi_irq_lock); mutex_init(&pcie->msi_used_lock); - pcie->msi_inner_domain = - irq_domain_add_linear(NULL, MSI_IRQ_NUM, - &advk_msi_domain_ops, pcie); + pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM, + &advk_msi_domain_ops, pcie); if (!pcie->msi_inner_domain) return -ENOMEM; @@ -1508,9 +1507,8 @@ static int 
advk_pcie_init_irq_domain(struct advk_pcie *pcie) irq_chip->irq_mask = advk_pcie_irq_mask; irq_chip->irq_unmask = advk_pcie_irq_unmask; - pcie->irq_domain = - irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &advk_pcie_irq_domain_ops, pcie); + pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &advk_pcie_irq_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); ret = -ENOMEM; @@ -1549,9 +1547,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = { static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie) { - pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1, - &advk_pcie_rp_irq_domain_ops, - pcie); + pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie); if (!pcie->rp_irq_domain) { dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index ffdeed25e961..28e43831c0f1 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -345,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) return irq ?: -EINVAL; } - p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &faraday_pci_irqdomain_ops, p); + p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX, + &faraday_pci_irqdomain_ops, p); of_node_put(intc); if (!p->irqdomain) { dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index ac27bda5ba26..e1eaa24559a2 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -3975,24 +3975,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg) { struct irq_data *irq_data; struct msi_desc *entry; - int ret = 0; if (!pdev->msi_enabled && !pdev->msix_enabled) return 0; - msi_lock_descs(&pdev->dev); + guard(msi_descs_lock)(&pdev->dev); msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) { irq_data = irq_get_irq_data(entry->irq); - if (WARN_ON_ONCE(!irq_data)) { - ret = -EINVAL; - break; - } - + if (WARN_ON_ONCE(!irq_data)) + return -EINVAL; hv_compose_msi_msg(irq_data, &entry->msg); } - msi_unlock_descs(&pdev->dev); - - return ret; + return 0; } /* diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index b0e3bce10aa4..60da24ba0a19 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1078,9 +1078,9 @@ static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port) return -ENODEV; } - port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &mvebu_pcie_intx_irq_domain_ops, - port); + port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + PCI_NUM_INTX, + &mvebu_pcie_intx_irq_domain_ops, port); of_node_put(pcie_intc_node); if (!port->intx_irq_domain) { dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index d2f88997283a..467ddc701adc 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -22,6 +22,7 @@ #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -1547,7 +1548,7 @@ static void 
tegra_pcie_msi_irq(struct irq_desc *desc) unsigned int index = i * 32 + offset; int ret; - ret = generic_handle_domain_irq(msi->domain->parent, index); + ret = generic_handle_domain_irq(msi->domain, index); if (ret) { /* * that's weird who triggered this? @@ -1565,30 +1566,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void tegra_msi_top_irq_ack(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void tegra_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void tegra_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip tegra_msi_top_chip = { - .name = "Tegra PCIe MSI", - .irq_ack = tegra_msi_top_irq_ack, - .irq_mask = tegra_msi_top_irq_mask, - .irq_unmask = tegra_msi_top_irq_unmask, -}; - static void tegra_msi_irq_ack(struct irq_data *d) { struct tegra_msi *msi = irq_data_get_irq_chip_data(d); @@ -1690,42 +1667,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = { .free = tegra_msi_domain_free, }; -static struct msi_domain_info tegra_msi_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &tegra_msi_top_chip, +static const struct msi_parent_ops tegra_msi_parent_ops = { + .supported_flags = (MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX), + .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSI_MASK_PARENT | + MSI_FLAG_NO_AFFINITY), + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int tegra_allocate_domains(struct tegra_msi *msi) { struct tegra_pcie *pcie = msi_to_pcie(msi); struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); - struct irq_domain *parent; - - parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, - &tegra_msi_domain_ops, msi); - if (!parent) { - dev_err(pcie->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + struct irq_domain_info info = { + .fwnode = fwnode, + .ops = &tegra_msi_domain_ops, + .size = INT_PCI_MSI_NR, + .host_data = msi, + }; - msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent); + msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops); if (!msi->domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); return -ENOMEM; } - return 0; } static void tegra_free_domains(struct tegra_msi *msi) { - struct irq_domain *parent = msi->domain->parent; - irq_domain_remove(msi->domain); - irq_domain_remove(parent); } static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index 7bce327897c9..b05ec8b0bb93 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/msi.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/of_pci.h> @@ -32,7 +33,6 @@ struct xgene_msi_group { struct xgene_msi { struct device_node *node; struct irq_domain *inner_domain; - struct irq_domain *msi_domain; u64 msi_addr; void __iomem *msi_regs; unsigned long *bitmap; @@ -44,20 +44,6 @@ struct xgene_msi { /* Global data */ static struct xgene_msi xgene_msi_ctrl; -static 
struct irq_chip xgene_msi_top_irq_chip = { - .name = "X-Gene1 MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info xgene_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), - .chip = &xgene_msi_top_irq_chip, -}; - /* * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where * n is group number (0..F), x is index of registers in each group (0..7) @@ -235,34 +221,35 @@ static void xgene_irq_domain_free(struct irq_domain *domain, irq_domain_free_irqs_parent(domain, virq, nr_irqs); } -static const struct irq_domain_ops msi_domain_ops = { +static const struct irq_domain_ops xgene_msi_domain_ops = { .alloc = xgene_irq_domain_alloc, .free = xgene_irq_domain_free, }; +static const struct msi_parent_ops xgene_msi_parent_ops = { + .supported_flags = (MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX), + .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS), + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + static int xgene_allocate_domains(struct xgene_msi *msi) { - msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, - &msi_domain_ops, msi); - if (!msi->inner_domain) - return -ENOMEM; - - msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), - &xgene_msi_domain_info, - msi->inner_domain); - - if (!msi->msi_domain) { - irq_domain_remove(msi->inner_domain); - return -ENOMEM; - } - - return 0; + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(msi->node), + .ops = &xgene_msi_domain_ops, + .size = NR_MSI_VEC, + .host_data = msi, + }; + + msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops); + return msi->inner_domain ? 
0 : -ENOMEM; } static void xgene_free_domains(struct xgene_msi *msi) { - if (msi->msi_domain) - irq_domain_remove(msi->msi_domain); if (msi->inner_domain) irq_domain_remove(msi->inner_domain); } diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index e1cee3c0575f..a43f21eb8fbb 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -164,9 +164,9 @@ static const struct irq_domain_ops msi_domain_ops = { static int altera_allocate_domains(struct altera_msi *msi) { - struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); + struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node); - msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, + msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors, &msi_domain_ops, msi); if (!msi->inner_domain) { dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 70409e71a18f..0fc77176a52e 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -855,7 +855,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) struct device_node *node = dev->of_node; /* Setup INTx */ - pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, + pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX, &intx_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 18e11b9a7f46..3d412a931774 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/list.h> #include <linux/module.h> @@ -133,7 +134,6 @@ struct apple_pcie { struct mutex lock; struct device *dev; void __iomem *base; - struct irq_domain *domain; unsigned long *bitmap; struct list_head ports; struct completion event; @@ -162,27 +162,6 @@ static void rmw_clear(u32 clr, void __iomem *addr) writel_relaxed(readl_relaxed(addr) & ~clr, addr); } -static void apple_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void apple_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip apple_msi_top_chip = { - .name = "PCIe MSI", - .irq_mask = apple_msi_top_irq_mask, - .irq_unmask = apple_msi_top_irq_unmask, - .irq_eoi = irq_chip_eoi_parent, - .irq_set_affinity = irq_chip_set_affinity_parent, - .irq_set_type = irq_chip_set_type_parent, -}; - static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { msg->address_hi = upper_32_bits(DOORBELL_ADDR); @@ -226,8 +205,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, for (i = 0; i < nr_irqs; i++) { irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, - &apple_msi_bottom_chip, - domain->host_data); + &apple_msi_bottom_chip, pcie); } return 0; @@ -251,12 +229,6 @@ static const struct irq_domain_ops apple_msi_domain_ops = { .free = apple_msi_domain_free, }; -static struct msi_domain_info apple_msi_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), - .chip = 
&apple_msi_top_chip, -}; - static void apple_port_irq_mask(struct irq_data *data) { struct apple_pcie_port *port = irq_data_get_irq_chip_data(data); @@ -595,11 +567,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, return 0; } +static const struct msi_parent_ops apple_msi_parent_ops = { + .supported_flags = (MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX | + MSI_FLAG_MULTI_PCI_MSI), + .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSI_MASK_PARENT), + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + static int apple_msi_init(struct apple_pcie *pcie) { struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); + struct irq_domain_info info = { + .fwnode = fwnode, + .ops = &apple_msi_domain_ops, + .size = pcie->nvecs, + .host_data = pcie, + }; struct of_phandle_args args = {}; - struct irq_domain *parent; int ret; ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges", @@ -619,28 +608,16 @@ static int apple_msi_init(struct apple_pcie *pcie) if (!pcie->bitmap) return -ENOMEM; - parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED); - if (!parent) { + info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED); + if (!info.parent) { dev_err(pcie->dev, "failed to find parent domain\n"); return -ENXIO; } - parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode, - &apple_msi_domain_ops, pcie); - if (!parent) { + if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) { dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); - - pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info, - parent); - if (!pcie->domain) { - dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); - return -ENOMEM; - } - return 0; } diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e19628e13898..92887b394eb4 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -581,10 +581,10 @@ static const struct irq_domain_ops msi_domain_ops = { static int brcm_allocate_domains(struct brcm_msi *msi) { - struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np); + struct fwnode_handle *fwnode = of_fwnode_handle(msi->np); struct device *dev = msi->dev; - msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi); + msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi); if (!msi->inner_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 649fcb449f34..d2cb4c4f821a 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -446,12 +446,12 @@ static void iproc_msi_disable(struct iproc_msi *msi) static int iproc_msi_alloc_domains(struct device_node *node, struct iproc_msi *msi) { - msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs, - &msi_domain_ops, msi); + msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs, + &msi_domain_ops, msi); if (!msi->inner_domain) return -ENOMEM; - msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node), &iproc_msi_domain_info, msi->inner_domain); if (!msi->msi_domain) { diff --git 
a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 9d52504acae4..b55f5973414c 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -745,8 +745,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) return -ENODEV; } - pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, - &intx_domain_ops, pcie); + pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX, + &intx_domain_ops, pcie); if (!pcie->intx_domain) { dev_err(dev, "failed to create INTx IRQ domain\n"); ret = -ENODEV; @@ -756,8 +756,9 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) /* Setup MSI */ mutex_init(&pcie->lock); - pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, - &mtk_msi_bottom_domain_ops, pcie); + pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node), + PCIE_MSI_IRQS_NUM, + &mtk_msi_bottom_domain_ops, pcie); if (!pcie->msi_bottom_domain) { dev_err(dev, "failed to create MSI bottom domain\n"); ret = -ENODEV; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 811a8b4acd50..e1934aa06c8d 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -485,7 +485,7 @@ static struct msi_domain_info mtk_msi_domain_info = { static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) { - struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node); + struct fwnode_handle *fwnode = of_fwnode_handle(port->pcie->dev->of_node); mutex_init(&port->lock); @@ -569,8 +569,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, return -ENODEV; } - port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); + port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); of_node_put(pcie_intc_node); if (!port->irq_domain) { dev_err(dev, "failed to get INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 6a46be17aa91..b9e7a8710cf0 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -693,8 +693,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) return -EINVAL; } - rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &intx_domain_ops, rockchip); + rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX, + &intx_domain_ops, rockchip); of_node_put(intc); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index 13ca493d22bd..d38f27e20761 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -395,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port) return -EINVAL; } - port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32, - &event_domain_ops, - port); + port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32, + &event_domain_ops, port); if (!port->cpm_domain) goto out; irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS); - port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, - port); + port->intx_domain = 
irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); if (!port->intx_domain) goto out; diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c index dd117f07fc95..dc9690a535e1 100644 --- a/drivers/pci/controller/pcie-xilinx-dma-pl.c +++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c @@ -470,10 +470,10 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port) struct device *dev = port->dev; struct xilinx_msi *msi = &port->msi; int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long); - struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node); + struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node); - msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS, - &dev_msi_domain_ops, port); + msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS, + &dev_msi_domain_ops, port); if (!msi->dev_domain) goto out; @@ -585,15 +585,15 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port) return -EINVAL; } - port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32, - &event_domain_ops, port); + port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32, + &event_domain_ops, port); if (!port->pldma_domain) return -ENOMEM; irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS); - port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); + port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); if (!port->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 8d6e2a89b067..c8b05477b719 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -495,11 +495,10 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) { #ifdef CONFIG_PCI_MSI struct device *dev = pcie->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); + struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node); struct nwl_msi *msi = &pcie->msi; - msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR, - &dev_msi_domain_ops, pcie); + msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie); if (!msi->dev_domain) { dev_err(dev, "failed to create dev IRQ domain\n"); return -ENOMEM; @@ -582,10 +581,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) return -EINVAL; } - pcie->intx_irq_domain = irq_domain_add_linear(intc_node, - PCI_NUM_INTX, - &intx_domain_ops, - pcie); + pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX, + &intx_domain_ops, pcie); of_node_put(intc_node); if (!pcie->intx_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 0b534f73a942..e36aa874bae9 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -461,9 +461,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie) return -ENODEV; } - pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, - pcie); + pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, pcie); of_node_put(pcie_intc_node); if 
(!pcie->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c index 4153214ca410..3abedf723215 100644 --- a/drivers/pci/controller/plda/pcie-plda-host.c +++ b/drivers/pci/controller/plda/pcie-plda-host.c @@ -150,13 +150,12 @@ static struct msi_domain_info plda_msi_domain_info = { static int plda_allocate_msi_domains(struct plda_pcie_rp *port) { struct device *dev = port->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); + struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node); struct plda_msi *msi = &port->msi; mutex_init(&port->msi.lock); - msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors, - &msi_domain_ops, port); + msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port); if (!msi->dev_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; @@ -393,10 +392,9 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port) return -EINVAL; } - port->event_domain = irq_domain_add_linear(pcie_intc_node, - port->num_events, - &plda_event_domain_ops, - port); + port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + port->num_events, &plda_event_domain_ops, + port); if (!port->event_domain) { dev_err(dev, "failed to get event domain\n"); of_node_put(pcie_intc_node); @@ -405,8 +403,8 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port) irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS); - port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); + port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); if (!port->intx_domain) { dev_err(dev, "failed to get an INTx IRQ domain\n"); of_node_put(pcie_intc_node); diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index e9e9aaa91770..d9996516f49e 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -65,9 +65,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) rc = zpci_deconfigure_device(zdev); out: - mutex_unlock(&zdev->state_lock); if (pdev) pci_dev_put(pdev); + mutex_unlock(&zdev->state_lock); return rc; } diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c index 17ec6332cb1d..818d55fbad0d 100644 --- a/drivers/pci/msi/api.c +++ b/drivers/pci/msi/api.c @@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev) if (!pci_msi_enabled() || !dev || !dev->msi_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msi_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msi); @@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev) if (!pci_msi_enabled() || !dev || !dev->msix_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msix_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msix); @@ -401,7 +399,7 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state); * Return: true if MSI has not been globally disabled through ACPI FADT, * PCI bridge quirks, or the "pci=nomsi" kernel command-line option. 
*/ -int pci_msi_enabled(void) +bool pci_msi_enabled(void) { return pci_msi_enable; } diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 8b8848788618..d6ce04054702 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -15,7 +15,7 @@ #include "../pci.h" #include "msi.h" -int pci_msi_enable = 1; +bool pci_msi_enable = true; /** * pci_msi_supported - check whether MSI may be enabled on a device @@ -335,43 +335,13 @@ static int msi_verify_entries(struct pci_dev *dev) return !entry ? 0 : -EIO; } -/** - * msi_capability_init - configure device's MSI capability structure - * @dev: pointer to the pci_dev data structure of MSI device function - * @nvec: number of interrupts to allocate - * @affd: description of automatic IRQ affinity assignments (may be %NULL) - * - * Setup the MSI capability structure of the device with the requested - * number of interrupts. A return value of zero indicates the successful - * setup of an entry with the new MSI IRQ. A negative return value indicates - * an error, and a positive return value indicates the number of interrupts - * which could have been allocated. - */ -static int msi_capability_init(struct pci_dev *dev, int nvec, - struct irq_affinity *affd) +static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks) { - struct irq_affinity_desc *masks = NULL; + int ret = msi_setup_msi_desc(dev, nvec, masks); struct msi_desc *entry, desc; - int ret; - /* Reject multi-MSI early on irq domain enabled architectures */ - if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY)) - return 1; - - /* - * Disable MSI during setup in the hardware, but mark it enabled - * so that setup code can evaluate it. - */ - pci_msi_set_enable(dev, 0); - dev->msi_enabled = 1; - - if (affd) - masks = irq_create_affinity_masks(nvec, affd); - - msi_lock_descs(&dev->dev); - ret = msi_setup_msi_desc(dev, nvec, masks); if (ret) - goto fail; + return ret; /* All MSIs are unmasked by default; mask them all */ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL); @@ -393,24 +363,51 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, goto err; /* Set MSI enabled bits */ + dev->msi_enabled = 1; pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 1); pcibios_free_irq(dev); dev->irq = entry->irq; - goto unlock; - + return 0; err: pci_msi_unmask(&desc, msi_multi_mask(&desc)); pci_free_msi_irqs(dev); -fail: - dev->msi_enabled = 0; -unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); return ret; } +/** + * msi_capability_init - configure device's MSI capability structure + * @dev: pointer to the pci_dev data structure of MSI device function + * @nvec: number of interrupts to allocate + * @affd: description of automatic IRQ affinity assignments (may be %NULL) + * + * Setup the MSI capability structure of the device with the requested + * number of interrupts. A return value of zero indicates the successful + * setup of an entry with the new MSI IRQ. A negative return value indicates + * an error, and a positive return value indicates the number of interrupts + * which could have been allocated. + */ +static int msi_capability_init(struct pci_dev *dev, int nvec, + struct irq_affinity *affd) +{ + /* Reject multi-MSI early on irq domain enabled architectures */ + if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY)) + return 1; + + /* + * Disable MSI during setup in the hardware, but mark it enabled + * so that setup code can evaluate it. 
+ */ + pci_msi_set_enable(dev, 0); + + struct irq_affinity_desc *masks __free(kfree) = + affd ? irq_create_affinity_masks(nvec, affd) : NULL; + + guard(msi_descs_lock)(&dev->dev); + return __msi_capability_init(dev, nvec, masks); +} + int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { @@ -666,38 +663,39 @@ static void msix_mask_all(void __iomem *base, int tsize) writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); } -static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, - int nvec, struct irq_affinity *affd) -{ - struct irq_affinity_desc *masks = NULL; - int ret; +DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T)); - if (affd) - masks = irq_create_affinity_masks(nvec, affd); +static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries, + int nvec, struct irq_affinity_desc *masks) +{ + struct pci_dev *dev __free(free_msi_irqs) = __dev; - msi_lock_descs(&dev->dev); - ret = msix_setup_msi_descs(dev, entries, nvec, masks); + int ret = msix_setup_msi_descs(dev, entries, nvec, masks); if (ret) - goto out_free; + return ret; ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); if (ret) - goto out_free; + return ret; /* Check if all MSI entries honor device restrictions */ ret = msi_verify_entries(dev); if (ret) - goto out_free; + return ret; msix_update_entries(dev, entries); - goto out_unlock; + retain_and_null_ptr(dev); + return 0; +} -out_free: - pci_free_msi_irqs(dev); -out_unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); - return ret; +static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, + int nvec, struct irq_affinity *affd) +{ + struct irq_affinity_desc *masks __free(kfree) = + affd ? irq_create_affinity_masks(nvec, affd) : NULL; + + guard(msi_descs_lock)(&dev->dev); + return __msix_setup_interrupts(dev, entries, nvec, masks); } /** @@ -873,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev) write_msg = arch_restore_msi_irqs(dev); - msi_lock_descs(&dev->dev); - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { - if (write_msg) - __pci_write_msi_msg(entry, &entry->msg); - pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + scoped_guard (msi_descs_lock, &dev->dev) { + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + if (write_msg) + __pci_write_msi_msg(entry, &entry->msg); + pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + } } - msi_unlock_descs(&dev->dev); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); } @@ -918,6 +916,53 @@ void pci_free_msi_irqs(struct pci_dev *dev) } } +#ifdef CONFIG_PCIE_TPH +/** + * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector + * @pdev: The PCIe device to update + * @index: The MSI-X index to update + * @tag: The tag to write + * + * Returns: 0 on success, error code on failure + */ +int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag) +{ + struct msi_desc *msi_desc; + struct irq_desc *irq_desc; + unsigned int virq; + + if (!pdev->msix_enabled) + return -ENXIO; + + guard(msi_descs_lock)(&pdev->dev); + virq = msi_get_virq(&pdev->dev, index); + if (!virq) + return -ENXIO; + /* + * This is a horrible hack, but short of implementing a PCI + * specific interrupt chip callback and a huge pile of + * infrastructure, this is the minor nuisance. It provides the + * protection against concurrent operations on this entry and keeps + * the control word cache in sync. 
+ */ + irq_desc = irq_to_desc(virq); + if (!irq_desc) + return -ENXIO; + + guard(raw_spinlock_irq)(&irq_desc->lock); + msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data); + if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual) + return -ENXIO; + + msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST; + msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag); + pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl); + /* Flush the write */ + readl(pci_msix_desc_addr(msi_desc)); + return 0; +} +#endif + /* Misc. infrastructure */ struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) @@ -928,5 +973,5 @@ EXPORT_SYMBOL(msi_desc_to_pci_dev); void pci_no_msi(void) { - pci_msi_enable = 0; + pci_msi_enable = false; } diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h index ee53cf079f4e..fc70b601e942 100644 --- a/drivers/pci/msi/msi.h +++ b/drivers/pci/msi/msi.h @@ -87,7 +87,7 @@ static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc) void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc); /* Subsystem variables */ -extern int pci_msi_enable; +extern bool pci_msi_enable; /* MSI internal functions invoked from the public APIs */ void pci_msi_shutdown(struct pci_dev *dev); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index b81e99cd4b62..39f368d2f26d 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -1064,6 +1064,15 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name); void pcim_release_region(struct pci_dev *pdev, int bar); +#ifdef CONFIG_PCI_MSI +int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag); +#else +static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag) +{ + return -ENODEV; +} +#endif + /* * Config Address for PCI Configuration Mechanism #1 * diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c index 07de59ca2ebf..77fce5e1b830 100644 --- a/drivers/pci/tph.c +++ b/drivers/pci/tph.c @@ -204,48 +204,6 @@ static u8 get_rp_completer_type(struct pci_dev *pdev) return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg); } -/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */ -static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag) -{ -#ifdef CONFIG_PCI_MSI - struct msi_desc *msi_desc = NULL; - void __iomem *vec_ctrl; - u32 val; - int err = 0; - - msi_lock_descs(&pdev->dev); - - /* Find the msi_desc entry with matching msix_idx */ - msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) { - if (msi_desc->msi_index == msix_idx) - break; - } - - if (!msi_desc) { - err = -ENXIO; - goto err_out; - } - - /* Get the vector control register (offset 0xc) pointed by msix_idx */ - vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE; - vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL; - - val = readl(vec_ctrl); - val &= ~PCI_MSIX_ENTRY_CTRL_ST; - val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag); - writel(val, vec_ctrl); - - /* Read back to flush the update */ - val = readl(vec_ctrl); - -err_out: - msi_unlock_descs(&pdev->dev); - return err; -#else - return -ENODEV; -#endif -} - /* Write tag to ST table - Return 0 if OK, otherwise -errno */ static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag) { @@ -346,7 +304,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag) switch (loc) { case PCI_TPH_LOC_MSIX: - err = write_tag_to_msix(pdev, index, tag); + err = pci_msix_write_tph_tag(pdev, index, tag); break; case PCI_TPH_LOC_CAP: err = 
write_tag_to_st_table(pdev, index, tag); diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index df9a28ba69dc..81b6f1a62349 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -474,8 +474,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - m1_pmu_disable_event(event); + perf_event_overflow(event, &data, regs); } cpu_pmu->start(cpu_pmu); diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index e506d59654e7..3db9f4ed17e8 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -887,8 +887,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu) * an irq_work which will be taken care of in the handling of * IPI_IRQ_WORK. */ - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } armv8pmu_start(cpu_pmu); diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c index b09615bb2bb2..7cb12c8e06c7 100644 --- a/drivers/perf/arm_v6_pmu.c +++ b/drivers/perf/arm_v6_pmu.c @@ -276,8 +276,7 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } /* diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c index 17831e1920bd..a1e438101114 100644 --- a/drivers/perf/arm_v7_pmu.c +++ b/drivers/perf/arm_v7_pmu.c @@ -930,8 +930,7 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } /* diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c index 638fea9b1263..c2ac41dd9e19 100644 --- a/drivers/perf/arm_xscale_pmu.c +++ b/drivers/perf/arm_xscale_pmu.c @@ -186,8 +186,7 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } irq_work_run(); @@ -519,8 +518,7 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) if (!armpmu_event_set_period(event)) continue; - if (perf_event_overflow(event, &data, regs)) - cpu_pmu->disable(event); + perf_event_overflow(event, &data, regs); } irq_work_run(); diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index b4eb2beab691..e20aaba0a33a 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -565,9 +565,8 @@ int mtk_eint_do_init(struct mtk_eint *eint) goto err_eint; } - eint->domain = irq_domain_add_linear(eint->dev->of_node, - eint->hw->ap_num, - &irq_domain_simple_ops, NULL); + eint->domain = irq_domain_create_linear(of_fwnode_handle(eint->dev->of_node), + eint->hw->ap_num, &irq_domain_simple_ops, NULL); if (!eint->domain) goto err_eint; diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 8b01d312305a..e57ac4ea91dd 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -1206,7 +1206,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) dev_dbg(dev, "bank %i: irq=%d\n", i, ret); } - atmel_pioctrl->irq_domain = irq_domain_add_linear(dev->of_node, + atmel_pioctrl->irq_domain = 
irq_domain_create_linear(of_fwnode_handle(dev->of_node), atmel_pioctrl->gpio_chip->ngpio, &irq_domain_simple_ops, NULL); if (!atmel_pioctrl->irq_domain) diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c index b693f4787044..0d7cc8280ea2 100644 --- a/drivers/pinctrl/pinctrl-keembay.c +++ b/drivers/pinctrl/pinctrl-keembay.c @@ -1268,7 +1268,7 @@ static void keembay_gpio_irq_handler(struct irq_desc *desc) for_each_set_clump8(bit, clump, ®, BITS_PER_TYPE(typeof(reg))) { pin = clump & ~KEEMBAY_GPIO_IRQ_ENABLE; val = keembay_read_pin(kpc->base0 + KEEMBAY_GPIO_DATA_IN, pin); - kmb_irq = irq_linear_revmap(gc->irq.domain, pin); + kmb_irq = irq_find_mapping(gc->irq.domain, pin); /* Checks if the interrupt is enabled */ if (val && (clump & KEEMBAY_GPIO_IRQ_ENABLE)) diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 5be14dc979e2..5cda6201b60f 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1611,15 +1611,16 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs, /* * We can use the register offset as the hardirq - * number as irq_domain_add_simple maps them lazily. + * number as irq_domain_create_simple maps them lazily. * This way we can easily support more than one * interrupt per function if needed. */ num_irqs = pcs->size; - pcs->domain = irq_domain_add_simple(np, num_irqs, 0, - &pcs_irqdomain_ops, - pcs_soc); + pcs->domain = irq_domain_create_simple(of_fwnode_handle(np), + num_irqs, 0, + &pcs_irqdomain_ops, + pcs_soc); if (!pcs->domain) { irq_set_chained_handler(pcs_soc->irq, NULL); return -EINVAL; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 82f0cc43bbf4..0eb816395dc6 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -44,7 +44,6 @@ * @pctrl: pinctrl handle. * @chip: gpiochip handle. * @desc: pin controller descriptor - * @restart_nb: restart notifier block. * @irq: parent irq for the TLMM irq_chip. 
* @intr_target_use_scm: route irq to application cpu using scm calls * @lock: Spinlock to protect register resources as well @@ -64,7 +63,6 @@ struct msm_pinctrl { struct pinctrl_dev *pctrl; struct gpio_chip chip; struct pinctrl_desc desc; - struct notifier_block restart_nb; int irq; @@ -1471,10 +1469,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return 0; } -static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, - void *data) +static int msm_ps_hold_restart(struct sys_off_data *data) { - struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb); + struct msm_pinctrl *pctrl = data->cb_data; writel(0, pctrl->regs[0] + PS_HOLD_OFFSET); mdelay(1000); @@ -1485,7 +1482,11 @@ static struct msm_pinctrl *poweroff_pctrl; static void msm_ps_hold_poweroff(void) { - msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL); + struct sys_off_data data = { + .cb_data = poweroff_pctrl, + }; + + msm_ps_hold_restart(&data); } static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) @@ -1495,9 +1496,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) for (i = 0; i < pctrl->soc->nfunctions; i++) if (!strcmp(func[i].name, "ps_hold")) { - pctrl->restart_nb.notifier_call = msm_ps_hold_restart; - pctrl->restart_nb.priority = 128; - if (register_restart_handler(&pctrl->restart_nb)) + if (devm_register_sys_off_handler(pctrl->dev, + SYS_OFF_MODE_RESTART, + 128, + msm_ps_hold_restart, + pctrl)) dev_err(pctrl->dev, "failed to setup restart handler.\n"); poweroff_pctrl = pctrl; @@ -1599,8 +1602,6 @@ void msm_pinctrl_remove(struct platform_device *pdev) struct msm_pinctrl *pctrl = platform_get_drvdata(pdev); gpiochip_remove(&pctrl->chip); - - unregister_restart_handler(&pctrl->restart_nb); } EXPORT_SYMBOL(msm_pinctrl_remove); diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index f1c5a991cf7b..bf8612d72daa 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1646,10 +1646,9 @@ int sunxi_pinctrl_init_with_flags(struct platform_device *pdev, } } - pctl->domain = irq_domain_add_linear(node, - pctl->desc->irq_banks * IRQ_PER_BANK, - &sunxi_pinctrl_irq_domain_ops, - pctl); + pctl->domain = irq_domain_create_linear(of_fwnode_handle(node), + pctl->desc->irq_banks * IRQ_PER_BANK, + &sunxi_pinctrl_irq_domain_ops, pctl); if (!pctl->domain) { dev_err(&pdev->dev, "Couldn't register IRQ domain\n"); ret = -ENOMEM; diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c index eaae044e4f82..73ca3f48c5cf 100644 --- a/drivers/platform/x86/amd/hsmp/acpi.c +++ b/drivers/platform/x86/amd/hsmp/acpi.c @@ -9,7 +9,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <asm/amd_hsmp.h> +#include <asm/amd/hsmp.h> #include <linux/acpi.h> #include <linux/device.h> @@ -23,7 +23,7 @@ #include <uapi/asm-generic/errno-base.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "hsmp.h" diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c index a3ac09a90de4..e262e8a97b45 100644 --- a/drivers/platform/x86/amd/hsmp/hsmp.c +++ b/drivers/platform/x86/amd/hsmp/hsmp.c @@ -7,7 +7,7 @@ * This file provides a device implementation for HSMP interface */ -#include <asm/amd_hsmp.h> +#include <asm/amd/hsmp.h> #include <linux/acpi.h> #include <linux/delay.h> diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c index 81931e808bbc..62bf9547631e 100644 --- 
a/drivers/platform/x86/amd/hsmp/plat.c +++ b/drivers/platform/x86/amd/hsmp/plat.c @@ -9,7 +9,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <asm/amd_hsmp.h> +#include <asm/amd/hsmp.h> #include <linux/acpi.h> #include <linux/build_bug.h> @@ -19,7 +19,7 @@ #include <linux/platform_device.h> #include <linux/sysfs.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "hsmp.h" diff --git a/drivers/platform/x86/amd/pmc/mp1_stb.c b/drivers/platform/x86/amd/pmc/mp1_stb.c index c005f00988f7..3b9b9f30faa3 100644 --- a/drivers/platform/x86/amd/pmc/mp1_stb.c +++ b/drivers/platform/x86/amd/pmc/mp1_stb.c @@ -11,7 +11,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 2e3f6fc67c56..5c7c01f66cde 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -11,6 +11,7 @@ #include <linux/dmi.h> #include <linux/io.h> #include <linux/ioport.h> +#include <asm/amd/fch.h> #include "pmc.h" @@ -20,7 +21,7 @@ struct quirk_entry { }; static struct quirk_entry quirk_s2idle_bug = { - .s2idle_bug_mmio = 0xfed80380, + .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH, }; static struct quirk_entry quirk_spurious_8042 = { diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index 0329fafe14eb..37c7a57afee5 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -28,7 +28,7 @@ #include <linux/seq_file.h> #include <linux/uaccess.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "pmc.h" diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index 96821101ec77..76910601cac8 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -14,7 +14,7 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/power_supply.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "pmf.h" /* PMF-SMU communication registers */ diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index 230e6ee96636..d8f1bf5e58a0 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj, int length; length = strlen(buf); - if (buf[length-1] == '\n') + if (length && buf[length - 1] == '\n') length--; /* firmware does verification of min/max password length, diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index a0eae24ca9e6..162809140f68 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -17,13 +17,13 @@ /* * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional * features made available on a range of Fujitsu laptops including the - * P2xxx/P5xxx/S6xxx/S7xxx series. + * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series. * * This driver implements a vendor-specific backlight control interface for * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu * laptops. * - * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and + * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and + * P8010. 
It should work on most P-series and S-series Lifebooks, but * YMMV. * @@ -107,7 +107,11 @@ #define KEY2_CODE 0x411 #define KEY3_CODE 0x412 #define KEY4_CODE 0x413 -#define KEY5_CODE 0x420 +#define KEY5_CODE 0x414 +#define KEY6_CODE 0x415 +#define KEY7_CODE 0x416 +#define KEY8_CODE 0x417 +#define KEY9_CODE 0x420 /* Hotkey ringbuffer limits */ #define MAX_HOTKEY_RINGBUFFER_SIZE 100 @@ -560,7 +564,7 @@ static const struct key_entry keymap_default[] = { { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, { KE_KEY, KEY3_CODE, { KEY_PROG3 } }, { KE_KEY, KEY4_CODE, { KEY_PROG4 } }, - { KE_KEY, KEY5_CODE, { KEY_RFKILL } }, + { KE_KEY, KEY9_CODE, { KEY_RFKILL } }, /* Soft keys read from status flags */ { KE_KEY, FLAG_RFKILL, { KEY_RFKILL } }, { KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } }, @@ -584,6 +588,18 @@ static const struct key_entry keymap_p8010[] = { { KE_END, 0 } }; +static const struct key_entry keymap_s2110[] = { + { KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */ + { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */ + { KE_KEY, KEY3_CODE, { KEY_WWW } }, /* "Internet" */ + { KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */ + { KE_KEY, KEY5_CODE, { KEY_STOPCD } }, + { KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } }, + { KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } }, + { KE_KEY, KEY8_CODE, { KEY_NEXTSONG } }, + { KE_END, 0 } +}; + static const struct key_entry *keymap = keymap_default; static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id) @@ -621,6 +637,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = { }, .driver_data = (void *)keymap_p8010 }, + { + .callback = fujitsu_laptop_dmi_keymap_override, + .ident = "Fujitsu LifeBook S2110", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"), + }, + .driver_data = (void *)keymap_s2110 + }, {} }; diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 1ae50702bdb7..b73e582128c9 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include "ifs.h" @@ -115,13 +116,13 @@ static int __init ifs_init(void) if (!m) return -ENODEV; - if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval)) + if (rdmsrq_safe(MSR_IA32_CORE_CAPS, &msrval)) return -ENODEV; if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS)) return -ENODEV; - if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval)) + if (rdmsrq_safe(MSR_INTEGRITY_CAPS, &msrval)) return -ENODEV; ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index de54bd1a5970..50f1fdf7dfed 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -5,6 +5,7 @@ #include <linux/sizes.h> #include <asm/cpu.h> #include <asm/microcode.h> +#include <asm/msr.h> #include "ifs.h" @@ -127,8 +128,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) ifsd = ifs_get_data(dev); msrs = ifs_get_test_msrs(dev); /* run scan hash copy */ - wrmsrl(msrs->copy_hashes, ifs_hash_ptr); - rdmsrl(msrs->copy_hashes_status, hashes_status.data); + wrmsrq(msrs->copy_hashes, ifs_hash_ptr); + rdmsrq(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ num_chunks = hashes_status.num_chunks; @@ -149,8 +150,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) linear_addr = base + i * 
chunk_size; linear_addr |= i; - wrmsrl(msrs->copy_chunks, linear_addr); - rdmsrl(msrs->copy_chunks_status, chunk_status.data); + wrmsrq(msrs->copy_chunks, linear_addr); + rdmsrq(msrs->copy_chunks_status, chunk_status.data); ifsd->valid_chunks = chunk_status.valid_chunks; err_code = chunk_status.error_code; @@ -195,8 +196,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) msrs = ifs_get_test_msrs(dev); if (need_copy_scan_hashes(ifsd)) { - wrmsrl(msrs->copy_hashes, ifs_hash_ptr); - rdmsrl(msrs->copy_hashes_status, hashes_status.data); + wrmsrq(msrs->copy_hashes, ifs_hash_ptr); + rdmsrq(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ chunk_size = hashes_status.chunk_size * SZ_1K; @@ -216,8 +217,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) } if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { - wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); - rdmsrl(msrs->copy_chunks_status, chunk_status.data); + wrmsrq(msrs->test_ctrl, INVALIDATE_STRIDE); + rdmsrq(msrs->copy_chunks_status, chunk_status.data); if (chunk_status.valid_chunks != 0) { dev_err(dev, "Couldn't invalidate installed stride - %d\n", chunk_status.valid_chunks); @@ -238,9 +239,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) chunk_table[1] = linear_addr; do { local_irq_disable(); - wrmsrl(msrs->copy_chunks, (u64)chunk_table); + wrmsrq(msrs->copy_chunks, (u64)chunk_table); local_irq_enable(); - rdmsrl(msrs->copy_chunks_status, chunk_status.data); + rdmsrq(msrs->copy_chunks_status, chunk_status.data); err_code = chunk_status.error_code; } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index f978dd05d4d8..dfc119d7354d 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -7,6 +7,7 @@ #include <linux/nmi.h> #include <linux/slab.h> #include <linux/stop_machine.h> +#include <asm/msr.h> #include "ifs.h" @@ -209,8 +210,8 @@ static int doscan(void *data) * take up to 200 milliseconds (in the case where all chunks * are processed in a single pass) before it retires. */ - wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data); - rdmsrl(MSR_SCAN_STATUS, status.data); + wrmsrq(MSR_ACTIVATE_SCAN, params->activate->data); + rdmsrq(MSR_SCAN_STATUS, status.data); trace_ifs_status(ifsd->cur_batch, start, stop, status.data); @@ -321,9 +322,9 @@ static int do_array_test(void *data) first = cpumask_first(cpu_smt_mask(cpu)); if (cpu == first) { - wrmsrl(MSR_ARRAY_BIST, command->data); + wrmsrq(MSR_ARRAY_BIST, command->data); /* Pass back the result of the test */ - rdmsrl(MSR_ARRAY_BIST, command->data); + rdmsrq(MSR_ARRAY_BIST, command->data); } return 0; @@ -374,8 +375,8 @@ static int do_array_test_gen1(void *status) first = cpumask_first(cpu_smt_mask(cpu)); if (cpu == first) { - wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); - rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status)); + wrmsrq(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); + rdmsrq(MSR_ARRAY_STATUS, *((u64 *)status)); } return 0; @@ -526,8 +527,8 @@ static int dosbaf(void *data) * starts scan of each requested bundle. The core test happens * during the "execution" of the WRMSR. 
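[Editorial sketch] The scan, array-BIST and SBAF paths above all share one handshake: the test is armed by a write to its activation MSR and the result is read back from the paired status MSR on the same CPU, with the core test executing while the WRMSR retires. A minimal sketch of that pattern using the renamed 64-bit accessors — ifs_activate_and_poll() is a hypothetical helper made up for illustration; the real driver decodes the status union field by field rather than treating any non-zero value as an error:

static int ifs_activate_and_poll(u32 activate_msr, u32 status_msr, u64 activate_data)
{
	u64 status;

	wrmsrq(activate_msr, activate_data);	/* test runs while the WRMSR retires */
	rdmsrq(status_msr, status);		/* per-chunk/bundle result is latched here */

	return status ? -EIO : 0;		/* simplified; real code inspects individual fields */
}

For the scan test this would be invoked roughly as ifs_activate_and_poll(MSR_ACTIVATE_SCAN, MSR_SCAN_STATUS, params->activate->data).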
*/ - wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); - rdmsrl(MSR_SBAF_STATUS, status.data); + wrmsrq(MSR_ACTIVATE_SBAF, run_params->activate->data); + rdmsrq(MSR_SBAF_STATUS, status.data); trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); /* Pass back the result of the test */ diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c index 320993bd6d31..f9c48738b853 100644 --- a/drivers/platform/x86/intel/pmc/arl.c +++ b/drivers/platform/x86/intel/pmc/arl.c @@ -681,6 +681,7 @@ static struct pmc_info arl_pmc_info_list[] = { #define ARL_NPU_PCI_DEV 0xad1d #define ARL_GNA_PCI_DEV 0xae4c +#define ARL_H_NPU_PCI_DEV 0x7d1d #define ARL_H_GNA_PCI_DEV 0x774c /* * Set power state of select devices that do not have drivers to D3 @@ -694,7 +695,7 @@ static void arl_d3_fixup(void) static void arl_h_d3_fixup(void) { - pmc_core_set_device_d3(ARL_NPU_PCI_DEV); + pmc_core_set_device_d3(ARL_H_NPU_PCI_DEV); pmc_core_set_device_d3(ARL_H_GNA_PCI_DEV); } diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c index 2c5af158bbe2..efea4e1ba52b 100644 --- a/drivers/platform/x86/intel/pmc/cnp.c +++ b/drivers/platform/x86/intel/pmc/cnp.c @@ -10,6 +10,7 @@ #include <linux/smp.h> #include <linux/suspend.h> +#include <asm/msr.h> #include "core.h" /* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */ @@ -227,10 +228,10 @@ static void disable_c1_auto_demote(void *unused) int cpunum = smp_processor_id(); u64 val; - rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, val); + rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); per_cpu(pkg_cst_config, cpunum) = val; val &= ~NHM_C1_AUTO_DEMOTE; - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, val); + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, val); pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, val); } @@ -239,7 +240,7 @@ static void restore_c1_auto_demote(void *unused) { int cpunum = smp_processor_id(); - wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum)); + wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum)); pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, per_cpu(pkg_cst_config, cpunum)); diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index 7a1d11f2914f..9f678c753dfa 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -22,7 +22,7 @@ #include <linux/suspend.h> #include <linux/units.h> -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/msr.h> @@ -1082,7 +1082,7 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused) unsigned int index; for (index = 0; map[index].name ; index++) { - if (rdmsrl_safe(map[index].bit_mask, &pcstate_count)) + if (rdmsrq_safe(map[index].bit_mask, &pcstate_count)) continue; pcstate_count *= 1000; @@ -1587,7 +1587,7 @@ static __maybe_unused int pmc_core_suspend(struct device *dev) /* Save PKGC residency for checking later */ for (i = 0; i < pmcdev->num_of_pkgc; i++) { - if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i])) + if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i])) return -EIO; } @@ -1603,7 +1603,7 @@ static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev) u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask; u64 deepest_pkgc_residency; - if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency)) + if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency)) return false; if (deepest_pkgc_residency == 
pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]) @@ -1655,7 +1655,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev) for (i = 0; i < pmcdev->num_of_pkgc; i++) { u64 pc_cnt; - if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) { + if (!rdmsrq_safe(msr_map[i].bit_mask, &pc_cnt)) { dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n", msr_map[i].name, pmcdev->pkgc_res_cnt[i], msr_map[i].name, pc_cnt); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 31239a93dd71..8a5713593811 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -21,6 +21,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "isst_if_common.h" @@ -191,7 +192,7 @@ void isst_resume_common(void) if (cb->registered) isst_mbox_resume_command(cb, sst_cmd); } else { - wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd, + wrmsrq_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd, sst_cmd->data); } } @@ -211,7 +212,7 @@ static void isst_restore_msr_local(int cpu) hash_for_each_possible(isst_hash, sst_cmd, hnode, punit_msr_white_list[i]) { if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu) - wrmsrl_safe(sst_cmd->cmd, sst_cmd->data); + wrmsrq_safe(sst_cmd->cmd, sst_cmd->data); } } mutex_unlock(&isst_hash_lock); @@ -406,7 +407,7 @@ static int isst_if_cpu_online(unsigned int cpu) isst_cpu_info[cpu].numa_node = cpu_to_node(cpu); - ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data); + ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data); if (ret) { /* This is not a fatal error on MSR mailbox only I/F */ isst_cpu_info[cpu].bus_info[0] = -1; @@ -420,12 +421,12 @@ static int isst_if_cpu_online(unsigned int cpu) if (isst_hpm_support) { - ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); if (!ret) goto set_punit_id; } - ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data); + ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data); if (ret) { isst_cpu_info[cpu].punit_cpu_id = -1; return ret; @@ -524,7 +525,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu, + ret = wrmsrq_safe_on_cpu(msr_cmd->logical_cpu, msr_cmd->msr, msr_cmd->data); *write_only = 1; @@ -535,7 +536,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume) } else { u64 data; - ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu, + ret = rdmsrq_safe_on_cpu(msr_cmd->logical_cpu, msr_cmd->msr, &data); if (!ret) { msr_cmd->data = data; @@ -831,8 +832,8 @@ static int __init isst_if_common_init(void) u64 data; /* Can fail only on some Skylake-X generations */ - if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) || - rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data)) + if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) || + rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data)) return -ENODEV; } diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c index c4b7af00352b..22745b217c6f 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c @@ -18,6 +18,7 @@ #include <uapi/linux/isst_if.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "isst_if_common.h" @@ -39,7 +40,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, /* 
Poll for rb bit == 0 */ retries = OS_MAILBOX_RETRY_COUNT; do { - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { ret = -EBUSY; continue; @@ -52,19 +53,19 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, return ret; /* Write DATA register */ - wrmsrl(MSR_OS_MAILBOX_DATA, command_data); + wrmsrq(MSR_OS_MAILBOX_DATA, command_data); /* Write command register */ data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) | (parameter & GENMASK_ULL(13, 0)) << 16 | (sub_command << 8) | command; - wrmsrl(MSR_OS_MAILBOX_INTERFACE, data); + wrmsrq(MSR_OS_MAILBOX_INTERFACE, data); /* Poll for rb bit == 0 */ retries = OS_MAILBOX_RETRY_COUNT; do { - rdmsrl(MSR_OS_MAILBOX_INTERFACE, data); + rdmsrq(MSR_OS_MAILBOX_INTERFACE, data); if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) { ret = -EBUSY; continue; @@ -74,7 +75,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter, return -ENXIO; if (response_data) { - rdmsrl(MSR_OS_MAILBOX_DATA, data); + rdmsrq(MSR_OS_MAILBOX_DATA, data); *response_data = data; } ret = 0; @@ -176,11 +177,11 @@ static int __init isst_if_mbox_init(void) return -ENODEV; /* Check presence of mailbox MSRs */ - ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data); + ret = rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data); if (ret) return ret; - ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data); + ret = rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data); if (ret) return ret; diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 9978cdd19851..4d30d5360c8f 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -27,6 +27,7 @@ #include <linux/kernel.h> #include <linux/minmax.h> #include <linux/module.h> +#include <asm/msr.h> #include <uapi/linux/isst_if.h> #include "isst_tpmi_core.h" @@ -556,7 +557,7 @@ static bool disable_dynamic_sst_features(void) { u64 value; - rdmsrl(MSR_PM_ENABLE, value); + rdmsrq(MSR_PM_ENABLE, value); return !(value & 0x1); } diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c index 2f01cd22a6ee..c21b3cb99b7c 100644 --- a/drivers/platform/x86/intel/tpmi_power_domains.c +++ b/drivers/platform/x86/intel/tpmi_power_domains.c @@ -157,7 +157,7 @@ static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info) u64 data; int ret; - ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); if (ret) return ret; @@ -203,7 +203,7 @@ static int __init tpmi_init(void) return -ENODEV; /* Check for MSR 0x54 presence */ - ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data); + ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data); if (ret) return ret; diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c index 79a0bcdeffb8..b5af3e91ba04 100644 --- a/drivers/platform/x86/intel/turbo_max_3.c +++ b/drivers/platform/x86/intel/turbo_max_3.c @@ -17,6 +17,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #define MSR_OC_MAILBOX 0x150 #define MSR_OC_MAILBOX_CMD_OFFSET 32 @@ -41,14 +42,14 @@ static int get_oc_core_priority(unsigned int cpu) value = cmd << MSR_OC_MAILBOX_CMD_OFFSET; /* Set the busy bit to indicate OS is trying to issue command */ value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT); - ret = wrmsrl_safe(MSR_OC_MAILBOX, value); + ret = wrmsrq_safe(MSR_OC_MAILBOX, 
value); if (ret) { pr_debug("cpu %d OC mailbox write failed\n", cpu); return ret; } for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) { - ret = rdmsrl_safe(MSR_OC_MAILBOX, &value); + ret = rdmsrq_safe(MSR_OC_MAILBOX, &value); if (ret) { pr_debug("cpu %d OC mailbox read failed\n", cpu); break; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index bdee5d00f30b..2a6897035150 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -21,6 +21,7 @@ #include <linux/suspend.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> #include "uncore-frequency-common.h" @@ -51,7 +52,7 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); if (ret) return ret; @@ -76,7 +77,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); if (ret) return ret; @@ -88,7 +89,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); } - ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); + ret = wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); if (ret) return ret; @@ -105,7 +106,7 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) if (data->control_cpu < 0) return -ENXIO; - ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio); + ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio); if (ret) return ret; @@ -212,7 +213,7 @@ static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode, if (!data || !data->valid || !data->stored_uncore_data) return 0; - wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, + wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, data->stored_uncore_data); } break; diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 5d717b1c23cf..9506f28fb7d8 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -370,7 +370,7 @@ static void ips_cpu_raise(struct ips_driver *ips) if (!ips->cpu_turbo_enabled) return; - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); cur_tdp_limit = turbo_override & TURBO_TDP_MASK; new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */ @@ -382,12 +382,12 @@ static void ips_cpu_raise(struct ips_driver *ips) thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; turbo_override |= new_tdp_limit; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); } /** @@ -405,7 +405,7 @@ static void ips_cpu_lower(struct ips_driver *ips) u64 turbo_override; u16 cur_limit, new_limit; - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); cur_limit = turbo_override & 
TURBO_TDP_MASK; new_limit = cur_limit - 8; /* 1W decrease */ @@ -417,12 +417,12 @@ static void ips_cpu_lower(struct ips_driver *ips) thm_writew(THM_MPCPC, (new_limit * 10) / 8); turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; turbo_override |= new_limit; - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); } /** @@ -437,10 +437,10 @@ static void do_enable_cpu_turbo(void *data) { u64 perf_ctl; - rdmsrl(IA32_PERF_CTL, perf_ctl); + rdmsrq(IA32_PERF_CTL, perf_ctl); if (perf_ctl & IA32_PERF_TURBO_DIS) { perf_ctl &= ~IA32_PERF_TURBO_DIS; - wrmsrl(IA32_PERF_CTL, perf_ctl); + wrmsrq(IA32_PERF_CTL, perf_ctl); } } @@ -475,10 +475,10 @@ static void do_disable_cpu_turbo(void *data) { u64 perf_ctl; - rdmsrl(IA32_PERF_CTL, perf_ctl); + rdmsrq(IA32_PERF_CTL, perf_ctl); if (!(perf_ctl & IA32_PERF_TURBO_DIS)) { perf_ctl |= IA32_PERF_TURBO_DIS; - wrmsrl(IA32_PERF_CTL, perf_ctl); + wrmsrq(IA32_PERF_CTL, perf_ctl); } } @@ -1215,7 +1215,7 @@ static int cpu_clamp_show(struct seq_file *m, void *data) u64 turbo_override; int tdp, tdc; - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); tdp = (int)(turbo_override & TURBO_TDP_MASK); tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT); @@ -1290,7 +1290,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) return NULL; } - rdmsrl(IA32_MISC_ENABLE, misc_en); + rdmsrq(IA32_MISC_ENABLE, misc_en); /* * If the turbo enable bit isn't set, we shouldn't try to enable/disable * turbo manually or we'll get an illegal MSR access, even though @@ -1312,7 +1312,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) return NULL; } - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_power); tdp = turbo_power & TURBO_TDP_MASK; /* Sanity check TDP against CPU */ @@ -1496,7 +1496,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) * Check PLATFORM_INFO MSR to make sure this chip is * turbo capable. 
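[Editorial sketch] ips_cpu_raise() and ips_cpu_lower() above are symmetric read-modify-write cycles on TURBO_POWER_CURRENT_LIMIT: one register unit is 1/8 W, hence the +/-8 steps for a 1 W change, and the override-enable bits are set before the new limit is written. A rough sketch of the common shape with the renamed accessors — ips_tdp_step() is a hypothetical helper, and the mirrored write to the thermal controller (THM_MPCPC) is omitted:

static void ips_tdp_step(int delta)	/* delta in 1/8 W units, e.g. +8 or -8 */
{
	u64 turbo_override;
	u16 new_limit;

	rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	new_limit = (turbo_override & TURBO_TDP_MASK) + delta;

	/* enable the TDC/TDP overrides before touching the limit, as the driver does */
	turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
	wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	turbo_override &= ~TURBO_TDP_MASK;
	turbo_override |= new_limit;
	wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}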
*/ - rdmsrl(PLATFORM_INFO, platform_info); + rdmsrq(PLATFORM_INFO, platform_info); if (!(platform_info & PLATFORM_TDP)) { dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n"); return -ENODEV; @@ -1529,7 +1529,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) ips->mgta_val = thm_readw(THM_MGTA); /* Save turbo limits & ratios */ - rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); ips_disable_cpu_turbo(ips); ips->cpu_turbo_enabled = false; @@ -1596,10 +1596,10 @@ static void ips_remove(struct pci_dev *dev) if (ips->gpu_turbo_disable) symbol_put(i915_gpu_turbo_disable); - rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN); - wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); - wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); free_irq(ips->irq, ips); pci_free_irq_vectors(dev); diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 0fc275e461be..00b1e7c79a3d 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -1061,8 +1061,8 @@ static ssize_t current_value_store(struct kobject *kobj, ret = -EINVAL; goto out; } - set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name, - new_setting, tlmi_priv.pwd_admin->signature); + set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name, + new_setting, tlmi_priv.pwd_admin->signature); if (!set_str) { ret = -ENOMEM; goto out; @@ -1092,7 +1092,7 @@ static ssize_t current_value_store(struct kobject *kobj, goto out; } - set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name, + set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name, new_setting); if (!set_str) { ret = -ENOMEM; @@ -1120,11 +1120,11 @@ static ssize_t current_value_store(struct kobject *kobj, } if (auth_str) - set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name, - new_setting, auth_str); + set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name, + new_setting, auth_str); else - set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name, - new_setting); + set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name, + new_setting); if (!set_str) { ret = -ENOMEM; goto out; @@ -1629,9 +1629,6 @@ static int tlmi_analyze(struct wmi_device *wdev) continue; } - /* It is not allowed to have '/' for file name. Convert it into '\'. */ - strreplace(item, '/', '\\'); - /* Remove the value part */ strreplace(item, ',', '\0'); @@ -1644,11 +1641,16 @@ static int tlmi_analyze(struct wmi_device *wdev) } setting->wdev = wdev; setting->index = i; + + strscpy(setting->name, item); + /* It is not allowed to have '/' for file name. Convert it into '\'. 
*/ + strreplace(item, '/', '\\'); strscpy(setting->display_name, item); + /* If BIOS selections supported, load those */ if (tlmi_priv.can_get_bios_selections) { - ret = tlmi_get_bios_selections(setting->display_name, - &setting->possible_values); + ret = tlmi_get_bios_selections(setting->name, + &setting->possible_values); if (ret || !setting->possible_values) pr_info("Error retrieving possible values for %d : %s\n", i, setting->display_name); diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h index a80452482227..9b014644d316 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/think-lmi.h @@ -90,6 +90,7 @@ struct tlmi_attr_setting { struct kobject kobj; struct wmi_device *wdev; int index; + char name[TLMI_SETTINGS_MAXLEN]; char display_name[TLMI_SETTINGS_MAXLEN]; char *possible_values; }; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 92b21e49faf6..657625dd60a0 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -231,6 +231,7 @@ enum tpacpi_hkey_event_t { /* Thermal events */ TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */ + TP_HKEY_EV_ALARM_BAT_LIM_CHANGE = 0x6013, /* battery charge limit changed*/ TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */ TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */ TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */ @@ -3777,6 +3778,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, bool *send_acpi_ev) pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n"); /* recommended action: immediate sleep/hibernate */ break; + case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE: + pr_debug("Battery Info: battery charge threshold changed\n"); + /* User changed charging threshold. No action needed */ + return true; case TP_HKEY_EV_ALARM_SENSOR_HOT: pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n"); /* recommended action: warn user through gui, that */ diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 9b2f28b34bb5..d6c1ddb807b2 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -3126,7 +3126,7 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, /* Verify that the index is within a valid range. */ num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", "#power-domain-cells"); - if (index >= num_domains) + if (num_domains < 0 || index >= num_domains) return NULL; /* Allocate and register device on the genpd bus. 
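[Editorial sketch] The extra num_domains < 0 test added to genpd_dev_pm_attach_by_id() above closes a signed/unsigned trap: of_count_phandle_with_args() returns a negative errno on failure, and with index being an unsigned int the old `index >= num_domains` comparison promoted the negative count to a huge unsigned value and never triggered. A stand-alone illustration of the pitfall (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int index = 0;
	int num_domains = -22;			/* e.g. -EINVAL from the phandle lookup */

	if (index >= num_domains)		/* num_domains promotes to 4294967274 */
		printf("rejected\n");
	else
		printf("accepted despite the error\n");	/* this branch runs */

	return 0;
}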
*/ diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c index 66409cff2083..e001b5c25bed 100644 --- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c +++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c @@ -338,11 +338,6 @@ static int __init rcar_gen4_sysc_pd_init(void) struct rcar_gen4_sysc_pd *pd; size_t n; - if (!area->name) { - /* Skip NULLified area */ - continue; - } - n = strlen(area->name) + 1; pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c index dce1a6d37e80..047495f54e8a 100644 --- a/drivers/pmdomain/renesas/rcar-sysc.c +++ b/drivers/pmdomain/renesas/rcar-sysc.c @@ -396,11 +396,6 @@ static int __init rcar_sysc_pd_init(void) struct rcar_sysc_pd *pd; size_t n; - if (!area->name) { - /* Skip NULLified area */ - continue; - } - n = strlen(area->name) + 1; pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 6085a1471de2..6e1d4bfd28ac 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -290,7 +290,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev) #ifdef CONFIG_AMD_NB -#include <asm/amd_nb.h> +#include <asm/amd/nb.h> static void quirk_amd_mmconfig_area(struct pnp_dev *dev) { diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 5ab3feb29686..e3be40adc0d7 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -28,6 +28,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/iosf_mbi.h> +#include <asm/msr.h> /* bitmasks for RAPL MSRs, used by primitive access functions */ #define ENERGY_STATUS_MASK 0xffffffff diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 2b81aabdb0db..8ad2115d65f6 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -24,6 +24,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <asm/msr.h> /* Local defines */ #define MSR_PLATFORM_POWER_LIMIT 0x0000065C @@ -103,7 +104,7 @@ static int rapl_cpu_down_prep(unsigned int cpu) static int rapl_msr_read_raw(int cpu, struct reg_action *ra) { - if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) { + if (rdmsrq_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) { pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu); return -EIO; } @@ -116,14 +117,14 @@ static void rapl_msr_update_func(void *info) struct reg_action *ra = info; u64 val; - ra->err = rdmsrl_safe(ra->reg.msr, &val); + ra->err = rdmsrq_safe(ra->reg.msr, &val); if (ra->err) return; val &= ~ra->mask; val |= ra->value; - ra->err = wrmsrl_safe(ra->reg.msr, val); + ra->err = wrmsrq_safe(ra->reg.msr, val); } static int rapl_msr_write_raw(int cpu, struct reg_action *ra) diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 2ccdca4f6960..e63481f24238 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -315,6 +315,8 @@ struct ptp_ocp_serial_port { #define OCP_BOARD_ID_LEN 13 #define OCP_SERIAL_LEN 6 #define OCP_SMA_NUM 4 +#define OCP_SIGNAL_NUM 4 +#define OCP_FREQ_NUM 4 enum { PORT_GNSS, @@ -342,8 +344,8 @@ struct ptp_ocp { struct dcf_master_reg __iomem *dcf_out; struct dcf_slave_reg __iomem *dcf_in; struct tod_reg __iomem *nmea_out; - struct frequency_reg __iomem *freq_in[4]; - struct ptp_ocp_ext_src *signal_out[4]; + struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM]; + struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM]; 
struct ptp_ocp_ext_src *pps; struct ptp_ocp_ext_src *ts0; struct ptp_ocp_ext_src *ts1; @@ -378,10 +380,12 @@ struct ptp_ocp { u32 utc_tai_offset; u32 ts_window_adjust; u64 fw_cap; - struct ptp_ocp_signal signal[4]; + struct ptp_ocp_signal signal[OCP_SIGNAL_NUM]; struct ptp_ocp_sma_connector sma[OCP_SMA_NUM]; const struct ocp_sma_op *sma_op; struct dpll_device *dpll; + int signals_nr; + int freq_in_nr; }; #define OCP_REQ_TIMESTAMP BIT(0) @@ -2697,6 +2701,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->eeprom_map = fb_eeprom_map; bp->fw_version = ioread32(&bp->image->version); bp->sma_op = &ocp_fb_sma_op; + bp->signals_nr = 4; + bp->freq_in_nr = 4; ptp_ocp_fb_set_version(bp); @@ -2862,6 +2868,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->fw_version = ioread32(&bp->reg->version); bp->fw_tag = 2; bp->sma_op = &ocp_art_sma_op; + bp->signals_nr = 4; + bp->freq_in_nr = 4; /* Enable MAC serial port during initialisation */ iowrite32(1, &bp->board_config->mro50_serial_activate); @@ -2888,6 +2896,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->flash_start = 0xA00000; bp->eeprom_map = fb_eeprom_map; bp->sma_op = &ocp_adva_sma_op; + bp->signals_nr = 2; + bp->freq_in_nr = 2; version = ioread32(&bp->image->version); /* if lower 16 bits are empty, this is the fw loader. */ @@ -4008,7 +4018,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr) { struct signal_reg __iomem *reg = bp->signal_out[nr]->mem; struct ptp_ocp_signal *signal = &bp->signal[nr]; - char label[8]; + char label[16]; bool on; u32 val; @@ -4031,7 +4041,7 @@ static void _frequency_summary_show(struct seq_file *s, int nr, struct frequency_reg __iomem *reg) { - char label[8]; + char label[16]; bool on; u32 val; @@ -4175,11 +4185,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) } if (bp->fw_cap & OCP_CAP_SIGNAL) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->signals_nr; i++) _signal_summary_show(s, bp, i); if (bp->fw_cap & OCP_CAP_FREQ) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->freq_in_nr; i++) _frequency_summary_show(s, i, bp->freq_in[i]); if (bp->irig_out) { diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index d096b58cd0ae..2b6279d32774 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -17,8 +17,8 @@ #include <linux/bitops.h> #include <linux/ras.h> -#include <asm/amd_nb.h> -#include <asm/amd_node.h> +#include <asm/amd/nb.h> +#include <asm/amd/node.h> #include "reg_fields.h" diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 775b056d795a..2c7e519a2254 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -456,7 +456,8 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss, if (wcnss->num_pds) { info += wcnss->num_pds; /* Handle single power domain case */ - num_vregs += num_pd_vregs - wcnss->num_pds; + if (wcnss->num_pds < num_pd_vregs) + num_vregs += num_pd_vregs - wcnss->num_pds; } else { num_vregs += num_pd_vregs; } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 7248e547fefb..cdc7b2f16b88 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -314,7 +314,7 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info) if (*seg_info == NULL) return -ENOMEM; - strcpy((*seg_info)->segment_name, name); + strscpy((*seg_info)->segment_name, name); /* load the segment */ rc = segment_load(name, SEGMENT_SHARED, @@ -612,7 +612,7 @@ 
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char rc = -ENOMEM; goto out; } - strcpy(dev_info->segment_name, local_buf); + strscpy(dev_info->segment_name, local_buf); dev_info->segment_type = seg_info->segment_type; INIT_LIST_HEAD(&dev_info->seg_list); } diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 34f3820d7f74..8402a0042c0d 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -102,6 +102,7 @@ struct tty3270 { /* Input stuff. */ char *prompt; /* Output string for input area. */ + size_t prompt_sz; /* Size of output string. */ char *input; /* Input string for read request. */ struct raw3270_request *read; /* Single read request. */ struct raw3270_request *kreset; /* Single keyboard reset request. */ @@ -206,7 +207,7 @@ static int tty3270_input_size(int cols) static void tty3270_update_prompt(struct tty3270 *tp, char *input) { - strcpy(tp->prompt, input); + strscpy(tp->prompt, input, tp->prompt_sz); tp->update_flags |= TTY_UPDATE_INPUT; tty3270_set_timer(tp, 1); } @@ -971,6 +972,7 @@ static void tty3270_resize(struct raw3270_view *view, char *old_input, *new_input; struct tty_struct *tty; struct winsize ws; + size_t prompt_sz; int new_allocated, old_allocated = tp->allocated_lines; if (old_model == new_model && @@ -982,10 +984,11 @@ static void tty3270_resize(struct raw3270_view *view, return; } - new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(new_cols); + new_input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!new_input) return; - new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL); + new_prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!new_prompt) goto out_input; screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated); @@ -1010,6 +1013,7 @@ static void tty3270_resize(struct raw3270_view *view, old_rcl_lines = tp->rcl_lines; tp->input = new_input; tp->prompt = new_prompt; + tp->prompt_sz = prompt_sz; tp->rcl_lines = new_rcl_lines; tp->rcl_read_index = 0; tp->rcl_write_index = 0; @@ -1096,6 +1100,7 @@ static int tty3270_create_view(int index, struct tty3270 **newtp) { struct tty3270 *tp; + size_t prompt_sz; int rc; if (tty3270_max_index < index + 1) @@ -1125,17 +1130,19 @@ tty3270_create_view(int index, struct tty3270 **newtp) goto out_free_screen; } - tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(tp->view.cols); + tp->input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!tp->input) { rc = -ENOMEM; goto out_free_converted_line; } - tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL); + tp->prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!tp->prompt) { rc = -ENOMEM; goto out_free_input; } + tp->prompt_sz = prompt_sz; tp->rcl_lines = tty3270_alloc_recall(tp->view.cols); if (!tp->rcl_lines) { diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c index 711f6982438e..f41b39c9d267 100644 --- a/drivers/s390/char/diag_ftp.c +++ b/drivers/s390/char/diag_ftp.c @@ -159,7 +159,7 @@ ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize) goto out; } - len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident)); + len = strscpy(ldfpl->fident, ftp->fname); if (len < 0) { len = -EINVAL; goto out_free; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 1564cd7e3f59..288734cd8f4b 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -41,6 +41,7 @@ #include 
<linux/module.h> #include <asm/uv.h> #include <asm/chsc.h> +#include <linux/mempool.h> #include "ap_bus.h" #include "ap_debug.h" @@ -103,6 +104,27 @@ static struct ap_config_info *const ap_qci_info_old = &qci[1]; debug_info_t *ap_dbf_info; /* + * There is a need for a do-not-allocate-memory path through the AP bus + * layer. The pkey layer may be triggered via the in-kernel interface from + * a protected key crypto algorithm (namely PAES) to convert a secure key + * into a protected key. This happens in a workqueue context, so sleeping + * is allowed but memory allocations causing IO operations are not permitted. + * To accomplish this, an AP message memory pool with pre-allocated space + * is established. When ap_init_apmsg() with use_mempool set to true is + * called, instead of kmalloc() the ap message buffer is allocated from + * the ap_msg_pool. This pool only holds a limited amount of buffers: + * ap_msg_pool_min_items with the item size AP_DEFAULT_MAX_MSG_SIZE and + * exactly one of these items (if available) is returned if ap_init_apmsg() + * with the use_mempool arg set to true is called. When this pool is exhausted + * and use_mempool is set true, ap_init_apmsg() returns -ENOMEM without + * any attempt to allocate memory and the caller has to deal with that. + */ +static mempool_t *ap_msg_pool; +static unsigned int ap_msg_pool_min_items = 8; +module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440); +MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items"); + +/* * AP bus rescan related things. */ static bool ap_scan_bus(void); @@ -547,6 +569,48 @@ static void ap_poll_thread_stop(void) #define is_card_dev(x) ((x)->parent == ap_root_device) #define is_queue_dev(x) ((x)->parent != ap_root_device) +/* + * ap_init_apmsg() - Initialize ap_message. + */ +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags) +{ + unsigned int maxmsgsize; + + memset(ap_msg, 0, sizeof(*ap_msg)); + ap_msg->flags = flags; + + if (flags & AP_MSG_FLAG_MEMPOOL) { + ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; + return 0; + } + + maxmsgsize = atomic_read(&ap_max_msg_size); + ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = maxmsgsize; + + return 0; +} +EXPORT_SYMBOL(ap_init_apmsg); + +/* + * ap_release_apmsg() - Release ap_message. 
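[Editorial sketch] The comment block above gives the motivation for the AP message mempool; in practice a caller on the no-allocation path (such as the pkey/PAES secure-to-protected key conversion running from a workqueue) would use the new helpers roughly as below. This is a hypothetical caller sketched for illustration — only ap_init_apmsg(), ap_release_apmsg() and AP_MSG_FLAG_MEMPOOL come from the patch itself:

static int build_request_no_alloc(void)
{
	struct ap_message ap_msg;
	int rc;

	/* buffer comes from the preallocated pool; -ENOMEM if the pool is exhausted */
	rc = ap_init_apmsg(&ap_msg, AP_MSG_FLAG_MEMPOOL);
	if (rc)
		return rc;

	/* ... build and send the request in ap_msg.msg (bufsize is AP_DEFAULT_MAX_MSG_SIZE) ... */

	ap_release_apmsg(&ap_msg);	/* wipes the buffer and hands it back to the pool */
	return 0;
}

On the ordinary path the flags argument is simply 0 and the buffer is kmalloc()ed at the current ap_max_msg_size instead.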
+ */ +void ap_release_apmsg(struct ap_message *ap_msg) +{ + if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) { + memzero_explicit(ap_msg->msg, ap_msg->bufsize); + mempool_free(ap_msg->msg, ap_msg_pool); + } else { + kfree_sensitive(ap_msg->msg); + } +} +EXPORT_SYMBOL(ap_release_apmsg); + /** * ap_bus_match() * @dev: Pointer to device @@ -2431,6 +2495,14 @@ static int __init ap_module_init(void) /* init ap_queue hashtable */ hash_init(ap_queues); + /* create ap msg buffer memory pool */ + ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items, + AP_DEFAULT_MAX_MSG_SIZE); + if (!ap_msg_pool) { + rc = -ENOMEM; + goto out; + } + /* set up the AP permissions (ioctls, ap and aq masks) */ ap_perms_init(); @@ -2477,6 +2549,7 @@ out_device: out_bus: bus_unregister(&ap_bus_type); out: + mempool_destroy(ap_msg_pool); ap_debug_exit(); return rc; } @@ -2487,6 +2560,7 @@ static void __exit ap_module_exit(void) ap_irq_exit(); root_device_unregister(ap_root_device); bus_unregister(&ap_bus_type); + mempool_destroy(ap_msg_pool); ap_debug_exit(); } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index f4622ee4d894..88b625ba1978 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -214,6 +214,11 @@ struct ap_queue { typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue); +struct ap_response_type { + struct completion work; + int type; +}; + struct ap_message { struct list_head list; /* Request queueing. */ unsigned long psmid; /* Message id. */ @@ -222,7 +227,7 @@ struct ap_message { size_t bufsize; /* allocated msg buffer size */ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ int rc; /* Return code for this message */ - void *private; /* ap driver private pointer. */ + struct ap_response_type response; /* receive is called from tasklet context */ void (*receive)(struct ap_queue *, struct ap_message *, struct ap_message *); @@ -231,27 +236,10 @@ struct ap_message { #define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */ #define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */ #define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */ +#define AP_MSG_FLAG_MEMPOOL 0x0008 /* ap msg buffer allocated via mempool */ -/** - * ap_init_message() - Initialize ap_message. - * Initialize a message before using. Otherwise this might result in - * unexpected behaviour. - */ -static inline void ap_init_message(struct ap_message *ap_msg) -{ - memset(ap_msg, 0, sizeof(*ap_msg)); -} - -/** - * ap_release_message() - Release ap_message. - * Releases all memory used internal within the ap_message struct - * Currently this is the message and private field. 
- */ -static inline void ap_release_message(struct ap_message *ap_msg) -{ - kfree_sensitive(ap_msg->msg); - kfree_sensitive(ap_msg->private); -} +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags); +void ap_release_apmsg(struct ap_message *ap_msg); enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event); enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3a39e167bdbf..cef60770f68b 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -24,7 +24,8 @@ */ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { int rc; @@ -32,14 +33,14 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, rc = pkey_handler_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); /* if this did not work, try the slowpath way */ if (rc == -ENODEV) { rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); if (rc) rc = -ENODEV; } @@ -52,16 +53,16 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, * In-Kernel function: Transform a key blob (of any type) into a protected key */ int pkey_key2protkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { int rc; rc = key2protkey(NULL, 0, key, keylen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, xflags); if (rc == -ENODEV) { pkey_handler_request_modules(); rc = key2protkey(NULL, 0, key, keylen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, xflags); } return rc; @@ -103,7 +104,7 @@ static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs) keybuflen = sizeof(kgs.seckey.seckey); rc = pkey_handler_gen_key(&apqn, 1, kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, - kgs.seckey.seckey, &keybuflen, NULL); + kgs.seckey.seckey, &keybuflen, NULL, 0); pr_debug("gen_key()=%d\n", rc); if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) rc = -EFAULT; @@ -129,7 +130,7 @@ static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs) kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, kcs.clrkey.clrkey, pkey_keytype_aes_to_size(kcs.keytype), - kcs.seckey.seckey, &keybuflen, NULL); + kcs.seckey.seckey, &keybuflen, NULL, 0); pr_debug("clr_to_key()=%d\n", rc); if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs))) rc = -EFAULT; @@ -154,7 +155,8 @@ static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp) ksp.seckey.seckey, sizeof(ksp.seckey.seckey), ksp.protkey.protkey, - &ksp.protkey.len, &ksp.protkey.type); + &ksp.protkey.len, &ksp.protkey.type, + 0); pr_debug("key_to_protkey()=%d\n", rc); if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) rc = -EFAULT; @@ -198,7 +200,7 @@ static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp) rc = key2protkey(NULL, 0, tmpbuf, sizeof(*t) + keylen, kcp.protkey.protkey, - &kcp.protkey.len, &kcp.protkey.type); + &kcp.protkey.len, &kcp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree_sensitive(tmpbuf); @@ -228,12 +230,12 @@ static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc) rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, sizeof(kfc.seckey.seckey), PKEY_FLAGS_MATCH_CUR_MKVP, - apqns, &nr_apqns); + apqns, &nr_apqns, 
0); if (rc == -ENODEV) rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, sizeof(kfc.seckey.seckey), PKEY_FLAGS_MATCH_ALT_MKVP, - apqns, &nr_apqns); + apqns, &nr_apqns, 0); pr_debug("apqns_for_key()=%d\n", rc); if (rc) { kfree(apqns); @@ -262,7 +264,7 @@ static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp) sizeof(ksp.seckey.seckey), ksp.protkey.protkey, &ksp.protkey.len, - &ksp.protkey.type); + &ksp.protkey.type, 0); pr_debug("key_to_protkey()=%d\n", rc); if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) rc = -EFAULT; @@ -285,7 +287,7 @@ static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk) rc = pkey_handler_verify_key(kvk.seckey.seckey, sizeof(kvk.seckey.seckey), &kvk.cardnr, &kvk.domain, - &keytype, &keybitsize, &flags); + &keytype, &keybitsize, &flags, 0); pr_debug("verify_key()=%d\n", rc); if (!rc && keytype != PKEY_TYPE_CCA_DATA) rc = -EINVAL; @@ -312,7 +314,7 @@ static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp) rc = pkey_handler_gen_key(NULL, 0, kgp.keytype, PKEY_TYPE_PROTKEY, 0, 0, kgp.protkey.protkey, &kgp.protkey.len, - &kgp.protkey.type); + &kgp.protkey.type, 0); pr_debug("gen_key()=%d\n", rc); if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) rc = -EFAULT; @@ -354,7 +356,7 @@ static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp) memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len); rc = pkey_handler_verify_key(tmpbuf, sizeof(*t), - NULL, NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL, 0); pr_debug("verify_key()=%d\n", rc); kfree_sensitive(tmpbuf); @@ -377,7 +379,7 @@ static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp) ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = key2protkey(NULL, 0, kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); + &ktp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree_sensitive(kkey); if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) @@ -414,7 +416,7 @@ static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs) } rc = pkey_handler_gen_key(apqns, kgs.apqn_entries, u, kgs.type, kgs.size, kgs.keygenflags, - kkey, &klen, NULL); + kkey, &klen, NULL, 0); pr_debug("gen_key()=%d\n", rc); kfree(apqns); if (rc) { @@ -471,7 +473,7 @@ static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs) rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries, u, kcs.type, kcs.size, kcs.keygenflags, kcs.clrkey.clrkey, kcs.size / 8, - kkey, &klen, NULL); + kkey, &klen, NULL, 0); pr_debug("clr_to_key()=%d\n", rc); kfree(apqns); if (rc) { @@ -514,7 +516,7 @@ static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk) rc = pkey_handler_verify_key(kkey, kvk.keylen, &kvk.cardnr, &kvk.domain, - &kvk.type, &kvk.size, &kvk.flags); + &kvk.type, &kvk.size, &kvk.flags, 0); pr_debug("verify_key()=%d\n", rc); kfree_sensitive(kkey); @@ -544,7 +546,7 @@ static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp) ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); + &ktp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree(apqns); kfree_sensitive(kkey); @@ -579,7 +581,7 @@ static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak) return PTR_ERR(kkey); } rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags, - apqns, &nr_apqns); + apqns, &nr_apqns, 0); pr_debug("apqns_for_key()=%d\n", rc); kfree_sensitive(kkey); if (rc && rc != -ENOSPC) { @@ -626,7 +628,7 @@ static int pkey_ioctl_apqns4kt(struct 
pkey_apqns4keytype __user *uat) } rc = pkey_handler_apqns_for_keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, - kat.flags, apqns, &nr_apqns); + kat.flags, apqns, &nr_apqns, 0); pr_debug("apqns_for_keytype()=%d\n", rc); if (rc && rc != -ENOSPC) { kfree(apqns); @@ -678,7 +680,7 @@ static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp) return -ENOMEM; } rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, - protkey, &protkeylen, &ktp.pkeytype); + protkey, &protkeylen, &ktp.pkeytype, 0); pr_debug("key2protkey()=%d\n", rc); kfree(apqns); kfree_sensitive(kkey); diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c index 64a376501d26..9e6f319acc63 100644 --- a/drivers/s390/crypto/pkey_base.c +++ b/drivers/s390/crypto/pkey_base.c @@ -150,7 +150,8 @@ EXPORT_SYMBOL(pkey_handler_put); int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -159,7 +160,7 @@ int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->key_to_protkey) { rc = h->key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); } pkey_handler_put(h); @@ -177,7 +178,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 xflags) { const struct pkey_handler *h, *htmp[10]; int i, n = 0, rc = -ENODEV; @@ -199,7 +200,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, rc = h->slowpath_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); module_put(h->module); } @@ -210,7 +211,7 @@ EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey); int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -219,7 +220,7 @@ int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->gen_key) { rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, xflags); } pkey_handler_put(h); @@ -231,7 +232,8 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -240,7 +242,7 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->clr_to_key) { rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype, keybitsize, flags, clrkey, clrkeylen, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, xflags); } pkey_handler_put(h); @@ -250,7 +252,8 @@ EXPORT_SYMBOL(pkey_handler_clr_to_key); int pkey_handler_verify_key(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -258,7 +261,7 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen, h = 
pkey_handler_get_keybased(key, keylen); if (h && h->verify_key) { rc = h->verify_key(key, keylen, card, dom, - keytype, keybitsize, flags); + keytype, keybitsize, flags, xflags); } pkey_handler_put(h); @@ -267,14 +270,16 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen, EXPORT_SYMBOL(pkey_handler_verify_key); int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; h = pkey_handler_get_keybased(key, keylen); if (h && h->apqns_for_key) - rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns); + rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns, + xflags); pkey_handler_put(h); return rc; @@ -283,7 +288,8 @@ EXPORT_SYMBOL(pkey_handler_apqns_for_key); int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -292,7 +298,7 @@ int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, if (h && h->apqns_for_keytype) { rc = h->apqns_for_keytype(keysubtype, cur_mkvp, alt_mkvp, flags, - apqns, nr_apqns); + apqns, nr_apqns, xflags); } pkey_handler_put(h); diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h index 7347647dfaa7..9cdb3e74477f 100644 --- a/drivers/s390/crypto/pkey_base.h +++ b/drivers/s390/crypto/pkey_base.h @@ -159,29 +159,33 @@ struct pkey_handler { bool (*is_supported_keytype)(enum pkey_key_type); int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int (*verify_key)(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags); + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); int (*apqns_for_keytype)(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); /* used internal by pkey base */ struct list_head list; }; @@ -199,29 +203,34 @@ void pkey_handler_put(const struct pkey_handler *handler); int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 
*protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags); int pkey_handler_verify_key(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags); + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); /* * Unconditional try to load all handler modules diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c index cda22db31f6c..6c7897a93f27 100644 --- a/drivers/s390/crypto/pkey_cca.c +++ b/drivers/s390/crypto/pkey_cca.c @@ -70,12 +70,15 @@ static bool is_cca_keytype(enum pkey_key_type key_type) } static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (!flags) flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP; @@ -107,9 +110,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, /* unknown CCA internal token type */ return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -126,9 +129,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, /* unknown CCA internal 2 token type */ return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -147,18 +150,21 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_apqns4type(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 pflags) { - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + zcrypt_wait_api_operational(); if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { @@ -171,9 +177,9 @@ static int cca_apqns4type(enum pkey_key_type ktype, old_mkvp = *((u64 *)alt_mkvp); if (ktype == PKEY_TYPE_CCA_CIPHER) minhwtype = ZCRYPT_CEX6; - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -184,9 +190,9 @@ static int cca_apqns4type(enum pkey_key_type ktype, cur_mkvp = *((u64 *)cur_mkvp); if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) old_mkvp = *((u64 *)alt_mkvp); - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -205,19 +211,22 @@ static int cca_apqns4type(enum pkey_key_type ktype, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; int i, rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -253,14 +262,10 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; - rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + rc = cca_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { @@ -268,16 +273,16 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, hdr->version == TOKVER_CCA_AES) { rc = cca_sec2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_CCA_INTERNAL && hdr->version == TOKVER_CCA_VLSC) { rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else { rc = -EINVAL; break; @@ -285,7 +290,6 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -302,10 +306,13 @@ out: static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, keybitsize */ switch (keytype) { @@ -340,32 +347,27 @@ static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = cca_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { if (subtype == PKEY_TYPE_CCA_CIPHER) { rc = cca_gencipherkey(apqns[i].card, apqns[i].domain, keybitsize, flags, - keybuf, keybuflen); + keybuf, keybuflen, xflags); } else { /* PKEY_TYPE_CCA_DATA */ rc = cca_genseckey(apqns[i].card, apqns[i].domain, - keybitsize, keybuf); + keybitsize, keybuf, xflags); *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); } } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -383,10 +385,13 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, clrkeylen, keybitsize */ switch (keytype) { @@ -426,44 +431,42 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = cca_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { if (subtype == PKEY_TYPE_CCA_CIPHER) { rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain, keybitsize, flags, clrkey, - keybuf, keybuflen); + keybuf, keybuflen, xflags); } else { /* PKEY_TYPE_CCA_DATA */ rc = cca_clr2seckey(apqns[i].card, apqns[i].domain, - keybitsize, clrkey, keybuf); + keybitsize, clrkey, keybuf, xflags); *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); } } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_verifykey(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 nr_apqns, *apqns = NULL; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -478,15 +481,16 @@ static int cca_verifykey(const u8 *key, u32 keylen, goto out; *keytype = PKEY_TYPE_CCA_DATA; *keybitsize = t->bitsize; - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX3C, AES_MK_SET, - t->mkvp, 0, 1); + t->mkvp, 0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX3C, AES_MK_SET, - 0, t->mkvp, 1); + 0, t->mkvp, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -511,15 +515,16 @@ static int cca_verifykey(const u8 *key, u32 keylen, *keybitsize = PKEY_SIZE_AES_192; else if (!t->plfver && t->wpllen == 640) *keybitsize = PKEY_SIZE_AES_256; - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX6, AES_MK_SET, - t->mkvp0, 0, 1); + t->mkvp0, 0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX6, AES_MK_SET, - 0, t->mkvp0, 1); + 0, t->mkvp0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -535,7 +540,6 @@ static int cca_verifykey(const u8 *key, u32 keylen, } out: - kfree(apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -551,12 +555,12 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 pflags) { const struct keytoken_header *hdr = (const struct keytoken_header *)key; const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[SECKEYBLOBSIZE]; /* 64 bytes */ u32 tmplen, keysize = 0; - u8 *tmpbuf; int i, rc; if (keylen < sizeof(*hdr)) @@ -568,26 +572,20 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, if (!keysize || t->len != keysize) return -EINVAL; - /* alloc tmp key buffer */ - tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - /* try two times in case of failure */ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { tmplen = SECKEYBLOBSIZE; rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA, 8 * keysize, 0, t->clearkey, t->len, - tmpbuf, &tmplen, NULL); + tmpbuf, &tmplen, NULL, pflags); pr_debug("cca_clr2key()=%d\n", rc); if (rc) continue; rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, pflags); pr_debug("cca_key2protkey()=%d\n", rc); } - kfree(tmpbuf); pr_debug("rc=%d\n", rc); return rc; } diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c index 5b033ca3e828..6b23adc560c8 100644 --- a/drivers/s390/crypto/pkey_ep11.c +++ b/drivers/s390/crypto/pkey_ep11.c @@ -70,12 +70,15 @@ static bool is_ep11_keytype(enum pkey_key_type key_type) } static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (!flags) flags = PKEY_FLAGS_MATCH_CUR_MKVP; @@ -98,8 +101,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, minhwtype = ZCRYPT_CEX7; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); if (rc) goto out; @@ -115,8 +118,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, minhwtype = ZCRYPT_CEX7; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); if (rc) goto out; @@ -135,18 +138,20 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_apqns4type(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + zcrypt_wait_api_operational(); if (ktype == PKEY_TYPE_EP11 || @@ -158,8 +163,8 @@ static int ep11_apqns4type(enum pkey_key_type ktype, if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) wkvp = cur_mkvp; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, api, wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, api, wkvp, xflags); if (rc) goto out; @@ -178,19 +183,22 @@ static int ep11_apqns4type(enum pkey_key_type ktype, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; int i, rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -225,14 +233,10 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; - rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + rc = ep11_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { @@ -241,19 +245,19 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_ECC_WITH_HEADER && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES && is_ep11_keyblob(key)) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else { rc = -EINVAL; break; @@ -261,7 +265,6 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -278,10 +281,13 @@ out: static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, keybitsize */ switch (keytype) { @@ -316,25 +322,20 @@ static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = ep11_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { rc = ep11_genaeskey(apqns[i].card, apqns[i].domain, keybitsize, flags, - keybuf, keybuflen, subtype); + keybuf, keybuflen, subtype, xflags); } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -352,10 +353,13 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, clrkeylen, keybitsize */ switch (keytype) { @@ -395,37 +399,35 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = ep11_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain, keybitsize, flags, clrkey, - keybuf, keybuflen, subtype); + keybuf, keybuflen, subtype, xflags); } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_verifykey(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 nr_apqns, *apqns = NULL; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -443,9 +445,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen, *keybitsize = kb->head.bitlen; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); + ep11_kb_wkvp(key, keylen), xflags); if (rc) goto out; @@ -467,9 +469,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen, *keybitsize = kh->bitlen; api = ap_is_se_guest() ? 
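
Both the CCA and the EP11 backend functions above now start with the same two-line translation from the pkey layer's pflags to the zcrypt layer's xflags. A hypothetical helper, not introduced by the patch, that captures this recurring idiom using only the two flag definitions already used above:

	/* Hypothetical helper, equivalent to the open-coded ternary above. */
	static inline u32 pkey_to_zcrypt_xflags(u32 pflags)
	{
		/* Only the "no kmalloc on this path" hint is forwarded to the
		 * zcrypt layer; all other pkey flags stay within pkey code. */
		return (pflags & PKEY_XFLAG_NOMEMALLOC) ?
			ZCRYPT_XFLAG_NOMEMALLOC : 0;
	}
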
EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); + ep11_kb_wkvp(key, keylen), xflags); if (rc) goto out; @@ -484,7 +486,6 @@ static int ep11_verifykey(const u8 *key, u32 keylen, } out: - kfree(apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -500,12 +501,12 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 pflags) { const struct keytoken_header *hdr = (const struct keytoken_header *)key; const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[MAXEP11AESKEYBLOBSIZE]; /* 336 bytes */ u32 tmplen, keysize = 0; - u8 *tmpbuf; int i, rc; if (keylen < sizeof(*hdr)) @@ -517,26 +518,20 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, if (!keysize || t->len != keysize) return -EINVAL; - /* alloc tmp key buffer */ - tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - /* try two times in case of failure */ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { tmplen = MAXEP11AESKEYBLOBSIZE; rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11, 8 * keysize, 0, t->clearkey, t->len, - tmpbuf, &tmplen, NULL); + tmpbuf, &tmplen, NULL, pflags); pr_debug("ep11_clr2key()=%d\n", rc); if (rc) continue; rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, pflags); pr_debug("ep11_key2protkey()=%d\n", rc); } - kfree(tmpbuf); pr_debug("rc=%d\n", rc); return rc; } diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c index 835d59f4fbc5..7eca9f1340bd 100644 --- a/drivers/s390/crypto/pkey_pckmo.c +++ b/drivers/s390/crypto/pkey_pckmo.c @@ -406,7 +406,8 @@ out: static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, size_t _nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *keyinfo) + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) { return pckmo_key2protkey(key, keylen, protkey, protkeylen, keyinfo); @@ -415,7 +416,8 @@ static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, u32 keytype, u32 keysubtype, u32 _keybitsize, u32 _flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 _xflags __always_unused) { return pckmo_gen_protkey(keytype, keysubtype, keybuf, keybuflen, keyinfo); @@ -423,7 +425,8 @@ static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, static int pkey_pckmo_verifykey(const u8 *key, u32 keylen, u16 *_card, u16 *_dom, - u32 *_keytype, u32 *_keybitsize, u32 *_flags) + u32 *_keytype, u32 *_keybitsize, + u32 *_flags, u32 _xflags __always_unused) { return pckmo_verify_key(key, keylen); } diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c index 57edc97bafd2..cea772973649 100644 --- a/drivers/s390/crypto/pkey_sysfs.c +++ b/drivers/s390/crypto/pkey_sysfs.c @@ -29,13 +29,13 @@ static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype, rc = pkey_handler_gen_key(NULL, 0, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, 0); if (rc == -ENODEV) { pkey_handler_request_modules(); rc = pkey_handler_gen_key(NULL, 0, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + 
keybuf, keybuflen, keyinfo, 0); } return rc; diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c index 805817b14354..e5c6e01acaf3 100644 --- a/drivers/s390/crypto/pkey_uv.c +++ b/drivers/s390/crypto/pkey_uv.c @@ -21,6 +21,12 @@ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 protected key UV handler"); /* + * One pre-allocated uv_secret_list for use with uv_find_secret() + */ +static struct uv_secret_list *uv_list; +static DEFINE_MUTEX(uv_list_mutex); + +/* * UV secret token struct and defines. */ @@ -85,13 +91,26 @@ static bool is_uv_keytype(enum pkey_key_type keytype) } } +static int get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list_item_hdr *secret) +{ + int rc; + + mutex_lock(&uv_list_mutex); + memset(uv_list, 0, sizeof(*uv_list)); + rc = uv_find_secret(secret_id, uv_list, secret); + mutex_unlock(&uv_list_mutex); + + return rc; +} + static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN], u16 *secret_type, u8 *buf, u32 *buflen) { struct uv_secret_list_item_hdr secret_meta_data; int rc; - rc = uv_get_secret_metadata(secret_id, &secret_meta_data); + rc = get_secret_metadata(secret_id, &secret_meta_data); if (rc) return rc; @@ -172,7 +191,8 @@ static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype) static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused, size_t _nr_apqns __always_unused, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *keyinfo) + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) { struct uvsecrettoken *t = (struct uvsecrettoken *)key; u32 pkeysize, pkeytype; @@ -214,7 +234,8 @@ out: static int uv_verifykey(const u8 *key, u32 keylen, u16 *_card __always_unused, u16 *_dom __always_unused, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags __always_unused) { struct uvsecrettoken *t = (struct uvsecrettoken *)key; struct uv_secret_list_item_hdr secret_meta_data; @@ -225,7 +246,7 @@ static int uv_verifykey(const u8 *key, u32 keylen, if (rc) goto out; - rc = uv_get_secret_metadata(t->secret_id, &secret_meta_data); + rc = get_secret_metadata(t->secret_id, &secret_meta_data); if (rc) goto out; @@ -263,13 +284,23 @@ static struct pkey_handler uv_handler = { */ static int __init pkey_uv_init(void) { + int rc; + if (!is_prot_virt_guest()) return -ENODEV; if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list)) return -ENODEV; - return pkey_handler_register(&uv_handler); + uv_list = kmalloc(sizeof(*uv_list), GFP_KERNEL); + if (!uv_list) + return -ENOMEM; + + rc = pkey_handler_register(&uv_handler); + if (rc) + kfree(uv_list); + + return rc; } /* @@ -278,6 +309,9 @@ static int __init pkey_uv_init(void) static void __exit pkey_uv_exit(void) { pkey_handler_unregister(&uv_handler); + mutex_lock(&uv_list_mutex); + kvfree(uv_list); + mutex_unlock(&uv_list_mutex); } module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 5020696f1379..89baa87a13fc 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -50,6 +50,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 
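
In the pkey_uv.c hunks above, the former per-call uv_get_secret_metadata() lookup is replaced by get_secret_metadata(), which reuses a single uv_secret_list buffer allocated once in pkey_uv_init() and serialized by uv_list_mutex. Any further user of that shared buffer would have to follow the same lock/clear/query discipline; a hypothetical second caller, shown only to illustrate the pattern (the function name is not in the patch):

	/* Hypothetical additional user of the shared, pre-allocated uv_list. */
	static int example_find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
				       struct uv_secret_list_item_hdr *hdr)
	{
		int rc;

		mutex_lock(&uv_list_mutex);
		memset(uv_list, 0, sizeof(*uv_list)); /* scrub previous contents */
		rc = uv_find_secret(secret_id, uv_list, hdr);
		mutex_unlock(&uv_list_mutex);

		return rc;
	}
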
2001, 2012"); MODULE_LICENSE("GPL"); +unsigned int zcrypt_mempool_threshold = 5; +module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440); +MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)"); + /* * zcrypt tracepoint functions */ @@ -642,16 +646,17 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; if (mex->outputdatalength < mex->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -728,7 +733,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -746,16 +751,17 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(crt, TP_ICARSACRT); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; if (crt->outputdatalength < crt->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -832,7 +838,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -842,23 +848,28 @@ out: return rc; } -static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms, struct zcrypt_track *tr, struct ica_xcRB *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; unsigned short *domain, tdom; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); xcrb->status = 0; - ap_init_message(&ap_msg); + + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 
+ AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) @@ -962,7 +973,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -972,7 +983,7 @@ out: return rc; } -long zcrypt_send_cprb(struct ica_xcRB *xcrb) +long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags) { struct zcrypt_track tr; int rc; @@ -980,13 +991,13 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb) memset(&tr, 0, sizeof(tr)); do { - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1024,50 +1035,50 @@ static bool is_desired_ep11_queue(unsigned int dev_qid, return false; } -static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms, struct zcrypt_track *tr, struct ep11_urb *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; - struct ep11_target_dev *targets; + struct ep11_target_dev *targets = NULL; unsigned short target_num; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code, domain; + unsigned int func_code = 0, domain; struct ap_message ap_msg; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 
+ AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; target_num = (unsigned short)xcrb->targets_num; /* empty list indicates autoselect (all available targets) */ - targets = NULL; + rc = -ENOMEM; if (target_num != 0) { - struct ep11_target_dev __user *uptr; - - targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); - if (!targets) { - func_code = 0; - rc = -ENOMEM; - goto out; - } - - uptr = (struct ep11_target_dev __force __user *)xcrb->targets; - if (z_copy_from_user(userspace, targets, uptr, - target_num * sizeof(*targets))) { - func_code = 0; - rc = -EFAULT; - goto out_free; + if (userspace) { + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); + if (!targets) + goto out; + if (copy_from_user(targets, xcrb->targets, + target_num * sizeof(*targets))) { + rc = -EFAULT; + goto out; + } + } else { + targets = (struct ep11_target_dev __force __kernel *)xcrb->targets; } } rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) - goto out_free; + goto out; print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1, ap_msg.msg, ap_msg.len, false); @@ -1075,11 +1086,11 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { if (!test_bit_inv(domain, perms->adm)) { rc = -ENODEV; - goto out_free; + goto out; } } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { rc = -EOPNOTSUPP; - goto out_free; + goto out; } } @@ -1147,7 +1158,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, pr_debug("no match for address ff.ffff => ENODEV\n"); } rc = -ENODEV; - goto out_free; + goto out; } qid = pref_zq->queue->qid; @@ -1161,10 +1172,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); -out_free: - kfree(targets); out: - ap_release_message(&ap_msg); + if (userspace) + kfree(targets); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -1174,7 +1185,7 @@ out: return rc; } -long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags) { struct zcrypt_track tr; int rc; @@ -1182,13 +1193,13 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) memset(&tr, 0, sizeof(tr)); do { - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1204,7 +1215,7 @@ static long zcrypt_rng(char *buffer) struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; struct ap_message ap_msg; unsigned int domain; int qid = 0, rc = -ENODEV; @@ -1212,7 +1223,9 @@ static long zcrypt_rng(char *buffer) trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain); if (rc) goto out; @@ -1258,7 +1271,7 @@ static long zcrypt_rng(char *buffer) spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + 
ap_release_apmsg(&ap_msg); trace_s390_zcrypt_rep(buffer, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; @@ -1291,19 +1304,25 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) spin_unlock(&zcrypt_list_lock); } -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue) { struct zcrypt_card *zc; struct zcrypt_queue *zq; struct zcrypt_device_status_ext *stat; int card, queue; + maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT); + maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT); + spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { card = AP_QID_CARD(zq->queue->qid); queue = AP_QID_QUEUE(zq->queue->qid); - stat = &devstatus[card * AP_DOMAINS + queue]; + if (card >= maxcard || queue >= maxqueue) + continue; + stat = &devstatus[card * maxqueue + queue]; stat->hwtype = zc->card->ap_dev.device_type; stat->functions = zc->card->hwinfo.fac >> 26; stat->qid = zq->queue->qid; @@ -1523,6 +1542,7 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ica_xcRB xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ica_xcRB __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1530,13 +1550,13 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1553,6 +1573,7 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ep11_urb xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ep11_urb __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1560,13 +1581,13 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1607,7 +1628,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, GFP_KERNEL); if (!device_status) return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + zcrypt_device_status_mask_ext(device_status, + MAX_ZDEV_CARDIDS_EXT, + MAX_ZDEV_DOMAINS_EXT); if (copy_to_user((char __user *)arg, device_status, total_size)) rc = -EFAULT; @@ -1827,6 +1850,7 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct 
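
The reworked zcrypt_device_status_mask_ext() above no longer assumes an AP_DOMAINS-sized array; the caller states the dimensions, and entries are indexed as card * maxqueue + queue. A small sketch of a caller-sized snapshot, assuming the 128 x 128 bounds used by the ccamisc code later in this patch (the function name is illustrative):

	/* Sketch: take a bounded snapshot of the crypto device status. */
	static void example_snapshot_devices(void)
	{
		/* 128 * 128 entries, kept static to stay off the stack */
		static struct zcrypt_device_status_ext stat[128 * 128];

		memset(stat, 0, sizeof(stat));
		zcrypt_device_status_mask_ext(stat, 128, 128);
		/* stat[card * 128 + queue] now describes that APQN */
	}
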
compat_ica_xcrb xcrb32; struct zcrypt_track tr; struct ica_xcRB xcrb64; @@ -1856,13 +1880,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp, xcrb64.priority_window = xcrb32.priority_window; xcrb64.status = xcrb32.status; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -2132,13 +2156,27 @@ int __init zcrypt_api_init(void) { int rc; + /* make sure the mempool threshold is >= 1 */ + if (zcrypt_mempool_threshold < 1) { + rc = -EINVAL; + goto out; + } + rc = zcrypt_debug_init(); if (rc) goto out; rc = zcdn_init(); if (rc) - goto out; + goto out_zcdn_init_failed; + + rc = zcrypt_ccamisc_init(); + if (rc) + goto out_ccamisc_init_failed; + + rc = zcrypt_ep11misc_init(); + if (rc) + goto out_ep11misc_init_failed; /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); @@ -2151,7 +2189,12 @@ int __init zcrypt_api_init(void) return 0; out_misc_register_failed: + zcrypt_ep11misc_exit(); +out_ep11misc_init_failed: + zcrypt_ccamisc_exit(); +out_ccamisc_init_failed: zcdn_exit(); +out_zcdn_init_failed: zcrypt_debug_exit(); out: return rc; diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 4ed481df57ca..6ef8850a42df 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -76,6 +76,13 @@ struct zcrypt_track { #define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000 #define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000 +/* + * xflags - to be used with zcrypt_send_cprb() and + * zcrypt_send_ep11_cprb() for the xflags parameter. 
+ */ +#define ZCRYPT_XFLAG_USERSPACE 0x0001 /* data ptrs address userspace */ +#define ZCRYPT_XFLAG_NOMEMALLOC 0x0002 /* do not allocate memory via kmalloc */ + struct zcrypt_ops { long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *, struct ap_message *); @@ -132,6 +139,8 @@ extern atomic_t zcrypt_rescan_req; extern spinlock_t zcrypt_list_lock; extern struct list_head zcrypt_card_list; +extern unsigned int zcrypt_mempool_threshold; + #define for_each_zcrypt_card(_zc) \ list_for_each_entry(_zc, &zcrypt_card_list, list) @@ -161,9 +170,10 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *); struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); -long zcrypt_send_cprb(struct ica_xcRB *xcRB); -long zcrypt_send_ep11_cprb(struct ep11_urb *urb); -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); +long zcrypt_send_cprb(struct ica_xcRB *xcRB, u32 xflags); +long zcrypt_send_ep11_cprb(struct ep11_urb *urb, u32 xflags); +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue); int zcrypt_device_status_ext(int card, int queue, struct zcrypt_device_status_ext *devstatus); diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 43a27cb3db84..b975a3728c23 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/random.h> @@ -29,16 +30,31 @@ /* Size of vardata block used for some of the cca requests/replies */ #define VARDATASIZE 4096 -struct cca_info_list_entry { - struct list_head list; - u16 cardnr; - u16 domain; - struct cca_info info; -}; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used + * when alloc_and_prep_cprbmem() is called with the xflag + * ZCRYPT_XFLAG_NOMEMALLOC. The cprb memory needs to hold + * space for request AND reply! + */ +#define CPRB_MEMPOOL_ITEM_SIZE (16 * 1024) +static mempool_t *cprb_mempool; -/* a list with cca_info_list_entry entries */ -static LIST_HEAD(cca_info_list); -static DEFINE_SPINLOCK(cca_info_list_lock); +/* + * This is a pre-allocated memory for the device status array + * used within the findcard() functions. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaption if more + * than 128 cards or domains to be are supported. 
+ */ +#define ZCRYPT_DEV_STATUS_CARD_MAX 128 +#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 +#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ + ZCRYPT_DEV_STATUS_QUEUE_MAX) +#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ + sizeof(struct zcrypt_device_status_ext)) +static void *dev_status_mem; +static DEFINE_MUTEX(dev_status_mem_mutex); /* * Simple check if the token is a valid CCA secure AES data key @@ -219,19 +235,27 @@ EXPORT_SYMBOL(cca_check_sececckeytoken); static int alloc_and_prep_cprbmem(size_t paramblen, u8 **p_cprb_mem, struct CPRBX **p_req_cprb, - struct CPRBX **p_rep_cprb) + struct CPRBX **p_rep_cprb, + u32 xflags) { - u8 *cprbmem; + u8 *cprbmem = NULL; size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; + size_t len = 2 * cprbplusparamblen; struct CPRBX *preqcblk, *prepcblk; /* * allocate consecutive memory for request CPRB, request param * block, reply CPRB and reply param block */ - cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprbmem = mempool_alloc_preallocated(cprb_mempool); + } else { + cprbmem = kmalloc(len, GFP_KERNEL); + } if (!cprbmem) return -ENOMEM; + memset(cprbmem, 0, len); preqcblk = (struct CPRBX *)cprbmem; prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); @@ -261,11 +285,15 @@ static int alloc_and_prep_cprbmem(size_t paramblen, * with zeros before freeing (useful if there was some * clear key material in there). */ -static void free_cprbmem(void *mem, size_t paramblen, int scrub) +static void free_cprbmem(void *mem, size_t paramblen, bool scrub, u32 xflags) { - if (scrub) + if (mem && scrub) memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); - kfree(mem); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); } /* @@ -290,7 +318,7 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, * Generate (random) CCA AES DATA secure key. */ int cca_genseckey(u16 cardnr, u16 domain, - u32 keybitsize, u8 *seckey) + u32 keybitsize, u8 *seckey, u32 xflags) { int i, rc, keysize; int seckeysize; @@ -332,7 +360,8 @@ int cca_genseckey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -379,7 +408,7 @@ int cca_genseckey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", __func__, (int)cardnr, (int)domain, rc); @@ -424,7 +453,7 @@ int cca_genseckey(u16 cardnr, u16 domain, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_genseckey); @@ -433,7 +462,7 @@ EXPORT_SYMBOL(cca_genseckey); * Generate an CCA AES DATA secure key with given key value. 
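
The reworked alloc_and_prep_cprbmem()/free_cprbmem() pair above draws request/reply memory from cprb_mempool whenever ZCRYPT_XFLAG_NOMEMALLOC is set, so callers on low-memory paths never go through kmalloc(). A rough sketch of the intended pool creation and of a caller; the creation call is an assumption (the real initialization lives in zcrypt_ccamisc_init(), added further down in this patch) and the caller body is abbreviated:

	#include <linux/mempool.h>

	/* Assumed pool setup: zcrypt_mempool_threshold items of
	 * CPRB_MEMPOOL_ITEM_SIZE bytes, each holding request AND reply. */
	static int example_cprb_pool_init(void)
	{
		cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold,
							   CPRB_MEMPOOL_ITEM_SIZE);
		return cprb_mempool ? 0 : -ENOMEM;
	}

	/* Sketch of a caller on the no-allocation path. */
	static int example_build_and_send(u16 cardnr, u16 domain, u32 xflags)
	{
		struct CPRBX *preqcblk, *prepcblk;
		u8 *mem;
		int rc;

		rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
					    &preqcblk, &prepcblk, xflags);
		if (rc)
			return rc;
		/* ... fill request, prep_xcrb(), zcrypt_send_cprb(&xcrb, xflags) ... */
		free_cprbmem(mem, PARMBSIZE, false, xflags);
		return rc;
	}
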
*/ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey) + const u8 *clrkey, u8 *seckey, u32 xflags) { int rc, keysize, seckeysize; u8 *mem, *ptr; @@ -473,7 +502,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -517,7 +547,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -563,7 +593,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 1); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_clr2seckey); @@ -573,7 +603,7 @@ EXPORT_SYMBOL(cca_clr2seckey); */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -619,7 +649,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -644,7 +675,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -712,7 +743,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, *protkeylen = prepparm->lv3.ckb.len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_sec2protkey); @@ -737,7 +768,7 @@ static const u8 aes_cipher_key_skeleton[] = { * Generate (random) CCA AES CIPHER secure key. 
*/ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize) + u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; u8 *mem, *ptr; @@ -813,7 +844,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, struct cipherkeytoken *t; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -872,7 +904,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -923,7 +955,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, *keybufsize = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_gencipherkey); @@ -938,7 +970,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, const u8 *clr_key_value, int clr_key_bit_size, u8 *key_token, - int *key_token_size) + int *key_token_size, + u32 xflags) { int rc, n; u8 *mem, *ptr; @@ -989,7 +1022,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1038,7 +1072,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1077,7 +1111,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, *key_token_size = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } @@ -1085,23 +1119,31 @@ out: * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, u32 *keybufsize) + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; - u8 *token; + void *mem; int tokensize; - u8 exorbuf[32]; + u8 *token, exorbuf[32]; struct cipherkeytoken *t; /* fill exorbuf with random data */ get_random_bytes(exorbuf, sizeof(exorbuf)); - /* allocate space for the key token to build */ - token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL); - if (!token) + /* + * Allocate space for the key token to build. + * Also we only need up to MAXCCAVLSCTOKENSIZE bytes for this + * we use the already existing cprb mempool to solve this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM; /* prepare the token with the key skeleton */ + token = (u8 *)mem; tokensize = SIZEOF_SKELETON; memcpy(token, aes_cipher_key_skeleton, tokensize); @@ -1120,28 +1162,28 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, * 4/4 COMPLETE the secure cipher key import */ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART", - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - clrkey, keybitsize, token, &tokensize); + clrkey, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL, - NULL, keybitsize, token, &tokensize); + NULL, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); @@ -1158,7 +1200,7 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, *keybufsize = tokensize; out: - kfree(token); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(cca_clr2cipherkey); @@ -1167,7 +1209,8 @@ EXPORT_SYMBOL(cca_clr2cipherkey); * Derive proteced key from CCA AES cipher secure key. */ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { int rc; u8 *mem, *ptr; @@ -1219,7 +1262,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, int keytoklen = ((struct cipherkeytoken *)ckey)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1249,7 +1293,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1323,7 +1367,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, *protkeylen = prepparm->vud.ckb.keylen; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_cipher2protkey); @@ -1332,7 +1376,7 @@ EXPORT_SYMBOL(cca_cipher2protkey); * Derive protected key from CCA ECC secure private key. 
*/ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -1382,7 +1426,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int keylen = ((struct eccprivkeytoken *)key)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1412,7 +1457,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1470,7 +1515,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, *protkeytype = PKEY_KEYTYPE_ECC; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_ecc2protkey); @@ -1481,7 +1526,8 @@ EXPORT_SYMBOL(cca_ecc2protkey); int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen) + u8 *varray, size_t *varraylen, + u32 xflags) { int rc; u16 len; @@ -1505,7 +1551,8 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(parmbsize, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1526,7 +1573,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1573,94 +1620,21 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, } out: - free_cprbmem(mem, parmbsize, 0); + free_cprbmem(mem, parmbsize, false, xflags); return rc; } EXPORT_SYMBOL(cca_query_crypto_facility); -static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci) -{ - int rc = -ENOENT; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && ptr->domain == domain) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&cca_info_list_lock); - - return rc; -} - -static void cca_info_cache_update(u16 cardnr, u16 domain, - const struct cca_info *ci) -{ - int found = 0; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&cca_info_list_lock); - return; - } - ptr->cardnr = cardnr; - ptr->domain = domain; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &cca_info_list); - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void cca_info_cache_scrub(u16 cardnr, u16 domain) -{ - struct 
cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void __exit mkvp_cache_free(void) -{ - struct cca_info_list_entry *ptr, *pnext; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&cca_info_list_lock); -} - /* - * Fetch cca_info values via query_crypto_facility from adapter. + * Fetch cca_info values about a CCA queue via + * query_crypto_facility from adapter. */ -static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) +int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags) { + void *mem; int rc, found = 0; size_t rlen, vlen; - u8 *rarray, *varray, *pg; + u8 *rarray, *varray; struct zcrypt_device_status_ext devstat; memset(ci, 0, sizeof(*ci)); @@ -1671,17 +1645,22 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) return rc; ci->hwtype = devstat.hwtype; - /* prep page for rule array and var array use */ - pg = (u8 *)__get_free_page(GFP_KERNEL); - if (!pg) + /* + * Prep memory for rule array and var array use. + * Use the cprb mempool for this. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? + mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM; - rarray = pg; - varray = pg + PAGE_SIZE / 2; + rarray = (u8 *)mem; + varray = (u8 *)mem + PAGE_SIZE / 2; rlen = vlen = PAGE_SIZE / 2; /* QF for this card/domain */ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { memcpy(ci->serial, rarray, 8); ci->new_asym_mk_state = (char)rarray[4 * 8]; @@ -1708,7 +1687,7 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) goto out; rlen = vlen = PAGE_SIZE / 2; rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { ci->new_apka_mk_state = (char)rarray[10 * 8]; ci->cur_apka_mk_state = (char)rarray[11 * 8]; @@ -1723,177 +1702,32 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) } out: - free_page((unsigned long)pg); + mempool_free(mem, cprb_mempool); return found == 2 ? 0 : -ENOENT; } - -/* - * Fetch cca information about a CCA queue. - */ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify) -{ - int rc; - - rc = cca_info_cache_fetch(card, dom, ci); - if (rc || verify) { - rc = fetch_cca_info(card, dom, ci); - if (rc == 0) - cca_info_cache_update(card, dom, ci); - } - - return rc; -} EXPORT_SYMBOL(cca_get_info); -/* - * Search for a matching crypto card based on the - * Master Key Verification Pattern given. 
- */ -static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, - int verify, int minhwtype) -{ - struct zcrypt_device_status_ext *device_status; - u16 card, dom; - struct cca_info ci; - int i, rc, oi = -1; - - /* mkvp must not be zero, minhwtype needs to be >= 0 */ - if (mkvp == 0 || minhwtype < 0) - return -EINVAL; - - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); - - /* walk through all crypto cards */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - if (device_status[i].online && - device_status[i].functions & 0x04) { - /* enabled CCA card, check current mkvp from cache */ - if (cca_info_cache_fetch(card, dom, &ci) == 0 && - ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) { - if (!verify) - break; - /* verify: refresh card info */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - } - } - } else { - /* Card is offline and/or not a CCA card. */ - /* del mkvp entry from cache if it exists */ - cca_info_cache_scrub(card, dom); - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT) { - /* nothing found, so this time without cache */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - if (!(device_status[i].online && - device_status[i].functions & 0x04)) - continue; - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - /* fresh fetch mkvp from adapter */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - if (ci.hwtype >= minhwtype && - ci.old_aes_mk_state == '2' && - ci.old_aes_mkvp == mkvp && - oi < 0) - oi = i; - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { - /* old mkvp matched, use this card then */ - card = AP_QID_CARD(device_status[oi].qid); - dom = AP_QID_QUEUE(device_status[oi].qid); - } - } - if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { - if (pcardnr) - *pcardnr = card; - if (pdomain) - *pdomain = dom; - rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1); - } else { - rc = -ENODEV; - } - - kvfree(device_status); - return rc; -} - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key token. 
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) -{ - u64 mkvp; - int minhwtype = 0; - const struct keytoken_header *hdr = (struct keytoken_header *)key; - - if (hdr->type != TOKTYPE_CCA_INTERNAL) - return -EINVAL; - - switch (hdr->version) { - case TOKVER_CCA_AES: - mkvp = ((struct secaeskeytoken *)key)->mkvp; - break; - case TOKVER_CCA_VLSC: - mkvp = ((struct cipherkeytoken *)key)->mkvp0; - minhwtype = AP_DEVICE_TYPE_CEX6; - break; - default: - return -EINVAL; - } - - return findcard(mkvp, pcardnr, pdomain, verify, minhwtype); -} -EXPORT_SYMBOL(cca_findcard); - -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify) + u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, curmatch, oldmatch, rc = 0; + int i, card, dom, curmatch, oldmatch; struct cca_info ci; + u32 _nr_apqns = 0; - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqnss */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1909,7 +1743,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, if (domain != 0xFFFF && dom != domain) continue; /* get cca info on this apqn */ - if (cca_get_info(card, dom, &ci, verify)) + if (cca_get_info(card, dom, &ci, xflags)) continue; /* current master key needs to be valid */ if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2') @@ -1939,27 +1773,41 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criterons, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + /* release the device status memory */ + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(cca_findcard2); -void __exit zcrypt_ccamisc_exit(void) +int __init zcrypt_ccamisc_init(void) +{ + /* Pre-allocate a small memory pool for cca cprbs. 
*/ + cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in findcard() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ccamisc_exit(void) { - mkvp_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index 26bdca702523..1ecc4e37e9ad 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -160,44 +160,47 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, /* * Generate (random) CCA AES DATA secure key. */ -int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey); +int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey, + u32 xflags); /* * Generate CCA AES DATA secure key with given clear key value. */ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey); + const u8 *clrkey, u8 *seckey, u32 xflags); /* * Derive proteced key from an CCA AES DATA secure key. */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); /* * Generate (random) CCA AES CIPHER secure key. */ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize); + u8 *keybuf, u32 *keybufsize, u32 xflags); /* * Derive proteced key from CCA AES cipher secure key. */ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); /* * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, u32 *keybufsize); + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, + u32 xflags); /* * Derive proteced key from CCA ECC secure private key. */ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags); /* * Query cryptographic facility from CCA adapter @@ -205,16 +208,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen); - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key. - * Works with CCA AES data and cipher keys. - * Returns < 0 on failure, 0 if CURRENT MKVP matches and - * 1 if OLD MKVP matches. 
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); + u8 *varray, size_t *varraylen, + u32 xflags); /* * Build a list of cca apqns meeting the following constrains: @@ -224,21 +219,16 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); * - if minhwtype > 0 only apqns with hwtype >= minhwtype * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp * - if old_mkvp != 0 only apqns where old_mkvp == mkvp - * - if verify is enabled and a cur_mkvp and/or old_mkvp - * value is given, then refetch the cca_info and make sure the current - * cur_mkvp or old_mkvp values of the apqn are used. * The mktype determines which set of master keys to use: * 0 = AES_MK_SET - AES MK set, 1 = APKA MK_SET - APKA MK set - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify); + u32 xflags); #define AES_MK_SET 0 #define APKA_MK_SET 1 @@ -270,8 +260,9 @@ struct cca_info { /* * Fetch cca information about an CCA queue. 
*/ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify); +int cca_get_info(u16 card, u16 dom, struct cca_info *ci, u32 xflags); +int zcrypt_ccamisc_init(void); void zcrypt_ccamisc_exit(void); #endif /* _ZCRYPT_CCAMISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 64df7d2f6266..6ba7fbddd3f7 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -79,14 +79,13 @@ static ssize_t cca_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct cca_info ci; struct ap_card *ac = to_ap_card(dev); + struct cca_info ci; memset(&ci, 0, sizeof(ci)); if (ap_domain_index >= 0) - cca_get_info(ac->id, ap_domain_index, &ci, zc->online); + cca_get_info(ac->id, ap_domain_index, &ci, 0); return sysfs_emit(buf, "%s\n", ci.serial); } @@ -110,17 +109,17 @@ static ssize_t cca_mkvps_show(struct device *dev, struct device_attribute *attr, char *buf) { + static const char * const new_state[] = { "empty", "partial", "full" }; + static const char * const cao_state[] = { "invalid", "valid" }; struct zcrypt_queue *zq = dev_get_drvdata(dev); - int n = 0; struct cca_info ci; - static const char * const cao_state[] = { "invalid", "valid" }; - static const char * const new_state[] = { "empty", "partial", "full" }; + int n = 0; memset(&ci, 0, sizeof(ci)); cca_get_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &ci, zq->online); + &ci, 0); if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n", @@ -210,13 +209,12 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.API_ord_nr > 0) return sysfs_emit(buf, "%u\n", ci.API_ord_nr); @@ -231,13 +229,12 @@ static ssize_t ep11_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.FW_version > 0) return sysfs_emit(buf, "%d.%d\n", @@ -254,13 +251,12 @@ static ssize_t ep11_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.serial[0]) return sysfs_emit(buf, "%16.16s\n", ci.serial); @@ -291,14 +287,13 @@ static ssize_t ep11_card_op_modes_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - int i, n = 0; - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; + int i, n = 0; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { @@ -348,7 +343,7 @@ static ssize_t ep11_mkvps_show(struct device *dev, if 
(zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); if (di.cur_wk_state == '0') { n = sysfs_emit(buf, "WK CUR: %s -\n", @@ -395,7 +390,7 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev, if (zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index cb7e6da43602..2f50fc7b8f61 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -10,9 +10,10 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/random.h> +#include <linux/slab.h> #include <asm/zcrypt.h> #include <asm/pkey.h> #include <crypto/aes.h> @@ -30,85 +31,29 @@ static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; -/* ep11 card info cache */ -struct card_list_entry { - struct list_head list; - u16 cardnr; - struct ep11_card_info info; -}; -static LIST_HEAD(card_list); -static DEFINE_SPINLOCK(card_list_lock); - -static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci) -{ - int rc = -ENOENT; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&card_list_lock); - - return rc; -} - -static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci) -{ - int found = 0; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&card_list_lock); - return; - } - ptr->cardnr = cardnr; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &card_list); - } - spin_unlock_bh(&card_list_lock); -} - -static void card_cache_scrub(u16 cardnr) -{ - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&card_list_lock); -} - -static void __exit card_cache_free(void) -{ - struct card_list_entry *ptr, *pnext; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used when + * alloc_cprbmem() is called with the xflag ZCRYPT_XFLAG_NOMEMALLOC. + */ +#define CPRB_MEMPOOL_ITEM_SIZE (8 * 1024) +static mempool_t *cprb_mempool; - spin_lock_bh(&card_list_lock); - list_for_each_entry_safe(ptr, pnext, &card_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&card_list_lock); -} +/* + * This is a pre-allocated memory for the device status array + * used within the ep11_findcard2() function. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaption if more + * than 128 cards or domains to be are supported. 
+ */ +#define ZCRYPT_DEV_STATUS_CARD_MAX 128 +#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 +#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ + ZCRYPT_DEV_STATUS_QUEUE_MAX) +#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ + sizeof(struct zcrypt_device_status_ext)) +static void *dev_status_mem; +static DEFINE_MUTEX(dev_status_mem_mutex); static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver, struct ep11kblob_header **kbhdr, size_t *kbhdrsize, @@ -411,14 +356,20 @@ EXPORT_SYMBOL(ep11_check_aes_key); /* * Allocate and prepare ep11 cprb plus additional payload. */ -static inline struct ep11_cprb *alloc_cprb(size_t payload_len) +static void *alloc_cprbmem(size_t payload_len, u32 xflags) { size_t len = sizeof(struct ep11_cprb) + payload_len; - struct ep11_cprb *cprb; + struct ep11_cprb *cprb = NULL; - cprb = kzalloc(len, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprb = mempool_alloc_preallocated(cprb_mempool); + } else { + cprb = kmalloc(len, GFP_KERNEL); + } if (!cprb) return NULL; + memset(cprb, 0, len); cprb->cprb_len = sizeof(struct ep11_cprb); cprb->cprb_ver_id = 0x04; @@ -430,6 +381,20 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len) } /* + * Free ep11 cprb buffer space. + */ +static void free_cprbmem(void *mem, size_t payload_len, bool scrub, u32 xflags) +{ + if (mem && scrub) + memzero_explicit(mem, sizeof(struct ep11_cprb) + payload_len); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); +} + +/* * Some helper functions related to ASN1 encoding. * Limited to length info <= 2 byte. */ @@ -489,6 +454,7 @@ static inline void prep_urb(struct ep11_urb *u, struct ep11_cprb *req, size_t req_len, struct ep11_cprb *rep, size_t rep_len) { + memset(u, 0, sizeof(*u)); u->targets = (u8 __user *)t; u->targets_num = nt; u->req = (u8 __user *)req; @@ -583,7 +549,7 @@ static int check_reply_cprb(const struct ep11_cprb *rep, const char *func) * Helper function which does an ep11 query with given query type. 
*/ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, - size_t buflen, u8 *buf) + size_t buflen, u8 *buf, u32 xflags) { struct ep11_info_req_pl { struct pl_head head; @@ -605,11 +571,11 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api = EP11_API_V1, rc = -ENOMEM; /* request cprb and payload */ - req = alloc_cprb(sizeof(struct ep11_info_req_pl)); + req = alloc_cprbmem(sizeof(struct ep11_info_req_pl), xflags); if (!req) goto out; req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -621,22 +587,19 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, req_pl->query_subtype_len = sizeof(u32); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); + rep = alloc_cprbmem(sizeof(struct ep11_info_rep_pl) + buflen, xflags); if (!rep) goto out; rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = cardnr; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + sizeof(*req_pl), rep, sizeof(*rep) + sizeof(*rep_pl) + buflen); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -667,16 +630,15 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, 0, false, xflags); return rc; } /* * Provide information about an EP11 card. */ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags) { int rc; struct ep11_module_query_info { @@ -706,30 +668,26 @@ int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) u32 max_CP_index; } __packed * pmqi = NULL; - rc = card_cache_fetch(card, info); - if (rc || verify) { - pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL); - if (!pmqi) - return -ENOMEM; - rc = ep11_query_info(card, AUTOSEL_DOM, - 0x01 /* module info query */, - sizeof(*pmqi), (u8 *)pmqi); - if (rc) { - if (rc == -ENODEV) - card_cache_scrub(card); - goto out; - } - memset(info, 0, sizeof(*info)); - info->API_ord_nr = pmqi->API_ord_nr; - info->FW_version = - (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; - memcpy(info->serial, pmqi->serial, sizeof(info->serial)); - info->op_mode = pmqi->op_mode; - card_cache_update(card, info); - } + /* use the cprb mempool to satisfy this short term mem alloc */ + pmqi = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!pmqi) + return -ENOMEM; + rc = ep11_query_info(card, AUTOSEL_DOM, + 0x01 /* module info query */, + sizeof(*pmqi), (u8 *)pmqi, xflags); + if (rc) + goto out; + + memset(info, 0, sizeof(*info)); + info->API_ord_nr = pmqi->API_ord_nr; + info->FW_version = (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; + memcpy(info->serial, pmqi->serial, sizeof(info->serial)); + info->op_mode = pmqi->op_mode; out: - kfree(pmqi); + mempool_free(pmqi, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_get_card_info); @@ -737,7 +695,8 @@ EXPORT_SYMBOL(ep11_get_card_info); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags) { int rc; struct ep11_domain_query_info { @@ -746,36 +705,32 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) u8 new_WK_VP[32]; u32 dom_flags; u64 op_mode; - } __packed * p_dom_info; - - p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL); - if (!p_dom_info) - return -ENOMEM; + } __packed dom_query_info; rc = ep11_query_info(card, domain, 0x03 /* domain info query */, - sizeof(*p_dom_info), (u8 *)p_dom_info); + sizeof(dom_query_info), (u8 *)&dom_query_info, + xflags); if (rc) goto out; memset(info, 0, sizeof(*info)); info->cur_wk_state = '0'; info->new_wk_state = '0'; - if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) { - if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) { + if (dom_query_info.dom_flags & 0x10 /* left imprint mode */) { + if (dom_query_info.dom_flags & 0x02 /* cur wk valid */) { info->cur_wk_state = '1'; - memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); + memcpy(info->cur_wkvp, dom_query_info.cur_WK_VP, 32); } - if (p_dom_info->dom_flags & 0x04 || /* new wk present */ - p_dom_info->dom_flags & 0x08 /* new wk committed */) { + if (dom_query_info.dom_flags & 0x04 || /* new wk present */ + dom_query_info.dom_flags & 0x08 /* new wk committed */) { info->new_wk_state = - p_dom_info->dom_flags & 0x08 ? '2' : '1'; - memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); + dom_query_info.dom_flags & 0x08 ? 
'2' : '1'; + memcpy(info->new_wkvp, dom_query_info.new_WK_VP, 32); } } - info->op_mode = p_dom_info->op_mode; + info->op_mode = dom_query_info.op_mode; out: - kfree(p_dom_info); return rc; } EXPORT_SYMBOL(ep11_get_domain_info); @@ -788,7 +743,7 @@ EXPORT_SYMBOL(ep11_get_domain_info); static int _ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct keygen_req_pl { struct pl_head head; @@ -823,7 +778,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -851,7 +806,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, pinblob_size = EP11_PINBLOB_V1_BYTES; } req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -877,22 +832,19 @@ static int _ep11_genaeskey(u16 card, u16 domain, *p++ = pinblob_size; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct keygen_rep_pl)); + rep = alloc_cprbmem(sizeof(struct keygen_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -925,14 +877,13 @@ static int _ep11_genaeskey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, sizeof(struct keygen_rep_pl), true, xflags); return rc; } int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize, u32 keybufver) + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -953,7 +904,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, return rc; rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -973,7 +924,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, u16 mode, u32 mech, const u8 *iv, const u8 *key, size_t keysize, const u8 *inbuf, size_t inbufsize, - u8 *outbuf, size_t *outbufsize) + u8 *outbuf, size_t *outbufsize, + u32 xflags) { struct crypt_req_pl { struct pl_head head; @@ -1000,8 +952,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; - size_t req_pl_size, rep_pl_size; + struct ep11_urb urb; + size_t req_pl_size, rep_pl_size = 0; int n, api = EP11_API_V1, rc = -ENOMEM; u8 *p; @@ -1012,7 +964,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1034,22 +986,19 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* reply cprb and payload, assume out data size <= in data size + 32 */ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32); - rep = alloc_cprb(rep_pl_size); + rep = alloc_cprbmem(rep_pl_size, xflags); if (!rep) goto out; rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + rep_pl_size); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1095,9 +1044,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, *outbufsize = n; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, rep_pl_size, true, xflags); return rc; } @@ -1106,7 +1054,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, const u8 *enckey, size_t enckeysize, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct uw_req_pl { struct pl_head head; @@ -1143,7 +1091,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -1161,7 +1109,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, req_pl_size = sizeof(struct uw_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keksize) + ASN1TAGLEN(0) + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1197,22 +1145,19 @@ static int _ep11_unwrapkey(u16 card, u16 domain, p += asn1tag_write(p, 0x04, enckey, enckeysize); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct uw_rep_pl)); + rep = alloc_cprbmem(sizeof(struct uw_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1245,9 +1190,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct uw_rep_pl), true, xflags); return rc; } @@ -1257,7 +1201,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, u8 *keybuf, u32 *keybufsize, - u8 keybufver) + u8 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -1271,7 +1215,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize, mech, iv, keybitsize, keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -1290,7 +1234,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, static int _ep11_wrapkey(u16 card, u16 domain, const u8 *key, size_t keysize, u32 mech, const u8 *iv, - u8 *databuf, size_t *datasize) + u8 *databuf, size_t *datasize, u32 xflags) { struct wk_req_pl { struct pl_head head; @@ -1319,7 +1263,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; size_t req_pl_size; int api, rc = -ENOMEM; u8 *p; @@ -1327,7 +1271,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keysize) + 4; - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; if (!mech || mech == 0x80060001) @@ -1357,22 +1301,19 @@ static int _ep11_wrapkey(u16 card, u16 domain, *p++ = 0; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct wk_rep_pl)); + rep = alloc_cprbmem(sizeof(struct wk_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1405,18 +1346,18 @@ static int _ep11_wrapkey(u16 card, u16 domain, *datasize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct wk_rep_pl), true, xflags); return rc; } int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, u32 *keybufsize, - u32 keytype) + u32 keytype, u32 xflags) { int rc; - u8 encbuf[64], *kek = NULL; + void *mem; + u8 encbuf[64], *kek; size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { @@ -1427,18 +1368,24 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, return -EINVAL; } - /* allocate memory for the temp kek */ + /* + * Allocate space for the temp kek. + * Also we only need up to MAXEP11AESKEYBLOBSIZE bytes for this + * we use the already existing cprb mempool to solve this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) + return -ENOMEM; + kek = (u8 *)mem; keklen = MAXEP11AESKEYBLOBSIZE; - kek = kmalloc(keklen, GFP_ATOMIC); - if (!kek) { - rc = -ENOMEM; - goto out; - } /* Step 1: generate AES 256 bit random kek key */ rc = _ep11_genaeskey(card, domain, 256, 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ - kek, &keklen); + kek, &keklen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n", __func__, rc); @@ -1447,7 +1394,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 2: encrypt clear key value with the kek key */ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, - clrkey, clrkeylen, encbuf, &encbuflen); + clrkey, clrkeylen, encbuf, &encbuflen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n", __func__, rc); @@ -1457,22 +1404,23 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 3: import the encrypted key value as a new key */ rc = ep11_unwrapkey(card, domain, kek, keklen, encbuf, encbuflen, 0, def_iv, - keybitsize, 0, keybuf, keybufsize, keytype); + keybitsize, 0, keybuf, keybufsize, keytype, xflags); if (rc) { - ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n", + ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n", __func__, rc); goto out; } out: - kfree(kek); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_clr2keyblob); int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, u32 keybloblen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { struct ep11kblob_header *hdr; struct ep11keyblob *key; @@ -1498,15 +1446,29 @@ int ep11_kblob2protkey(u16 card, u16 dom, } /* !!! hdr is no longer a valid header !!! */ - /* alloc temp working buffer */ + /* need a temp working buffer */ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); - wkbuf = kmalloc(wkbuflen, GFP_ATOMIC); - if (!wkbuf) - return -ENOMEM; + if (wkbuflen > CPRB_MEMPOOL_ITEM_SIZE) { + /* this should never happen */ + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s wkbuflen %d > cprb mempool item size %d, rc=%d\n", + __func__, (int)wkbuflen, CPRB_MEMPOOL_ITEM_SIZE, rc); + return rc; + } + /* use the cprb mempool to satisfy this short term mem allocation */ + wkbuf = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_ATOMIC); + if (!wkbuf) { + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s allocating tmp buffer via cprb mempool failed, rc=%d\n", + __func__, rc); + return rc; + } /* ep11 secure key -> protected key + info */ rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen, - 0, def_iv, wkbuf, &wkbuflen); + 0, def_iv, wkbuf, &wkbuflen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n", __func__, rc); @@ -1573,37 +1535,32 @@ int ep11_kblob2protkey(u16 card, u16 dom, *protkeylen = wki->pkeysize; out: - kfree(wkbuf); + mempool_free(wkbuf, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_kblob2protkey); -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp) +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, rc = -ENOMEM; struct ep11_domain_info edi; struct ep11_card_info eci; + u32 _nr_apqns = 0; + int i, card, dom; - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqnss */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1623,14 +1580,14 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; /* check min api version if given */ if (minapi > 0) { - if (ep11_get_card_info(card, &eci, 0)) + if (ep11_get_card_info(card, &eci, xflags)) continue; if (minapi > eci.API_ord_nr) continue; } /* check wkvp if given */ if (wkvp) { - if (ep11_get_domain_info(card, dom, &edi)) + if (ep11_get_domain_info(card, dom, &edi, xflags)) continue; if (edi.cur_wk_state != '1') continue; @@ -1638,27 +1595,40 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criterons, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(ep11_findcard2); -void __exit zcrypt_ep11misc_exit(void) +int __init zcrypt_ep11misc_init(void) +{ + /* Pre-allocate a small memory pool for ep11 cprbs. 
*/ + cprb_mempool = mempool_create_kmalloc_pool(2 * zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in ep11_findcard2() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ep11misc_exit(void) { - card_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index 9f1bdffdec68..b5e6fd861815 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -104,25 +104,26 @@ struct ep11_domain_info { /* * Provide information about an EP11 card. */ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify); +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags); /* * Generate (random) EP11 AES secure key. */ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize, u32 keybufver); + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags); /* * Generate EP11 AES secure key with given clear key value. */ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, u32 *keybufsize, - u32 keytype); + u32 keytype, u32 xflags); /* * Build a list of ep11 apqns meeting the following constrains: @@ -136,22 +137,22 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, * key for this domain. When a wkvp is given there will always be a re-fetch * of the domain info for the potential apqn - so this triggers an request * reply to each apqn eligible. - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp); +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags); /* * Derive proteced key from EP11 key blob (AES and ECC keys). 
*/ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); +int zcrypt_ep11misc_init(void); void zcrypt_ep11misc_exit(void); #endif /* _ZCRYPT_EP11MISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index adc65eddaa1e..fc0a2a053dc2 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -438,7 +438,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq, msg->len = sizeof(error_reply); } out: - complete((struct completion *)msg->private); + complete(&msg->response.work); } static atomic_t zcrypt_step = ATOMIC_INIT(0); @@ -449,30 +449,30 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @mex: pointer to the modexpo request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -485,7 +485,6 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) pr_debug("send me cprb at dev=%02x.%04x rc=%d\n", AP_QID_CARD(zq->queue->qid), @@ -499,30 +498,30 @@ out: * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @crt: pointer to the modexpoc_crt request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
*/ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -535,7 +534,6 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n", AP_QID_CARD(zq->queue->qid), diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index b64c9d9fc613..9cefbb30960f 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -31,11 +31,6 @@ #define CEIL4(x) ((((x) + 3) / 4) * 4) -struct response_type { - struct completion work; - int type; -}; - #define CEXXC_RESPONSE_TYPE_ICA 0 #define CEXXC_RESPONSE_TYPE_XCRB 1 #define CEXXC_RESPONSE_TYPE_EP11 2 @@ -856,7 +851,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86x_reply *t86r; int len; @@ -920,7 +915,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86_ep11_reply *t86r; int len; @@ -967,9 +962,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_ICA, - }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -979,15 +972,15 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1001,7 +994,6 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1017,9 +1009,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_ICA, 
- }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -1029,15 +1019,15 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1051,7 +1041,6 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1061,28 +1050,21 @@ out_free: * Prepare a CCA AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. - * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned short **dom) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); } @@ -1097,7 +1079,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, struct ica_xcRB *xcrb, struct ap_message *ap_msg) { - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct CPRBX cprbx; @@ -1128,11 +1110,11 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, msg->hdr.fromcardlen1 -= delta; } - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1158,28 +1140,21 @@ out: * Prepare an EP11 AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. - * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). 
Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_EP11, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive_ep11; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_EP11; return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code, domain); } @@ -1197,7 +1172,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * { int rc; unsigned int lfmt; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct ep11_cprb cprbx; @@ -1251,11 +1226,11 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * msg->hdr.fromcardlen1 = zq->reply.bufsize - sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1276,23 +1251,25 @@ out: return rc; } +/* + * Prepare a CEXXC get random request ap message. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_max_msg_size is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
+ */ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < AP_DEFAULT_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); @@ -1319,16 +1296,16 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, short int verb_length; short int key_length; } __packed * msg = ap_msg->msg; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; int rc; msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 9e580ef69bda..aaa1eea6149b 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -179,7 +179,7 @@ void ctcmpc_dumpit(char *buf, int len) ctcm_pr_debug(" %s (+%s) : %s [%s]\n", addr, boff, bhex, basc); dup = 0; - strcpy(duphex, bhex); + strscpy(duphex, bhex); } else dup++; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 5a3c670aec27..5522310bab8d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -403,6 +403,7 @@ config SCSI_ACARD config SCSI_AHA152X tristate "Adaptec AHA152X/2825 support" depends on ISA && SCSI + depends on !HIGHMEM select SCSI_SPI_ATTRS select CHECK_SIGNATURE help @@ -795,6 +796,7 @@ config SCSI_PPA tristate "IOMEGA parallel port (ppa - older drives)" depends on SCSI && PARPORT_PC depends on HAS_IOPORT + depends on !HIGHMEM help This driver supports older versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -822,6 +824,7 @@ config SCSI_PPA config SCSI_IMM tristate "IOMEGA parallel port (imm - newer drives)" depends on SCSI && PARPORT_PC + depends on !HIGHMEM help This driver supports newer versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). 
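The Kconfig hunks above and the SCSI hunks that follow are two halves of one change: host adapters that cannot handle highmem pages now state that as a build-time dependency (depends on !HIGHMEM) instead of setting shost->no_highmem at probe time and relying on block-layer bounce buffering. Below is a minimal sketch, not taken from the patch, of the mechanism the following hunks remove; only no_highmem, BLK_FEAT_BOUNCE_HIGH and scsi_init_limits() come from the hunks, the helper names are hypothetical and the snippet assumes the pre-patch kernel headers:

#include <linux/blkdev.h>
#include <scsi/scsi_host.h>

/*
 * Sketch of the old opt-in: the driver marked its host as unable to
 * reach highmem pages at probe time (the line deleted from aha152x,
 * imm and ppa below) ...
 */
static void sketch_mark_no_highmem(struct Scsi_Host *shost)
{
	shost->no_highmem = true;
}

/*
 * ... and scsi_init_limits() translated that flag into a request for
 * the block layer to bounce highmem bios (the hunk deleted from
 * scsi_lib.c below).
 */
static void sketch_init_limits(struct Scsi_Host *shost,
			       struct queue_limits *lim)
{
	if (shost->no_highmem)
		lim->features |= BLK_FEAT_BOUNCE_HIGH;
}

With the !HIGHMEM dependency in place neither hook is needed, which is why the driver hunks and the scsi_init_limits() hunk that follow only delete code.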
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 4276f868cd91..e94c0a19c435 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -746,7 +746,6 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup) /* need to have host registered before triggering any interrupt */ list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list); - shpnt->no_highmem = true; shpnt->io_port = setup->io_port; shpnt->n_io_port = IO_RANGE; shpnt->irq = setup->irq; diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 1d4c7310f1a6..0821cf994b98 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -1224,7 +1224,6 @@ static int __imm_attach(struct parport *pb) host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); if (!host) goto out1; - host->no_highmem = true; host->io_port = pb->base; host->n_io_port = ports; host->dma_channel = -1; diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index a06329b47851..1ed3171f1797 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -1104,7 +1104,6 @@ static int __ppa_attach(struct parport *pb) host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); if (!host) goto out1; - host->no_highmem = true; host->io_port = pb->base; host->n_io_port = ports; host->dma_channel = -1; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 2fa45556e1ea..0ddc95bafc71 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -601,7 +601,7 @@ static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write, } if (bytes) { - err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO); + err = blk_rq_map_kern(rq, buffer, bytes, GFP_NOIO); if (err) goto error; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1b43013d72c0..144c72f0737a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -313,8 +313,7 @@ retry: return PTR_ERR(req); if (bufflen) { - ret = blk_rq_map_kern(sdev->request_queue, req, - buffer, bufflen, GFP_NOIO); + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO); if (ret) goto out; } @@ -2004,9 +2003,6 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim) lim->dma_alignment = max_t(unsigned int, shost->dma_alignment, dma_get_cache_alignment() - 1); - if (shost->no_highmem) - lim->features |= BLK_FEAT_BOUNCE_HIGH; - /* * Propagate the DMA formation properties to the dma-mapping layer as * a courtesy service to the LLDDs. This needs to check that the buses diff --git a/drivers/sh/intc/irqdomain.c b/drivers/sh/intc/irqdomain.c index 3968f1c3c5c3..ed7a570ffdf2 100644 --- a/drivers/sh/intc/irqdomain.c +++ b/drivers/sh/intc/irqdomain.c @@ -59,10 +59,9 @@ void __init intc_irq_domain_init(struct intc_desc_int *d, * tree penalty for linear cases with non-zero hwirq bases. */ if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1)) - d->domain = irq_domain_add_linear(NULL, hw->nr_vectors, - &intc_evt_ops, NULL); + d->domain = irq_domain_create_linear(NULL, hw->nr_vectors, &intc_evt_ops, NULL); else - d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL); + d->domain = irq_domain_create_tree(NULL, &intc_evt_ops, NULL); BUG_ON(!d->domain); } diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c index 6202dbcd20a8..7bbd3f940e4d 100644 --- a/drivers/soc/dove/pmu.c +++ b/drivers/soc/dove/pmu.c @@ -257,10 +257,9 @@ static void pmu_irq_handler(struct irq_desc *desc) * So, let's structure the code so that the window is as small as * possible. 
*/ - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); done &= readl_relaxed(base + PMC_IRQ_CAUSE); writel_relaxed(done, base + PMC_IRQ_CAUSE); - irq_gc_unlock(gc); } static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq) @@ -274,8 +273,8 @@ static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq) writel(0, pmu->pmc_base + PMC_IRQ_MASK); writel(0, pmu->pmc_base + PMC_IRQ_CAUSE); - domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS, - &irq_generic_chip_ops, NULL); + domain = irq_domain_create_linear(of_fwnode_handle(pmu->of_node), NR_PMU_IRQS, + &irq_generic_chip_ops, NULL); if (!domain) { pr_err("%s: unable to add irq domain\n", name); return -ENOMEM; diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 77bf0e83ffcc..e4b6ff2cc76b 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -446,8 +446,8 @@ static int qe_ic_init(struct platform_device *pdev) high_handler = NULL; } - qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, - &qe_ic_host_ops, qe_ic); + qe_ic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), NR_QE_IC_INTS, + &qe_ic_host_ops, qe_ic); if (qe_ic->irqhost == NULL) { dev_err(dev, "failed to add irq domain\n"); return -ENODEV; diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index a3e88ced328a..8c8878bc87f5 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -399,7 +399,7 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p, struct smp2p_entry *entry, struct device_node *node) { - entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry); + entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smp2p_irq_ops, entry); if (!entry->domain) { dev_err(smp2p->dev, "failed to add irq_domain\n"); return -ENOMEM; diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index e803ea342c97..021e9d1f61dc 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -456,7 +456,7 @@ static int smsm_inbound_entry(struct qcom_smsm *smsm, return ret; } - entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry); + entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smsm_irq_ops, entry); if (!entry->domain) { dev_err(smsm->dev, "failed to add irq_domain\n"); return -ENOMEM; diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c index c5661ac19f7b..5f7bdf3bab05 100644 --- a/drivers/soc/samsung/exynos-usi.c +++ b/drivers/soc/samsung/exynos-usi.c @@ -233,7 +233,7 @@ static void exynos_usi_unconfigure(void *data) /* Make sure that we've stopped providing the clock to USI IP */ val = readl(usi->regs + USI_OPTION); val &= ~USI_OPTION_CLKREQ_ON; - val |= ~USI_OPTION_CLKSTOP_ON; + val |= USI_OPTION_CLKSTOP_ON; writel(val, usi->regs + USI_OPTION); /* Set USI block state to reset */ diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 51b9d852bb6a..e0d67bfe955c 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2500,8 +2500,9 @@ static int tegra_pmc_irq_init(struct tegra_pmc *pmc) pmc->irq.irq_set_type = pmc->soc->irq_set_type; pmc->irq.irq_set_wake = pmc->soc->irq_set_wake; - pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node, - &tegra_pmc_irq_domain_ops, pmc); + pmc->domain = irq_domain_create_hierarchy(parent, 0, 96, + of_fwnode_handle(pmc->dev->of_node), + &tegra_pmc_irq_domain_ops, pmc); if (!pmc->domain) { dev_err(pmc->dev, "failed to allocate domain\n"); return -ENOMEM; diff --git 
a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c index c36364522157..193266f5e3f9 100644 --- a/drivers/soc/ti/ti_sci_inta_msi.c +++ b/drivers/soc/ti/ti_sci_inta_msi.c @@ -103,19 +103,15 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev, if (ret) return ret; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); nvec = ti_sci_inta_msi_alloc_descs(dev, res); - if (nvec <= 0) { - ret = nvec; - goto unlock; - } + if (nvec <= 0) + return nvec; /* Use alloc ALL as it's unclear whether there are gaps in the indices */ ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec); if (ret) dev_err(dev, "Failed to allocate IRQs %d\n", ret); -unlock: - msi_unlock_descs(dev); return ret; } EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 067c954cb6ea..863781ba6c16 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. -// Copyright 2020 NXP +// Copyright 2020-2025 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -62,6 +62,7 @@ #define SPI_SR_TFIWF BIT(18) #define SPI_SR_RFDF BIT(17) #define SPI_SR_CMDFFF BIT(16) +#define SPI_SR_TXRXS BIT(30) #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ SPI_SR_TFUF | SPI_SR_TFFF | \ SPI_SR_CMDTCF | SPI_SR_SPEF | \ @@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, struct spi_transfer *transfer; bool cs = false; int status = 0; + u32 val = 0; + bool cs_change = false; message->actual_length = 0; + /* Put DSPI in running mode if halted. */ + regmap_read(dspi->regmap, SPI_MCR, &val); + if (val & SPI_MCR_HALT) { + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + !(val & SPI_SR_TXRXS)) + ; + } + list_for_each_entry(transfer, &message->transfers, transfer_list) { dspi->cur_transfer = transfer; dspi->cur_msg = message; @@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; } + cs_change = transfer->cs_change; dspi->tx = transfer->tx_buf; dspi->rx = transfer->rx_buf; dspi->len = transfer->len; @@ -962,6 +975,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); + regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); + spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, dspi->progress, !dspi->irq); @@ -988,6 +1003,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi_deassert_cs(spi, &cs); } + if (status || !cs_change) { + /* Put DSPI in stop mode */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_HALT, SPI_MCR_HALT); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + val & SPI_SR_TXRXS) + ; + } + message->status = status; spi_finalize_current_message(ctlr); @@ -1167,6 +1191,20 @@ static int dspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); +static const struct regmap_range dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(3)), + regmap_reg_range(SPI_SR, SPI_TXFR3), + regmap_reg_range(SPI_RXFR0, SPI_RXFR3), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_access_table = { + .yes_ranges = dspi_yes_ranges, + .n_yes_ranges = 
ARRAY_SIZE(dspi_yes_ranges), +}; + static const struct regmap_range dspi_volatile_ranges[] = { regmap_reg_range(SPI_MCR, SPI_TCR), regmap_reg_range(SPI_SR, SPI_SR), @@ -1184,6 +1222,8 @@ static const struct regmap_config dspi_regmap_config = { .reg_stride = 4, .max_register = 0x88, .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }; static const struct regmap_range dspi_xspi_volatile_ranges[] = { @@ -1205,6 +1245,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { .reg_stride = 4, .max_register = 0x13c, .volatile_table = &dspi_xspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }, { .name = "pushr", @@ -1227,6 +1269,8 @@ static int dspi_init(struct fsl_dspi *dspi) if (!spi_controller_is_target(dspi->ctlr)) mcr |= SPI_MCR_HOST; + mcr |= SPI_MCR_HALT; + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index c868d8b7bd1c..57cf46f69669 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/thermal.h> +#include <asm/msr.h> #include "int340x_thermal_zone.h" #include "processor_thermal_device.h" #include "../intel_soc_dts_iosf.h" @@ -153,7 +154,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev, u64 val; int err; - err = rdmsrl_safe(MSR_PLATFORM_INFO, &val); + err = rdmsrq_safe(MSR_PLATFORM_INFO, &val); if (err) return err; diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c index 5b18a46a10b0..bd2fca7dc017 100644 --- a/drivers/thermal/intel/intel_hfi.c +++ b/drivers/thermal/intel/intel_hfi.c @@ -284,7 +284,7 @@ void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) if (!raw_spin_trylock(&hfi_instance->event_lock)) return; - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr); + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr); hfi = msr & PACKAGE_THERM_STATUS_HFI_UPDATED; if (!hfi) { raw_spin_unlock(&hfi_instance->event_lock); @@ -356,9 +356,9 @@ static void hfi_enable(void) { u64 msr_val; - rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); } static void hfi_set_hw_table(struct hfi_instance *hfi_instance) @@ -368,7 +368,7 @@ static void hfi_set_hw_table(struct hfi_instance *hfi_instance) hw_table_pa = virt_to_phys(hfi_instance->hw_table); msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val); + wrmsrq(MSR_IA32_HW_FEEDBACK_PTR, msr_val); } /* Caller must hold hfi_instance_lock. */ @@ -377,9 +377,9 @@ static void hfi_disable(void) u64 msr_val; int i; - rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT; - wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); + wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val); /* * Wait for hardware to acknowledge the disabling of HFI. Some @@ -388,7 +388,7 @@ static void hfi_disable(void) * memory. 
*/ for (i = 0; i < 2000; i++) { - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED) break; diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 96a24df79686..9a4cec000910 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -340,7 +340,7 @@ static bool has_pkg_state_counter(void) /* check if any one of the counter msrs exists */ while (info->msr_index) { - if (!rdmsrl_safe(info->msr_index, &val)) + if (!rdmsrq_safe(info->msr_index, &val)) return true; info++; } @@ -356,7 +356,7 @@ static u64 pkg_state_counter(void) while (info->msr_index) { if (!info->skip) { - if (!rdmsrl_safe(info->msr_index, &val)) + if (!rdmsrq_safe(info->msr_index, &val)) count += val; else info->skip = true; diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c index 9ff0ebdde0ef..f352ecafbedf 100644 --- a/drivers/thermal/intel/intel_tcc_cooling.c +++ b/drivers/thermal/intel/intel_tcc_cooling.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/thermal.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #define TCC_PROGRAMMABLE BIT(30) #define TCC_LOCKED BIT(31) @@ -81,14 +82,14 @@ static int __init tcc_cooling_init(void) if (!id) return -ENODEV; - err = rdmsrl_safe(MSR_PLATFORM_INFO, &val); + err = rdmsrq_safe(MSR_PLATFORM_INFO, &val); if (err) return err; if (!(val & TCC_PROGRAMMABLE)) return -ENODEV; - err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val); + err = rdmsrq_safe(MSR_IA32_TEMPERATURE_TARGET, &val); if (err) return err; diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c index e69868e868eb..debc94e2dc16 100644 --- a/drivers/thermal/intel/therm_throt.c +++ b/drivers/thermal/intel/therm_throt.c @@ -273,7 +273,7 @@ void thermal_clear_package_intr_status(int level, u64 bit_mask) } msr_val &= ~bit_mask; - wrmsrl(msr, msr_val); + wrmsrq(msr, msr_val); } EXPORT_SYMBOL_GPL(thermal_clear_package_intr_status); @@ -287,7 +287,7 @@ static void get_therm_status(int level, bool *proc_hot, u8 *temp) else msr = MSR_IA32_PACKAGE_THERM_STATUS; - rdmsrl(msr, msr_val); + rdmsrq(msr, msr_val); if (msr_val & THERM_STATUS_PROCHOT_LOG) *proc_hot = true; else @@ -643,7 +643,7 @@ static void notify_thresholds(__u64 msr_val) void __weak notify_hwp_interrupt(void) { - wrmsrl_safe(MSR_HWP_STATUS, 0); + wrmsrq_safe(MSR_HWP_STATUS, 0); } /* Thermal transition interrupt handler */ @@ -654,7 +654,7 @@ void intel_thermal_interrupt(void) if (static_cpu_has(X86_FEATURE_HWP)) notify_hwp_interrupt(); - rdmsrl(MSR_IA32_THERM_STATUS, msr_val); + rdmsrq(MSR_IA32_THERM_STATUS, msr_val); /* Check for violation of core thermal thresholds*/ notify_thresholds(msr_val); @@ -669,7 +669,7 @@ void intel_thermal_interrupt(void) CORE_LEVEL); if (this_cpu_has(X86_FEATURE_PTS)) { - rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); + rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); /* check violations of package thermal thresholds */ notify_package_thresholds(msr_val); therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 496abf8e55e0..3fc679b6f11b 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -20,6 +20,7 @@ #include <linux/debugfs.h> #include <asm/cpu_device_id.h> +#include <asm/msr.h> #include 
"thermal_interrupt.h" @@ -329,6 +330,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) tj_max = intel_tcc_get_tjmax(cpu); if (tj_max < 0) return tj_max; + tj_max *= 1000; zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL); if (!zonedev) diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index d2d49264cf83..991d1573983d 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -209,7 +209,8 @@ static int lmh_probe(struct platform_device *pdev) } lmh_data->irq = platform_get_irq(pdev, 0); - lmh_data->domain = irq_domain_add_linear(np, 1, &lmh_irq_ops, lmh_data); + lmh_data->domain = irq_domain_create_linear(of_fwnode_handle(np), 1, &lmh_irq_ops, + lmh_data); if (!lmh_data->domain) { dev_err(dev, "Error adding irq_domain\n"); return -EINVAL; diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c index 2c5ddf0db40c..926f1052e6de 100644 --- a/drivers/thermal/tegra/soctherm.c +++ b/drivers/thermal/tegra/soctherm.c @@ -1234,7 +1234,7 @@ static int soctherm_oc_int_init(struct device_node *np, int num_irqs) soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type; soc_irq_cdata.irq_chip.irq_set_wake = NULL; - soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs, + soc_irq_cdata.domain = irq_domain_create_linear(of_fwnode_handle(np), num_irqs, &soctherm_oc_domain_ops, &soc_irq_cdata); diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index c0761ccc1381..77f8ddb1f207 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1849,25 +1849,38 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) ufshcd_mcq_config_esi(hba, msg); } +struct ufs_qcom_irq { + unsigned int irq; + unsigned int idx; + struct ufs_hba *hba; +}; + static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data) { - struct msi_desc *desc = data; - struct device *dev = msi_desc_to_dev(desc); - struct ufs_hba *hba = dev_get_drvdata(dev); - u32 id = desc->msi_index; - struct ufs_hw_queue *hwq = &hba->uhq[id]; + struct ufs_qcom_irq *qi = data; + struct ufs_hba *hba = qi->hba; + struct ufs_hw_queue *hwq = &hba->uhq[qi->idx]; - ufshcd_mcq_write_cqis(hba, 0x1, id); + ufshcd_mcq_write_cqis(hba, 0x1, qi->idx); ufshcd_mcq_poll_cqe_lock(hba, hwq); return IRQ_HANDLED; } +static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi) +{ + for (struct ufs_qcom_irq *q = uqi; q->irq; q++) + devm_free_irq(q->hba->dev, q->irq, q->hba); + + platform_device_msi_free_irqs_all(uqi->hba->dev); + devm_kfree(uqi->hba->dev, uqi); +} + +DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T)) + static int ufs_qcom_config_esi(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct msi_desc *desc; - struct msi_desc *failed_desc = NULL; int nr_irqs, ret; if (host->esi_enabled) @@ -1878,6 +1891,14 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) * 2. Poll queues do not need ESI. 
*/ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; + + struct ufs_qcom_irq *qi __free(ufs_qcom_irq) = + devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); + if (!qi) + return -ENOMEM; + /* Preset so __free() has a pointer to hba in all error paths */ + qi[0].hba = hba; + ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, ufs_qcom_write_msi_msg); if (ret) { @@ -1885,41 +1906,31 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) return ret; } - msi_lock_descs(hba->dev); - msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { - ret = devm_request_irq(hba->dev, desc->irq, - ufs_qcom_mcq_esi_handler, - IRQF_SHARED, "qcom-mcq-esi", desc); + for (int idx = 0; idx < nr_irqs; idx++) { + qi[idx].irq = msi_get_virq(hba->dev, idx); + qi[idx].idx = idx; + qi[idx].hba = hba; + + ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler, + IRQF_SHARED, "qcom-mcq-esi", qi + idx); if (ret) { dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n", - __func__, desc->irq, ret); - failed_desc = desc; - break; + __func__, qi[idx].irq, ret); + qi[idx].irq = 0; + return ret; } } - msi_unlock_descs(hba->dev); - if (ret) { - /* Rewind */ - msi_lock_descs(hba->dev); - msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { - if (desc == failed_desc) - break; - devm_free_irq(hba->dev, desc->irq, hba); - } - msi_unlock_descs(hba->dev); - platform_device_msi_free_irqs_all(hba->dev); - } else { - if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && - host->hw_ver.step == 0) - ufshcd_rmwl(hba, ESI_VEC_MASK, - FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), - REG_UFS_CFG3); - ufshcd_mcq_enable_esi(hba); - host->esi_enabled = true; - } + retain_and_null_ptr(qi); - return ret; + if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && + host->hw_ver.step == 0) { + ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), + REG_UFS_CFG3); + } + ufshcd_mcq_enable_esi(hba); + host->esi_enabled = true; + return 0; } static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq) diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index d36f3b6992bb..152ee3376550 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -1056,13 +1056,20 @@ int usb_stor_probe1(struct us_data **pus, goto BadDevice; /* - * Some USB host controllers can't do DMA; they have to use PIO. - * For such controllers we need to make sure the block layer sets - * up bounce buffers in addressable memory. + * Some USB host controllers can't do DMA: They have to use PIO, or they + * have to use a small dedicated local memory area, or they have other + * restrictions on addressable memory. + * + * We can't support these controllers on highmem systems as we don't + * kmap or bounce buffer. 
*/ - if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) || - bus_to_hcd(us->pusb_dev->bus)->localmem_pool) - host->no_highmem = true; + if (IS_ENABLED(CONFIG_HIGHMEM) && + (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) || + bus_to_hcd(us->pusb_dev->bus)->localmem_pool)) { + dev_warn(&intf->dev, "USB Mass Storage not supported on this host controller\n"); + result = -EINVAL; + goto release; + } /* Get the unusual_devs entries and the descriptors */ result = get_device_info(us, id, unusual_dev); @@ -1081,6 +1088,7 @@ int usb_stor_probe1(struct us_data **pus, BadDevice: usb_stor_dbg(us, "storage_probe() failed\n"); +release: release_everything(us); return result; } diff --git a/drivers/video/fbdev/geode/display_gx.c b/drivers/video/fbdev/geode/display_gx.c index b5f25dffd274..099322cefce0 100644 --- a/drivers/video/fbdev/geode/display_gx.c +++ b/drivers/video/fbdev/geode/display_gx.c @@ -13,6 +13,7 @@ #include <asm/io.h> #include <asm/div64.h> #include <asm/delay.h> +#include <asm/msr.h> #include <linux/cs5535.h> #include "gxfb.h" diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c index af996634c1a9..8d69be7c9d31 100644 --- a/drivers/video/fbdev/geode/gxfb_core.c +++ b/drivers/video/fbdev/geode/gxfb_core.c @@ -29,6 +29,7 @@ #include <linux/pci.h> #include <linux/cs5535.h> +#include <asm/msr.h> #include <asm/olpc.h> #include "gxfb.h" @@ -377,7 +378,7 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Figure out if this is a TFT or CRT part */ - rdmsrl(MSR_GX_GLD_MSR_CONFIG, val); + rdmsrq(MSR_GX_GLD_MSR_CONFIG, val); if ((val & MSR_GX_GLD_MSR_CONFIG_FP) == MSR_GX_GLD_MSR_CONFIG_FP) par->enable_crt = 0; diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c index 32baaf59fcf7..2e33da9849b0 100644 --- a/drivers/video/fbdev/geode/lxfb_ops.c +++ b/drivers/video/fbdev/geode/lxfb_ops.c @@ -11,6 +11,7 @@ #include <linux/delay.h> #include <linux/cs5535.h> +#include <asm/msr.h> #include "lxfb.h" /* TODO @@ -358,7 +359,7 @@ void lx_set_mode(struct fb_info *info) /* Set output mode */ - rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval); + rdmsrq(MSR_LX_GLD_MSR_CONFIG, msrval); msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT; if (par->output & OUTPUT_PANEL) { @@ -371,7 +372,7 @@ void lx_set_mode(struct fb_info *info) } else msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_CRT; - wrmsrl(MSR_LX_GLD_MSR_CONFIG, msrval); + wrmsrq(MSR_LX_GLD_MSR_CONFIG, msrval); /* Clear the various buffers */ /* FIXME: Adjust for panning here */ @@ -419,7 +420,7 @@ void lx_set_mode(struct fb_info *info) /* Set default watermark values */ - rdmsrl(MSR_LX_SPARE_MSR, msrval); + rdmsrq(MSR_LX_SPARE_MSR, msrval); msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO | MSR_LX_SPARE_MSR_VFIFO_ARB_SEL @@ -427,7 +428,7 @@ void lx_set_mode(struct fb_info *info) | MSR_LX_SPARE_MSR_WM_LPEN_OVRD); msrval |= MSR_LX_SPARE_MSR_DIS_VIFO_WM | MSR_LX_SPARE_MSR_DIS_INIT_V_PRI; - wrmsrl(MSR_LX_SPARE_MSR, msrval); + wrmsrq(MSR_LX_SPARE_MSR, msrval); gcfg = DC_GENERAL_CFG_DFLE; /* Display fifo enable */ gcfg |= (0x6 << DC_GENERAL_CFG_DFHPSL_SHIFT) | /* default priority */ @@ -591,10 +592,10 @@ static void lx_save_regs(struct lxfb_par *par) } while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE)); /* save MSRs */ - rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel); - rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll); - rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); - rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare); + rdmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel); + rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll); + 
rdmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); + rdmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare); write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK); @@ -664,7 +665,7 @@ static void lx_restore_display_ctlr(struct lxfb_par *par) uint32_t filt; int i; - wrmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare); + wrmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare); for (i = 0; i < ARRAY_SIZE(par->dc); i++) { switch (i) { @@ -729,8 +730,8 @@ static void lx_restore_video_proc(struct lxfb_par *par) { int i; - wrmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); - wrmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel); + wrmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg); + wrmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel); for (i = 0; i < ARRAY_SIZE(par->vp); i++) { switch (i) { diff --git a/drivers/video/fbdev/geode/suspend_gx.c b/drivers/video/fbdev/geode/suspend_gx.c index 8c49d4e98772..73307f42e343 100644 --- a/drivers/video/fbdev/geode/suspend_gx.c +++ b/drivers/video/fbdev/geode/suspend_gx.c @@ -21,8 +21,8 @@ static void gx_save_regs(struct gxfb_par *par) } while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY)); /* save MSRs */ - rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel); - rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll); + rdmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel); + rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll); write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK); @@ -43,14 +43,14 @@ static void gx_set_dotpll(uint32_t dotpll_hi) uint32_t dotpll_lo; int i; - rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo); + rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo); dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET; dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS; wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi); /* wait for the PLL to lock */ for (i = 0; i < 200; i++) { - rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo); + rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo); if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK) break; udelay(1); @@ -133,7 +133,7 @@ static void gx_restore_video_proc(struct gxfb_par *par) { int i; - wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel); + wrmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel); for (i = 0; i < ARRAY_SIZE(par->vp); i++) { switch (i) { diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 91dac2aa247c..5717c3356949 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c @@ -142,8 +142,8 @@ void gx_set_dclk_frequency(struct fb_info *info) } } - rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); - rdmsrl(MSR_GLCP_DOTPLL, dotpll); + rdmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll); + rdmsrq(MSR_GLCP_DOTPLL, dotpll); /* Program new M, N and P. */ dotpll &= 0x00000000ffffffffull; @@ -151,7 +151,7 @@ void gx_set_dclk_frequency(struct fb_info *info) dotpll |= MSR_GLCP_DOTPLL_DOTRESET; dotpll &= ~MSR_GLCP_DOTPLL_BYPASS; - wrmsrl(MSR_GLCP_DOTPLL, dotpll); + wrmsrq(MSR_GLCP_DOTPLL, dotpll); /* Program dividers. */ sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 @@ -159,15 +159,15 @@ void gx_set_dclk_frequency(struct fb_info *info) | MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 ); sys_rstpll |= pll_table[best_i].sys_rstpll_bits; - wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll); + wrmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll); /* Clear reset bit to start PLL. */ dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET); - wrmsrl(MSR_GLCP_DOTPLL, dotpll); + wrmsrq(MSR_GLCP_DOTPLL, dotpll); /* Wait for LOCK bit. 
*/ do { - rdmsrl(MSR_GLCP_DOTPLL, dotpll); + rdmsrq(MSR_GLCP_DOTPLL, dotpll); } while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK)); } @@ -180,10 +180,10 @@ gx_configure_tft(struct fb_info *info) /* Set up the DF pad select MSR */ - rdmsrl(MSR_GX_MSR_PADSEL, val); + rdmsrq(MSR_GX_MSR_PADSEL, val); val &= ~MSR_GX_MSR_PADSEL_MASK; val |= MSR_GX_MSR_PADSEL_TFT; - wrmsrl(MSR_GX_MSR_PADSEL, val); + wrmsrq(MSR_GX_MSR_PADSEL, val); /* Turn off the panel */ diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c index 1cae9b243ff8..76026d615111 100644 --- a/drivers/w1/slaves/w1_ds2406.c +++ b/drivers/w1/slaves/w1_ds2406.c @@ -29,8 +29,6 @@ static ssize_t w1_f12_read_state( { u8 w1_buf[6] = {W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0}; struct w1_slave *sl = kobj_to_w1_slave(kobj); - u16 crc = 0; - int i; ssize_t rtnval = 1; if (off != 0) @@ -47,9 +45,7 @@ static ssize_t w1_f12_read_state( w1_write_block(sl->master, w1_buf, 3); w1_read_block(sl->master, w1_buf+3, 3); - for (i = 0; i < 6; i++) - crc = crc16_byte(crc, w1_buf[i]); - if (crc == 0xb001) /* good read? */ + if (crc16(0, w1_buf, sizeof(w1_buf)) == 0xb001) /* good read? */ *buf = ((w1_buf[3]>>5)&3)|0x30; else rtnval = -EIO; @@ -66,8 +62,6 @@ static ssize_t w1_f12_write_output( { struct w1_slave *sl = kobj_to_w1_slave(kobj); u8 w1_buf[6] = {W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0}; - u16 crc = 0; - int i; ssize_t rtnval = 1; if (count != 1 || off != 0) @@ -83,9 +77,7 @@ static ssize_t w1_f12_write_output( w1_buf[3] = (((*buf)&3)<<5)|0x1F; w1_write_block(sl->master, w1_buf, 4); w1_read_block(sl->master, w1_buf+4, 2); - for (i = 0; i < 6; i++) - crc = crc16_byte(crc, w1_buf[i]); - if (crc == 0xb001) /* good read? */ + if (crc16(0, w1_buf, sizeof(w1_buf)) == 0xb001) /* good read? */ w1_write_8(sl->master, 0xFF); else rtnval = -EIO; diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index 76dffc89c641..887d5a6c155b 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -29,26 +29,13 @@ #include <linux/watchdog.h> #include <asm/machine.h> #include <asm/ebcdic.h> +#include <asm/diag288.h> #include <asm/diag.h> #include <linux/io.h> #define MAX_CMDLEN 240 #define DEFAULT_CMD "SYSTEM RESTART" -#define MIN_INTERVAL 15 /* Minimal time supported by diag88 */ -#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ - -#define WDT_DEFAULT_TIMEOUT 30 - -/* Function codes - init, change, cancel */ -#define WDT_FUNC_INIT 0 -#define WDT_FUNC_CHANGE 1 -#define WDT_FUNC_CANCEL 2 -#define WDT_FUNC_CONCEAL 0x80000000 - -/* Action codes for LPAR watchdog */ -#define LPARWDT_RESTART 0 - static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD; static bool conceal_on; static bool nowayout_info = WATCHDOG_NOWAYOUT; @@ -75,22 +62,8 @@ static char *cmd_buf; static int diag288(unsigned int func, unsigned int timeout, unsigned long action, unsigned int len) { - union register_pair r1 = { .even = func, .odd = timeout, }; - union register_pair r3 = { .even = action, .odd = len, }; - int err; - diag_stat_inc(DIAG_STAT_X288); - - err = -EINVAL; - asm volatile( - " diag %[r1],%[r3],0x288\n" - "0: la %[err],0\n" - "1:\n" - EX_TABLE(0b, 1b) - : [err] "+d" (err) - : [r1] "d" (r1.pair), [r3] "d" (r3.pair) - : "cc", "memory"); - return err; + return __diag288(func, timeout, action, len); } static int diag288_str(unsigned int func, unsigned int timeout, char *cmd) @@ -189,8 +162,6 @@ static struct watchdog_device wdt_dev = { static int __init diag288_init(void) { - int ret; - 
watchdog_set_nowayout(&wdt_dev, nowayout_info); if (machine_is_vm()) { @@ -199,24 +170,6 @@ static int __init diag288_init(void) pr_err("The watchdog cannot be initialized\n"); return -ENOMEM; } - - ret = diag288_str(WDT_FUNC_INIT, MIN_INTERVAL, "BEGIN"); - if (ret != 0) { - pr_err("The watchdog cannot be initialized\n"); - kfree(cmd_buf); - return -EINVAL; - } - } else { - if (diag288(WDT_FUNC_INIT, WDT_DEFAULT_TIMEOUT, - LPARWDT_RESTART, 0)) { - pr_err("The watchdog cannot be initialized\n"); - return -EINVAL; - } - } - - if (diag288(WDT_FUNC_CANCEL, 0, 0, 0)) { - pr_err("The watchdog cannot be deactivated\n"); - return -EINVAL; } return watchdog_register_device(&wdt_dev); @@ -228,5 +181,5 @@ static void __exit diag288_exit(void) kfree(cmd_buf); } -module_init(diag288_init); +module_cpu_feature_match(S390_CPU_FEATURE_D288, diag288_init); module_exit(diag288_exit); diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 32619d146cbc..1286d96a29bc 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -164,4 +164,5 @@ const struct address_space_operations v9fs_addr_operations = { .invalidate_folio = netfs_invalidate_folio, .direct_IO = noop_direct_IO, .writepages = netfs_writepages, + .migrate_folio = filemap_migrate_folio, }; diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 9e7b1fe82c27..bfb69e066672 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -943,7 +943,7 @@ static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry) } strcpy(p, name); - ret = lookup_one_len(buf, dentry->d_parent, len); + ret = lookup_noperm(&QSTR(buf), dentry->d_parent); if (IS_ERR(ret) || d_is_positive(ret)) goto out_s; dput(ret); diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c index a1e581946b93..0b80eb93fa40 100644 --- a/fs/afs/dir_silly.c +++ b/fs/afs/dir_silly.c @@ -113,16 +113,14 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, sdentry = NULL; do { - int slen; - dput(sdentry); sillycounter++; /* Create a silly name. Note that the ".__afs" prefix is * understood by the salvager and must not be changed. */ - slen = scnprintf(silly, sizeof(silly), ".__afs%04X", sillycounter); - sdentry = lookup_one_len(silly, dentry->d_parent, slen); + scnprintf(silly, sizeof(silly), ".__afs%04X", sillycounter); + sdentry = lookup_noperm(&QSTR(silly), dentry->d_parent); /* N.B. Better to return EBUSY here ... it could be dangerous * to delete the file while it's in use. @@ -1511,6 +1511,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type) { int ret; + req->ki_write_stream = 0; req->ki_complete = aio_complete_rw; req->private = NULL; req->ki_pos = iocb->aio_offset; diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 583ac81669c2..e51e7d88980a 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -24,10 +24,51 @@ #include <linux/uaccess.h> +#include "internal.h" + static struct vfsmount *anon_inode_mnt __ro_after_init; static struct inode *anon_inode_inode __ro_after_init; /* + * User space expects anonymous inodes to have no file type in st_mode. + * + * In particular, 'lsof' has this legacy logic: + * + * type = s->st_mode & S_IFMT; + * switch (type) { + * ... + * case 0: + * if (!strcmp(p, "anon_inode")) + * Lf->ntype = Ntype = N_ANON_INODE; + * + * to detect our old anon_inode logic. + * + * Rather than mess with our internal sane inode data, just fix it + * up here in getattr() by masking off the format bits. 
+ */ +int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path, + struct kstat *stat, u32 request_mask, + unsigned int query_flags) +{ + struct inode *inode = d_inode(path->dentry); + + generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); + stat->mode &= ~S_IFMT; + return 0; +} + +int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + struct iattr *attr) +{ + return -EOPNOTSUPP; +} + +static const struct inode_operations anon_inode_operations = { + .getattr = anon_inode_getattr, + .setattr = anon_inode_setattr, +}; + +/* * anon_inodefs_dname() is called from d_path(). */ static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen) @@ -45,6 +86,8 @@ static int anon_inodefs_init_fs_context(struct fs_context *fc) struct pseudo_fs_context *ctx = init_pseudo(fc, ANON_INODE_FS_MAGIC); if (!ctx) return -ENOMEM; + fc->s_iflags |= SB_I_NOEXEC; + fc->s_iflags |= SB_I_NODEV; ctx->dops = &anon_inodefs_dentry_operations; return 0; } @@ -66,6 +109,7 @@ static struct inode *anon_inode_make_secure_inode( if (IS_ERR(inode)) return inode; inode->i_flags &= ~S_PRIVATE; + inode->i_op = &anon_inode_operations; error = security_inode_init_security_anon(inode, &QSTR(name), context_inode); if (error) { @@ -313,6 +357,7 @@ static int __init anon_inode_init(void) anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); if (IS_ERR(anon_inode_inode)) panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode)); + anon_inode_inode->i_op = &anon_inode_operations; return 0; } diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c index c5a6aae12d2c..d8dd150cbd74 100644 --- a/fs/autofs/dev-ioctl.c +++ b/fs/autofs/dev-ioctl.c @@ -459,7 +459,8 @@ static int autofs_dev_ioctl_timeout(struct file *fp, "the parent autofs mount timeout which could " "prevent shutdown\n"); - dentry = try_lookup_one_len(param->path, base, path_len); + dentry = try_lookup_noperm(&QSTR_LEN(param->path, path_len), + base); if (IS_ERR_OR_NULL(dentry)) return dentry ? PTR_ERR(dentry) : -ENOENT; ino = autofs_dentry_ino(dentry); diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig index 07709b0d7688..8cb2b9d5da96 100644 --- a/fs/bcachefs/Kconfig +++ b/fs/bcachefs/Kconfig @@ -103,6 +103,14 @@ config BCACHEFS_PATH_TRACEPOINTS Enable extra tracepoints for debugging btree_path operations; we don't normally want these enabled because they happen at very high rates. 
+config BCACHEFS_TRANS_KMALLOC_TRACE + bool "Trace bch2_trans_kmalloc() calls" + depends on BCACHEFS_FS + +config BCACHEFS_ASYNC_OBJECT_LISTS + bool "Keep async objects on fast_lists for debugfs visibility" + depends on BCACHEFS_FS && DEBUG_FS + config MEAN_AND_VARIANCE_UNIT_TEST tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile index 9af65079374f..93c8ee5425c8 100644 --- a/fs/bcachefs/Makefile +++ b/fs/bcachefs/Makefile @@ -35,11 +35,13 @@ bcachefs-y := \ disk_accounting.o \ disk_groups.o \ ec.o \ + enumerated_ref.o \ errcode.o \ error.o \ extents.o \ extent_update.o \ eytzinger.o \ + fast_list.o \ fs.o \ fs-ioctl.o \ fs-io.o \ @@ -97,6 +99,8 @@ bcachefs-y := \ varint.o \ xattr.o +bcachefs-$(CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS) += async_objs.o + obj-$(CONFIG_MEAN_AND_VARIANCE_UNIT_TEST) += mean_and_variance_test.o # Silence "note: xyz changed in GCC X.X" messages diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 94ea9e49aec4..173e81c2bbcb 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -17,6 +17,7 @@ #include "debug.h" #include "disk_accounting.h" #include "ec.h" +#include "enumerated_ref.h" #include "error.h" #include "lru.h" #include "recovery.h" @@ -308,7 +309,8 @@ int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k, "data type inconsistency"); bkey_fsck_err_on(!a.io_time[READ] && - c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs, + !(c->recovery.passes_to_run & + BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs)), c, alloc_key_cached_but_read_time_zero, "cached bucket with read_time == 0"); break; @@ -478,12 +480,27 @@ struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, enum btree_iter_update_trigger_flags flags) { struct btree_iter iter; - struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos); - int ret = PTR_ERR_OR_ZERO(a); - if (ret) + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, pos, + BTREE_ITER_with_updates| + BTREE_ITER_cached| + BTREE_ITER_intent); + int ret = bkey_err(k); + if (unlikely(ret)) return ERR_PTR(ret); - ret = bch2_trans_update(trans, &iter, &a->k_i, flags); + if ((void *) k.v >= trans->mem && + (void *) k.v < trans->mem + trans->mem_top) { + bch2_trans_iter_exit(trans, &iter); + return container_of(bkey_s_c_to_alloc_v4(k).v, struct bkey_i_alloc_v4, v); + } + + struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k); + if (IS_ERR(a)) { + bch2_trans_iter_exit(trans, &iter); + return a; + } + + ret = bch2_trans_update_ip(trans, &iter, &a->k_i, flags, _RET_IP_); bch2_trans_iter_exit(trans, &iter); return unlikely(ret) ? 
ERR_PTR(ret) : a; } @@ -913,15 +930,6 @@ int bch2_trigger_alloc(struct btree_trans *trans, goto err; } - if ((flags & BTREE_TRIGGER_bucket_invalidate) && - old_a->cached_sectors) { - ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx, - -((s64) old_a->cached_sectors), - flags & BTREE_TRIGGER_gc); - if (ret) - goto err; - } - ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags); if (ret) goto err; @@ -1381,7 +1389,7 @@ static void check_discard_freespace_key_work(struct work_struct *work) container_of(work, struct check_discard_freespace_key_async, work); bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos)); - bch2_write_ref_put(w->c, BCH_WRITE_REF_check_discard_freespace_key); + enumerated_ref_put(&w->c->writes, BCH_WRITE_REF_check_discard_freespace_key); kfree(w); } @@ -1458,7 +1466,7 @@ delete: if (!w) goto out; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_check_discard_freespace_key)) { + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_check_discard_freespace_key)) { kfree(w); goto out; } @@ -1467,6 +1475,8 @@ delete: w->c = c; w->pos = BBPOS(iter->btree_id, iter->pos); queue_work(c->write_ref_wq, &w->work); + + ret = 1; /* don't allocate from this bucket */ goto out; } } @@ -1806,19 +1816,6 @@ struct discard_buckets_state { u64 discarded; }; -/* - * This is needed because discard is both a filesystem option and a device - * option, and mount options are supposed to apply to that mount and not be - * persisted, i.e. if it's set as a mount option we can't propagate it to the - * device. - */ -static inline bool discard_opt_enabled(struct bch_fs *c, struct bch_dev *ca) -{ - return test_bit(BCH_FS_discard_mount_opt_set, &c->flags) - ? c->opts.discard - : ca->mi.discard; -} - static int bch2_discard_one_bucket(struct btree_trans *trans, struct bch_dev *ca, struct btree_iter *need_discard_iter, @@ -1882,7 +1879,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, s->discarded++; *discard_pos_done = iter.pos; - if (discard_opt_enabled(c, ca) && !c->opts.nochanges) { + if (bch2_discard_opt_enabled(c, ca) && !c->opts.nochanges) { /* * This works without any other locks because this is the only * thread that removes items from the need_discard tree @@ -1952,26 +1949,26 @@ static void bch2_do_discards_work(struct work_struct *work) trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret)); - percpu_ref_put(&ca->io_ref[WRITE]); - bch2_write_ref_put(c, BCH_WRITE_REF_discard); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard); } void bch2_dev_do_discards(struct bch_dev *ca) { struct bch_fs *c = ca->fs; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_discard)) return; - if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) + if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_dev_do_discards)) goto put_write_ref; if (queue_work(c->write_ref_wq, &ca->discard_work)) return; - percpu_ref_put(&ca->io_ref[WRITE]); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards); put_write_ref: - bch2_write_ref_put(c, BCH_WRITE_REF_discard); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard); } void bch2_do_discards(struct bch_fs *c) @@ -2047,8 +2044,8 @@ static void bch2_do_discards_fast_work(struct work_struct *work) trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret)); bch2_trans_put(trans); - 
percpu_ref_put(&ca->io_ref[WRITE]); - bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard_fast); } static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket) @@ -2058,18 +2055,18 @@ static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket) if (discard_in_flight_add(ca, bucket, false)) return; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_discard_fast)) return; - if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) + if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_discard_one_bucket_fast)) goto put_ref; if (queue_work(c->write_ref_wq, &ca->discard_fast_work)) return; - percpu_ref_put(&ca->io_ref[WRITE]); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast); put_ref: - bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard_fast); } static int invalidate_one_bp(struct btree_trans *trans, @@ -2180,8 +2177,11 @@ static int invalidate_one_bucket(struct btree_trans *trans, BUG_ON(a->data_type != BCH_DATA_cached); BUG_ON(a->dirty_sectors); - if (!a->cached_sectors) - bch_err(c, "invalidating empty bucket, confused"); + if (!a->cached_sectors) { + bch2_check_bucket_backpointer_mismatch(trans, ca, bucket.offset, + true, last_flushed); + goto out; + } unsigned cached_sectors = a->cached_sectors; u8 gen = a->gen; @@ -2261,27 +2261,27 @@ restart_err: bch2_trans_iter_exit(trans, &iter); err: bch2_trans_put(trans); - percpu_ref_put(&ca->io_ref[WRITE]); bch2_bkey_buf_exit(&last_flushed, c); - bch2_write_ref_put(c, BCH_WRITE_REF_invalidate); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate); } void bch2_dev_do_invalidates(struct bch_dev *ca) { struct bch_fs *c = ca->fs; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_invalidate)) return; - if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) + if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_do_invalidates)) goto put_ref; if (queue_work(c->write_ref_wq, &ca->invalidate_work)) return; - percpu_ref_put(&ca->io_ref[WRITE]); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates); put_ref: - bch2_write_ref_put(c, BCH_WRITE_REF_invalidate); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate); } void bch2_do_invalidates(struct bch_fs *c) @@ -2392,14 +2392,16 @@ bkey_err: int bch2_fs_freespace_init(struct bch_fs *c) { - int ret = 0; - bool doing_init = false; + if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) + return 0; + /* * We can crash during the device add path, so we need to check this on * every mount: */ + bool doing_init = false; for_each_member_device(c, ca) { if (ca->mi.freespace_initialized) continue; @@ -2409,7 +2411,7 @@ int bch2_fs_freespace_init(struct bch_fs *c) doing_init = true; } - ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets); + int ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets); if (ret) { bch2_dev_put(ca); bch_err_fn(c, ret); @@ -2439,8 +2441,7 @@ int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca) * We clear the LRU and need_discard btrees first so that we don't race * with bch2_do_invalidates() and bch2_do_discards() */ - ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?: - 
bch2_btree_delete_range(c, BTREE_ID_lru, start, end, + ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end, BTREE_TRIGGER_norun, NULL) ?: bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end, BTREE_TRIGGER_norun, NULL) ?: @@ -2503,15 +2504,15 @@ void bch2_recalc_capacity(struct bch_fs *c) lockdep_assert_held(&c->state_lock); - for_each_online_member(c, ca) { - struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi; - - ra_pages += bdi->ra_pages; - } + rcu_read_lock(); + for_each_member_device_rcu(c, ca, NULL) { + struct block_device *bdev = READ_ONCE(ca->disk_sb.bdev); + if (bdev) + ra_pages += bdev->bd_disk->bdi->ra_pages; - bch2_set_ra_pages(c, ra_pages); + if (ca->mi.state != BCH_MEMBER_STATE_rw) + continue; - __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) { u64 dev_reserve = 0; /* @@ -2548,6 +2549,9 @@ void bch2_recalc_capacity(struct bch_fs *c) bucket_size_max = max_t(unsigned, bucket_size_max, ca->mi.bucket_size); } + rcu_read_unlock(); + + bch2_set_ra_pages(c, ra_pages); gc_reserve = c->opts.gc_reserve_bytes ? c->opts.gc_reserve_bytes >> 9 @@ -2570,27 +2574,41 @@ u64 bch2_min_rw_member_capacity(struct bch_fs *c) { u64 ret = U64_MAX; - for_each_rw_member(c, ca) + rcu_read_lock(); + for_each_rw_member_rcu(c, ca) ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size); + rcu_read_unlock(); return ret; } static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca) { struct open_bucket *ob; - bool ret = false; for (ob = c->open_buckets; ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) { - spin_lock(&ob->lock); - if (ob->valid && !ob->on_partial_list && - ob->dev == ca->dev_idx) - ret = true; - spin_unlock(&ob->lock); + scoped_guard(spinlock, &ob->lock) { + if (ob->valid && !ob->on_partial_list && + ob->dev == ca->dev_idx) + return true; + } } - return ret; + return false; +} + +void bch2_dev_allocator_set_rw(struct bch_fs *c, struct bch_dev *ca, bool rw) +{ + /* BCH_DATA_free == all rw devs */ + + for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++) + if (rw && + (i == BCH_DATA_free || + (ca->mi.data_allowed & BIT(i)))) + set_bit(ca->dev_idx, c->rw_devs[i].d); + else + clear_bit(ca->dev_idx, c->rw_devs[i].d); } /* device goes ro: */ @@ -2599,9 +2617,7 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) lockdep_assert_held(&c->state_lock); /* First, remove device from allocation groups: */ - - for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++) - clear_bit(ca->dev_idx, c->rw_devs[i].d); + bch2_dev_allocator_set_rw(c, ca, false); c->rw_devs_change_count++; @@ -2635,10 +2651,7 @@ void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) { lockdep_assert_held(&c->state_lock); - for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++) - if (ca->mi.data_allowed & (1 << i)) - set_bit(ca->dev_idx, c->rw_devs[i].d); - + bch2_dev_allocator_set_rw(c, ca, true); c->rw_devs_change_count++; } diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h index 34b3d6ac4fbb..4f94c6a661bf 100644 --- a/fs/bcachefs/alloc_background.h +++ b/fs/bcachefs/alloc_background.h @@ -350,6 +350,7 @@ int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *); void bch2_recalc_capacity(struct bch_fs *); u64 bch2_min_rw_member_capacity(struct bch_fs *); +void bch2_dev_allocator_set_rw(struct bch_fs *, struct bch_dev *, bool); void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *); void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *); diff --git a/fs/bcachefs/alloc_foreground.c 
b/fs/bcachefs/alloc_foreground.c index 7ec022e9361a..1a52c12c51ae 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -154,7 +154,7 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c) static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b) { - if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs) + if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_trans_mark_dev_sbs)) return false; return bch2_is_superblock_bucket(ca, b); @@ -180,11 +180,11 @@ static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob) } static inline bool may_alloc_bucket(struct bch_fs *c, - struct bpos bucket, - struct bucket_alloc_state *s) + struct alloc_request *req, + struct bpos bucket) { if (bch2_bucket_is_open(c, bucket.inode, bucket.offset)) { - s->skipped_open++; + req->counters.skipped_open++; return false; } @@ -193,36 +193,37 @@ static inline bool may_alloc_bucket(struct bch_fs *c, bucket.inode, bucket.offset); if (journal_seq_ready > c->journal.flushed_seq_ondisk) { if (journal_seq_ready > c->journal.flushing_seq) - s->need_journal_commit++; - s->skipped_need_journal_commit++; + req->counters.need_journal_commit++; + req->counters.skipped_need_journal_commit++; return false; } if (bch2_bucket_nocow_is_locked(&c->nocow_locks, bucket)) { - s->skipped_nocow++; + req->counters.skipped_nocow++; return false; } return true; } -static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, +static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, + struct alloc_request *req, u64 bucket, u8 gen, - enum bch_watermark watermark, - struct bucket_alloc_state *s, struct closure *cl) { + struct bch_dev *ca = req->ca; + if (unlikely(is_superblock_bucket(c, ca, bucket))) return NULL; if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) { - s->skipped_nouse++; + req->counters.skipped_nouse++; return NULL; } spin_lock(&c->freelist_lock); - if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(watermark))) { + if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(req->watermark))) { if (cl) closure_wait(&c->open_buckets_wait, cl); @@ -234,7 +235,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * /* Recheck under lock: */ if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { spin_unlock(&c->freelist_lock); - s->skipped_open++; + req->counters.skipped_open++; return NULL; } @@ -258,16 +259,15 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * return ob; } -static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca, - enum bch_watermark watermark, - struct bucket_alloc_state *s, +static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, + struct alloc_request *req, struct btree_iter *freespace_iter, struct closure *cl) { struct bch_fs *c = trans->c; u64 b = freespace_iter->pos.offset & ~(~0ULL << 56); - if (!may_alloc_bucket(c, POS(ca->dev_idx, b), s)) + if (!may_alloc_bucket(c, req, POS(req->ca->dev_idx, b))) return NULL; u8 gen; @@ -277,7 +277,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc if (ret) return NULL; - return __try_alloc_bucket(c, ca, b, gen, watermark, s, cl); + return __try_alloc_bucket(c, req, b, gen, cl); } /* @@ -285,17 +285,16 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc */ static noinline struct open_bucket * 
bch2_bucket_alloc_early(struct btree_trans *trans, - struct bch_dev *ca, - enum bch_watermark watermark, - struct bucket_alloc_state *s, + struct alloc_request *req, struct closure *cl) { struct bch_fs *c = trans->c; + struct bch_dev *ca = req->ca; struct btree_iter iter, citer; struct bkey_s_c k, ck; struct open_bucket *ob = NULL; u64 first_bucket = ca->mi.first_bucket; - u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap]; + u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; u64 alloc_start = max(first_bucket, *dev_alloc_cursor); u64 alloc_cursor = alloc_start; int ret; @@ -317,10 +316,10 @@ again: if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) break; - if (s->btree_bitmap != BTREE_BITMAP_ANY && - s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, + if (req->btree_bitmap != BTREE_BITMAP_ANY && + req->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { - if (s->btree_bitmap == BTREE_BITMAP_YES && + if (req->btree_bitmap == BTREE_BITMAP_YES && bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) break; @@ -328,8 +327,8 @@ again: round_up(bucket_to_sector(ca, bucket) + 1, 1ULL << ca->mi.btree_bitmap_shift)); bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket)); - s->buckets_seen++; - s->skipped_mi_btree_bitmap++; + req->counters.buckets_seen++; + req->counters.skipped_mi_btree_bitmap++; continue; } @@ -348,11 +347,10 @@ again: if (a->data_type != BCH_DATA_free) goto next; - s->buckets_seen++; + req->counters.buckets_seen++; - ob = may_alloc_bucket(c, k.k->p, s) - ? __try_alloc_bucket(c, ca, k.k->p.offset, a->gen, - watermark, s, cl) + ob = may_alloc_bucket(c, req, k.k->p) + ? __try_alloc_bucket(c, req, k.k->p.offset, a->gen, cl) : NULL; next: bch2_set_btree_iter_dontneed(trans, &citer); @@ -378,15 +376,14 @@ next: } static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans, - struct bch_dev *ca, - enum bch_watermark watermark, - struct bucket_alloc_state *s, - struct closure *cl) + struct alloc_request *req, + struct closure *cl) { + struct bch_dev *ca = req->ca; struct btree_iter iter; struct bkey_s_c k; struct open_bucket *ob = NULL; - u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap]; + u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor)); u64 alloc_cursor = alloc_start; int ret; @@ -402,13 +399,13 @@ again: iter.k.size = iter.k.p.offset - iter.pos.offset; while (iter.k.size) { - s->buckets_seen++; + req->counters.buckets_seen++; u64 bucket = iter.pos.offset & ~(~0ULL << 56); - if (s->btree_bitmap != BTREE_BITMAP_ANY && - s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, + if (req->btree_bitmap != BTREE_BITMAP_ANY && + req->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { - if (s->btree_bitmap == BTREE_BITMAP_YES && + if (req->btree_bitmap == BTREE_BITMAP_YES && bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) goto fail; @@ -418,11 +415,11 @@ again: alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56)); bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor)); - s->skipped_mi_btree_bitmap++; + req->counters.skipped_mi_btree_bitmap++; goto next; } - ob = try_alloc_bucket(trans, ca, watermark, s, &iter, cl); + ob = try_alloc_bucket(trans, req, &iter, cl); if (ob) { if (!IS_ERR(ob)) *dev_alloc_cursor = iter.pos.offset; @@ -453,33 +450,30 @@ fail: return 
ob; } -static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca, - enum bch_watermark watermark, - enum bch_data_type data_type, +static noinline void trace_bucket_alloc2(struct bch_fs *c, + struct alloc_request *req, struct closure *cl, - struct bch_dev_usage *usage, - struct bucket_alloc_state *s, struct open_bucket *ob) { struct printbuf buf = PRINTBUF; printbuf_tabstop_push(&buf, 24); - prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx); - prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]); - prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]); + prt_printf(&buf, "dev\t%s (%u)\n", req->ca->name, req->ca->dev_idx); + prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[req->watermark]); + prt_printf(&buf, "data type\t%s\n", __bch2_data_types[req->data_type]); prt_printf(&buf, "blocking\t%u\n", cl != NULL); - prt_printf(&buf, "free\t%llu\n", usage->buckets[BCH_DATA_free]); - prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark)); - prt_printf(&buf, "copygc_wait\t%lu/%lli\n", + prt_printf(&buf, "free\t%llu\n", req->usage.buckets[BCH_DATA_free]); + prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(req->ca, req->usage, req->watermark)); + prt_printf(&buf, "copygc_wait\t%llu/%lli\n", bch2_copygc_wait_amount(c), c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now)); - prt_printf(&buf, "seen\t%llu\n", s->buckets_seen); - prt_printf(&buf, "open\t%llu\n", s->skipped_open); - prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit); - prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow); - prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse); - prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap); + prt_printf(&buf, "seen\t%llu\n", req->counters.buckets_seen); + prt_printf(&buf, "open\t%llu\n", req->counters.skipped_open); + prt_printf(&buf, "need journal commit\t%llu\n", req->counters.skipped_need_journal_commit); + prt_printf(&buf, "nocow\t%llu\n", req->counters.skipped_nocow); + prt_printf(&buf, "nouse\t%llu\n", req->counters.skipped_nouse); + prt_printf(&buf, "mi_btree_bitmap\t%llu\n", req->counters.skipped_mi_btree_bitmap); if (!IS_ERR(ob)) { prt_printf(&buf, "allocated\t%llu\n", ob->bucket); @@ -495,47 +489,42 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca, /** * bch2_bucket_alloc_trans - allocate a single bucket from a specific device * @trans: transaction object - * @ca: device to allocate from - * @watermark: how important is this allocation? - * @data_type: BCH_DATA_journal, btree, user... + * @req: state for the entire allocation * @cl: if not NULL, closure to be used to wait if buckets not available * @nowait: if true, do not wait for buckets to become available - * @usage: for secondarily also returning the current device usage * * Returns: an open_bucket on success, or an ERR_PTR() on failure. 
*/ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans, - struct bch_dev *ca, - enum bch_watermark watermark, - enum bch_data_type data_type, - struct closure *cl, - bool nowait, - struct bch_dev_usage *usage) + struct alloc_request *req, + struct closure *cl, + bool nowait) { struct bch_fs *c = trans->c; + struct bch_dev *ca = req->ca; struct open_bucket *ob = NULL; bool freespace = READ_ONCE(ca->mi.freespace_initialized); u64 avail; - struct bucket_alloc_state s = { - .btree_bitmap = data_type == BCH_DATA_btree, - }; bool waiting = nowait; + + req->btree_bitmap = req->data_type == BCH_DATA_btree; + memset(&req->counters, 0, sizeof(req->counters)); again: - bch2_dev_usage_read_fast(ca, usage); - avail = dev_buckets_free(ca, *usage, watermark); + bch2_dev_usage_read_fast(ca, &req->usage); + avail = dev_buckets_free(ca, req->usage, req->watermark); - if (usage->buckets[BCH_DATA_need_discard] > avail) + if (req->usage.buckets[BCH_DATA_need_discard] > avail) bch2_dev_do_discards(ca); - if (usage->buckets[BCH_DATA_need_gc_gens] > avail) + if (req->usage.buckets[BCH_DATA_need_gc_gens] > avail) bch2_gc_gens_async(c); - if (should_invalidate_buckets(ca, *usage)) + if (should_invalidate_buckets(ca, req->usage)) bch2_dev_do_invalidates(ca); if (!avail) { - if (watermark > BCH_WATERMARK_normal && - c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) + if (req->watermark > BCH_WATERMARK_normal && + c->recovery.pass_done < BCH_RECOVERY_PASS_check_allocations) goto alloc; if (cl && !waiting) { @@ -554,18 +543,18 @@ again: closure_wake_up(&c->freelist_wait); alloc: ob = likely(freespace) - ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl) - : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl); + ? bch2_bucket_alloc_freelist(trans, req, cl) + : bch2_bucket_alloc_early(trans, req, cl); - if (s.need_journal_commit * 2 > avail) + if (req->counters.need_journal_commit * 2 > avail) bch2_journal_flush_async(&c->journal, NULL); - if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) { - s.btree_bitmap = BTREE_BITMAP_ANY; + if (!ob && req->btree_bitmap != BTREE_BITMAP_ANY) { + req->btree_bitmap = BTREE_BITMAP_ANY; goto alloc; } - if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) { + if (!ob && freespace && c->recovery.pass_done < BCH_RECOVERY_PASS_check_alloc_info) { freespace = false; goto alloc; } @@ -574,7 +563,7 @@ err: ob = ERR_PTR(-BCH_ERR_no_buckets_found); if (!IS_ERR(ob)) - ob->data_type = data_type; + ob->data_type = req->data_type; if (!IS_ERR(ob)) count_event(c, bucket_alloc); @@ -584,7 +573,7 @@ err: if (!IS_ERR(ob) ? 
trace_bucket_alloc_enabled() : trace_bucket_alloc_fail_enabled()) - trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob); + trace_bucket_alloc2(c, req, cl, ob); return ob; } @@ -594,12 +583,15 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca, enum bch_data_type data_type, struct closure *cl) { - struct bch_dev_usage usage; struct open_bucket *ob; + struct alloc_request req = { + .watermark = watermark, + .data_type = data_type, + .ca = ca, + }; bch2_trans_do(c, - PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark, - data_type, cl, false, &usage))); + PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, &req, cl, false))); return ob; } @@ -693,24 +685,20 @@ void bch2_dev_stripe_increment(struct bch_dev *ca, } static int add_new_bucket(struct bch_fs *c, - struct open_buckets *ptrs, - struct bch_devs_mask *devs_may_alloc, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - struct open_bucket *ob) + struct alloc_request *req, + struct open_bucket *ob) { unsigned durability = ob_dev(c, ob)->mi.durability; - BUG_ON(*nr_effective >= nr_replicas); + BUG_ON(req->nr_effective >= req->nr_replicas); - __clear_bit(ob->dev, devs_may_alloc->d); - *nr_effective += durability; - *have_cache |= !durability; + __clear_bit(ob->dev, req->devs_may_alloc.d); + req->nr_effective += durability; + req->have_cache |= !durability; - ob_push(c, ptrs, ob); + ob_push(c, &req->ptrs, ob); - if (*nr_effective >= nr_replicas) + if (req->nr_effective >= req->nr_replicas) return 1; if (ob->ec) return 1; @@ -718,39 +706,31 @@ static int add_new_bucket(struct bch_fs *c, } int bch2_bucket_alloc_set_trans(struct btree_trans *trans, - struct open_buckets *ptrs, - struct dev_stripe_state *stripe, - struct bch_devs_mask *devs_may_alloc, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - enum bch_write_flags flags, - enum bch_data_type data_type, - enum bch_watermark watermark, - struct closure *cl) + struct alloc_request *req, + struct dev_stripe_state *stripe, + struct closure *cl) { struct bch_fs *c = trans->c; int ret = -BCH_ERR_insufficient_devices; - BUG_ON(*nr_effective >= nr_replicas); + BUG_ON(req->nr_effective >= req->nr_replicas); - struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc); + struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, &req->devs_may_alloc); darray_for_each(devs_sorted, i) { - struct bch_dev *ca = bch2_dev_tryget_noerror(c, *i); - if (!ca) + req->ca = bch2_dev_tryget_noerror(c, *i); + if (!req->ca) continue; - if (!ca->mi.durability && *have_cache) { - bch2_dev_put(ca); + if (!req->ca->mi.durability && req->have_cache) { + bch2_dev_put(req->ca); continue; } - struct bch_dev_usage usage; - struct open_bucket *ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, - cl, flags & BCH_WRITE_alloc_nowait, &usage); + struct open_bucket *ob = bch2_bucket_alloc_trans(trans, req, cl, + req->flags & BCH_WRITE_alloc_nowait); if (!IS_ERR(ob)) - bch2_dev_stripe_increment_inlined(ca, stripe, &usage); - bch2_dev_put(ca); + bch2_dev_stripe_increment_inlined(req->ca, stripe, &req->usage); + bch2_dev_put(req->ca); if (IS_ERR(ob)) { ret = PTR_ERR(ob); @@ -759,9 +739,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans, continue; } - if (add_new_bucket(c, ptrs, devs_may_alloc, - nr_replicas, nr_effective, - have_cache, ob)) { + if (add_new_bucket(c, req, ob)) { ret = 0; break; } @@ -779,34 +757,27 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans, */ 
static int bucket_alloc_from_stripe(struct btree_trans *trans, - struct open_buckets *ptrs, - struct write_point *wp, - struct bch_devs_mask *devs_may_alloc, - u16 target, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - enum bch_watermark watermark, - enum bch_write_flags flags, - struct closure *cl) + struct alloc_request *req, + struct closure *cl) { struct bch_fs *c = trans->c; int ret = 0; - if (nr_replicas < 2) + if (req->nr_replicas < 2) return 0; - if (ec_open_bucket(c, ptrs)) + if (ec_open_bucket(c, &req->ptrs)) return 0; struct ec_stripe_head *h = - bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl); + bch2_ec_stripe_head_get(trans, req, 0, cl); if (IS_ERR(h)) return PTR_ERR(h); if (!h) return 0; - struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc); + struct dev_alloc_list devs_sorted = + bch2_dev_alloc_list(c, &req->wp->stripe, &req->devs_may_alloc); darray_for_each(devs_sorted, i) for (unsigned ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) { if (!h->s->blocks[ec_idx]) @@ -818,9 +789,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans, ob->ec = h->s; ec_stripe_new_get(h->s, STRIPE_REF_io); - ret = add_new_bucket(c, ptrs, devs_may_alloc, - nr_replicas, nr_effective, - have_cache, ob); + ret = add_new_bucket(c, req, ob); goto out; } } @@ -832,65 +801,49 @@ out: /* Sector allocator */ static bool want_bucket(struct bch_fs *c, - struct write_point *wp, - struct bch_devs_mask *devs_may_alloc, - bool *have_cache, bool ec, + struct alloc_request *req, struct open_bucket *ob) { struct bch_dev *ca = ob_dev(c, ob); - if (!test_bit(ob->dev, devs_may_alloc->d)) + if (!test_bit(ob->dev, req->devs_may_alloc.d)) return false; - if (ob->data_type != wp->data_type) + if (ob->data_type != req->wp->data_type) return false; if (!ca->mi.durability && - (wp->data_type == BCH_DATA_btree || ec || *have_cache)) + (req->wp->data_type == BCH_DATA_btree || req->ec || req->have_cache)) return false; - if (ec != (ob->ec != NULL)) + if (req->ec != (ob->ec != NULL)) return false; return true; } static int bucket_alloc_set_writepoint(struct bch_fs *c, - struct open_buckets *ptrs, - struct write_point *wp, - struct bch_devs_mask *devs_may_alloc, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - bool ec) + struct alloc_request *req) { - struct open_buckets ptrs_skip = { .nr = 0 }; struct open_bucket *ob; unsigned i; int ret = 0; - open_bucket_for_each(c, &wp->ptrs, ob, i) { - if (!ret && want_bucket(c, wp, devs_may_alloc, - have_cache, ec, ob)) - ret = add_new_bucket(c, ptrs, devs_may_alloc, - nr_replicas, nr_effective, - have_cache, ob); + req->scratch_ptrs.nr = 0; + + open_bucket_for_each(c, &req->wp->ptrs, ob, i) { + if (!ret && want_bucket(c, req, ob)) + ret = add_new_bucket(c, req, ob); else - ob_push(c, &ptrs_skip, ob); + ob_push(c, &req->scratch_ptrs, ob); } - wp->ptrs = ptrs_skip; + req->wp->ptrs = req->scratch_ptrs; return ret; } static int bucket_alloc_set_partial(struct bch_fs *c, - struct open_buckets *ptrs, - struct write_point *wp, - struct bch_devs_mask *devs_may_alloc, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, bool ec, - enum bch_watermark watermark) + struct alloc_request *req) { int i, ret = 0; @@ -905,13 +858,12 @@ static int bucket_alloc_set_partial(struct bch_fs *c, for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) { struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i]; - if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, 
ob)) { + if (want_bucket(c, req, ob)) { struct bch_dev *ca = ob_dev(c, ob); - struct bch_dev_usage usage; u64 avail; - bch2_dev_usage_read_fast(ca, &usage); - avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets; + bch2_dev_usage_read_fast(ca, &req->usage); + avail = dev_buckets_free(ca, req->usage, req->watermark) + ca->nr_partial_buckets; if (!avail) continue; @@ -924,9 +876,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c, bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--; rcu_read_unlock(); - ret = add_new_bucket(c, ptrs, devs_may_alloc, - nr_replicas, nr_effective, - have_cache, ob); + ret = add_new_bucket(c, req, ob); if (ret) break; } @@ -937,61 +887,41 @@ unlock: } static int __open_bucket_add_buckets(struct btree_trans *trans, - struct open_buckets *ptrs, - struct write_point *wp, - struct bch_devs_list *devs_have, - u16 target, - bool erasure_code, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - enum bch_watermark watermark, - enum bch_write_flags flags, - struct closure *_cl) + struct alloc_request *req, + struct closure *_cl) { struct bch_fs *c = trans->c; - struct bch_devs_mask devs; struct open_bucket *ob; struct closure *cl = NULL; unsigned i; int ret; - devs = target_rw_devs(c, wp->data_type, target); + req->devs_may_alloc = target_rw_devs(c, req->wp->data_type, req->target); /* Don't allocate from devices we already have pointers to: */ - darray_for_each(*devs_have, i) - __clear_bit(*i, devs.d); + darray_for_each(*req->devs_have, i) + __clear_bit(*i, req->devs_may_alloc.d); - open_bucket_for_each(c, ptrs, ob, i) - __clear_bit(ob->dev, devs.d); + open_bucket_for_each(c, &req->ptrs, ob, i) + __clear_bit(ob->dev, req->devs_may_alloc.d); - ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs, - nr_replicas, nr_effective, - have_cache, erasure_code); + ret = bucket_alloc_set_writepoint(c, req); if (ret) return ret; - ret = bucket_alloc_set_partial(c, ptrs, wp, &devs, - nr_replicas, nr_effective, - have_cache, erasure_code, watermark); + ret = bucket_alloc_set_partial(c, req); if (ret) return ret; - if (erasure_code) { - ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs, - target, - nr_replicas, nr_effective, - have_cache, - watermark, flags, _cl); + if (req->ec) { + ret = bucket_alloc_from_stripe(trans, req, _cl); } else { retry_blocking: /* * Try nonblocking first, so that if one device is full we'll try from * other devices: */ - ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs, - nr_replicas, nr_effective, have_cache, - flags, wp->data_type, watermark, cl); + ret = bch2_bucket_alloc_set_trans(trans, req, &req->wp->stripe, cl); if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart) && !bch2_err_matches(ret, BCH_ERR_insufficient_devices) && @@ -1005,38 +935,27 @@ retry_blocking: } static int open_bucket_add_buckets(struct btree_trans *trans, - struct open_buckets *ptrs, - struct write_point *wp, - struct bch_devs_list *devs_have, - u16 target, - unsigned erasure_code, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - enum bch_watermark watermark, - enum bch_write_flags flags, - struct closure *cl) + struct alloc_request *req, + struct closure *cl) { int ret; - if (erasure_code && !ec_open_bucket(trans->c, ptrs)) { - ret = __open_bucket_add_buckets(trans, ptrs, wp, - devs_have, target, erasure_code, - nr_replicas, nr_effective, have_cache, - watermark, flags, cl); + if (req->ec && !ec_open_bucket(trans->c, &req->ptrs)) { + ret = __open_bucket_add_buckets(trans, req, cl); if 
(bch2_err_matches(ret, BCH_ERR_transaction_restart) || bch2_err_matches(ret, BCH_ERR_operation_blocked) || bch2_err_matches(ret, BCH_ERR_freelist_empty) || bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) return ret; - if (*nr_effective >= nr_replicas) + if (req->nr_effective >= req->nr_replicas) return 0; } - ret = __open_bucket_add_buckets(trans, ptrs, wp, - devs_have, target, false, - nr_replicas, nr_effective, have_cache, - watermark, flags, cl); + bool ec = false; + swap(ec, req->ec); + ret = __open_bucket_add_buckets(trans, req, cl); + swap(ec, req->ec); + return ret < 0 ? ret : 0; } @@ -1289,26 +1208,26 @@ out: static noinline void deallocate_extra_replicas(struct bch_fs *c, - struct open_buckets *ptrs, - struct open_buckets *ptrs_no_use, - unsigned extra_replicas) + struct alloc_request *req) { - struct open_buckets ptrs2 = { 0 }; struct open_bucket *ob; + unsigned extra_replicas = req->nr_effective - req->nr_replicas; unsigned i; - open_bucket_for_each(c, ptrs, ob, i) { + req->scratch_ptrs.nr = 0; + + open_bucket_for_each(c, &req->ptrs, ob, i) { unsigned d = ob_dev(c, ob)->mi.durability; if (d && d <= extra_replicas) { extra_replicas -= d; - ob_push(c, ptrs_no_use, ob); + ob_push(c, &req->wp->ptrs, ob); } else { - ob_push(c, &ptrs2, ob); + ob_push(c, &req->scratch_ptrs, ob); } } - *ptrs = ptrs2; + req->ptrs = req->scratch_ptrs; } /* @@ -1327,51 +1246,53 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans, struct write_point **wp_ret) { struct bch_fs *c = trans->c; - struct write_point *wp; struct open_bucket *ob; - struct open_buckets ptrs; - unsigned nr_effective, write_points_nr; - bool have_cache; - int ret; + unsigned write_points_nr; int i; + struct alloc_request *req = bch2_trans_kmalloc_nomemzero(trans, sizeof(*req)); + int ret = PTR_ERR_OR_ZERO(req); + if (unlikely(ret)) + return ret; + if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING)) erasure_code = false; + req->nr_replicas = nr_replicas; + req->target = target; + req->ec = erasure_code; + req->watermark = watermark; + req->flags = flags; + req->devs_have = devs_have; + BUG_ON(!nr_replicas || !nr_replicas_required); retry: - ptrs.nr = 0; - nr_effective = 0; - write_points_nr = c->write_points_nr; - have_cache = false; + req->ptrs.nr = 0; + req->nr_effective = 0; + req->have_cache = false; + write_points_nr = c->write_points_nr; + + *wp_ret = req->wp = writepoint_find(trans, write_point.v); - *wp_ret = wp = writepoint_find(trans, write_point.v); + req->data_type = req->wp->data_type; ret = bch2_trans_relock(trans); if (ret) goto err; /* metadata may not allocate on cache devices: */ - if (wp->data_type != BCH_DATA_user) - have_cache = true; + if (req->data_type != BCH_DATA_user) + req->have_cache = true; if (target && !(flags & BCH_WRITE_only_specified_devs)) { - ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, - target, erasure_code, - nr_replicas, &nr_effective, - &have_cache, watermark, - flags, NULL); + ret = open_bucket_add_buckets(trans, req, NULL); if (!ret || bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto alloc_done; /* Don't retry from all devices if we're out of open buckets: */ if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) { - int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, - target, erasure_code, - nr_replicas, &nr_effective, - &have_cache, watermark, - flags, cl); + int ret2 = open_bucket_add_buckets(trans, req, cl); if (!ret2 || bch2_err_matches(ret2, BCH_ERR_transaction_restart) || bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) { @@ -1384,45 
+1305,38 @@ retry: * Only try to allocate cache (durability = 0 devices) from the * specified target: */ - have_cache = true; + req->have_cache = true; + req->target = 0; - ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, - 0, erasure_code, - nr_replicas, &nr_effective, - &have_cache, watermark, - flags, cl); + ret = open_bucket_add_buckets(trans, req, cl); } else { - ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, - target, erasure_code, - nr_replicas, &nr_effective, - &have_cache, watermark, - flags, cl); + ret = open_bucket_add_buckets(trans, req, cl); } alloc_done: - BUG_ON(!ret && nr_effective < nr_replicas); + BUG_ON(!ret && req->nr_effective < req->nr_replicas); - if (erasure_code && !ec_open_bucket(c, &ptrs)) + if (erasure_code && !ec_open_bucket(c, &req->ptrs)) pr_debug("failed to get ec bucket: ret %u", ret); if (ret == -BCH_ERR_insufficient_devices && - nr_effective >= nr_replicas_required) + req->nr_effective >= nr_replicas_required) ret = 0; if (ret) goto err; - if (nr_effective > nr_replicas) - deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas); + if (req->nr_effective > req->nr_replicas) + deallocate_extra_replicas(c, req); /* Free buckets we didn't use: */ - open_bucket_for_each(c, &wp->ptrs, ob, i) + open_bucket_for_each(c, &req->wp->ptrs, ob, i) open_bucket_free_unused(c, ob); - wp->ptrs = ptrs; + req->wp->ptrs = req->ptrs; - wp->sectors_free = UINT_MAX; + req->wp->sectors_free = UINT_MAX; - open_bucket_for_each(c, &wp->ptrs, ob, i) { + open_bucket_for_each(c, &req->wp->ptrs, ob, i) { /* * Ensure proper write alignment - either due to misaligned * bucket sizes (from buggy bcachefs-tools), or writes that mix @@ -1436,29 +1350,29 @@ alloc_done: ob->sectors_free = max_t(int, 0, ob->sectors_free - align); - wp->sectors_free = min(wp->sectors_free, ob->sectors_free); + req->wp->sectors_free = min(req->wp->sectors_free, ob->sectors_free); } - wp->sectors_free = rounddown(wp->sectors_free, block_sectors(c)); + req->wp->sectors_free = rounddown(req->wp->sectors_free, block_sectors(c)); /* Did alignment use up space in an open_bucket? 
*/ - if (unlikely(!wp->sectors_free)) { - bch2_alloc_sectors_done(c, wp); + if (unlikely(!req->wp->sectors_free)) { + bch2_alloc_sectors_done(c, req->wp); goto retry; } - BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX); + BUG_ON(!req->wp->sectors_free || req->wp->sectors_free == UINT_MAX); return 0; err: - open_bucket_for_each(c, &wp->ptrs, ob, i) - if (ptrs.nr < ARRAY_SIZE(ptrs.v)) - ob_push(c, &ptrs, ob); + open_bucket_for_each(c, &req->wp->ptrs, ob, i) + if (req->ptrs.nr < ARRAY_SIZE(req->ptrs.v)) + ob_push(c, &req->ptrs, ob); else open_bucket_free_unused(c, ob); - wp->ptrs = ptrs; + req->wp->ptrs = req->ptrs; - mutex_unlock(&wp->lock); + mutex_unlock(&req->wp->lock); if (bch2_err_matches(ret, BCH_ERR_freelist_empty) && try_decrease_writepoints(trans, write_points_nr)) @@ -1474,20 +1388,6 @@ err: return ret; } -struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob) -{ - struct bch_dev *ca = ob_dev(c, ob); - - return (struct bch_extent_ptr) { - .type = 1 << BCH_EXTENT_ENTRY_ptr, - .gen = ob->gen, - .dev = ob->dev, - .offset = bucket_to_sector(ca, ob->bucket) + - ca->mi.bucket_size - - ob->sectors_free, - }; -} - void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp, struct bkey_i *k, unsigned sectors, bool cached) @@ -1617,6 +1517,8 @@ static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob; unsigned i; + mutex_lock(&wp->lock); + prt_printf(out, "%lu: ", wp->write_point); prt_human_readable_u64(out, wp->sectors_allocated << 9); @@ -1634,6 +1536,8 @@ static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c, open_bucket_for_each(c, &wp->ptrs, ob, i) bch2_open_bucket_to_text(out, c, ob); printbuf_indent_sub(out, 2); + + mutex_unlock(&wp->lock); } void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c) @@ -1731,7 +1635,12 @@ static noinline void bch2_print_allocator_stuck(struct bch_fs *c) printbuf_indent_sub(&buf, 2); prt_newline(&buf); - for_each_online_member(c, ca) { + bch2_printbuf_make_room(&buf, 4096); + + rcu_read_lock(); + buf.atomic++; + + for_each_online_member_rcu(c, ca) { prt_printf(&buf, "Dev %u:\n", ca->dev_idx); printbuf_indent_add(&buf, 2); bch2_dev_alloc_debug_to_text(&buf, ca); @@ -1739,6 +1648,9 @@ static noinline void bch2_print_allocator_stuck(struct bch_fs *c) prt_newline(&buf); } + --buf.atomic; + rcu_read_unlock(); + prt_printf(&buf, "Copygc debug:\n"); printbuf_indent_add(&buf, 2); bch2_copygc_wait_to_text(&buf, c); @@ -1750,7 +1662,7 @@ static noinline void bch2_print_allocator_stuck(struct bch_fs *c) bch2_journal_debug_to_text(&buf, &c->journal); printbuf_indent_sub(&buf, 2); - bch2_print_string_as_lines(KERN_ERR, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); } diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h index 4c1e33cf57c0..2e01c7b61ed1 100644 --- a/fs/bcachefs/alloc_foreground.h +++ b/fs/bcachefs/alloc_foreground.h @@ -3,8 +3,10 @@ #define _BCACHEFS_ALLOC_FOREGROUND_H #include "bcachefs.h" +#include "buckets.h" #include "alloc_types.h" #include "extents.h" +#include "io_write_types.h" #include "sb-members.h" #include <linux/hash.h> @@ -23,6 +25,52 @@ struct dev_alloc_list { u8 data[BCH_SB_MEMBERS_MAX]; }; +struct alloc_request { + unsigned nr_replicas; + unsigned target; + bool ec; + enum bch_watermark watermark; + enum bch_write_flags flags; + enum bch_data_type data_type; + struct bch_devs_list *devs_have; + struct write_point *wp; + + /* These fields are used primarily by 
open_bucket_add_buckets */ + struct open_buckets ptrs; + unsigned nr_effective; /* sum of @ptrs durability */ + bool have_cache; /* have we allocated from a 0 durability dev */ + struct bch_devs_mask devs_may_alloc; + + /* bch2_bucket_alloc_set_trans(): */ + struct bch_dev_usage usage; + + /* bch2_bucket_alloc_trans(): */ + struct bch_dev *ca; + + enum { + BTREE_BITMAP_NO, + BTREE_BITMAP_YES, + BTREE_BITMAP_ANY, + } btree_bitmap; + + struct { + u64 buckets_seen; + u64 skipped_open; + u64 skipped_need_journal_commit; + u64 need_journal_commit; + u64 skipped_nocow; + u64 skipped_nouse; + u64 skipped_mi_btree_bitmap; + } counters; + + unsigned scratch_nr_replicas; + unsigned scratch_nr_effective; + bool scratch_have_cache; + enum bch_data_type scratch_data_type; + struct open_buckets scratch_ptrs; + struct bch_devs_mask scratch_devs_may_alloc; +}; + struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *, struct dev_stripe_state *, struct bch_devs_mask *); @@ -173,11 +221,8 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 } enum bch_write_flags; -int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *, - struct dev_stripe_state *, struct bch_devs_mask *, - unsigned, unsigned *, bool *, enum bch_write_flags, - enum bch_data_type, enum bch_watermark, - struct closure *); +int bch2_bucket_alloc_set_trans(struct btree_trans *, struct alloc_request *, + struct dev_stripe_state *, struct closure *); int bch2_alloc_sectors_start_trans(struct btree_trans *, unsigned, unsigned, @@ -189,7 +234,19 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *, struct closure *, struct write_point **); -struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *); +static inline struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob) +{ + struct bch_dev *ca = ob_dev(c, ob); + + return (struct bch_extent_ptr) { + .type = 1 << BCH_EXTENT_ENTRY_ptr, + .gen = ob->gen, + .dev = ob->dev, + .offset = bucket_to_sector(ca, ob->bucket) + + ca->mi.bucket_size - + ob->sectors_free, + }; +} /* * Append pointers to the space we just allocated to @k, and mark @sectors space diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h index 8f79f46c2a78..e7becdf22cba 100644 --- a/fs/bcachefs/alloc_types.h +++ b/fs/bcachefs/alloc_types.h @@ -8,22 +8,6 @@ #include "clock_types.h" #include "fifo.h" -struct bucket_alloc_state { - enum { - BTREE_BITMAP_NO, - BTREE_BITMAP_YES, - BTREE_BITMAP_ANY, - } btree_bitmap; - - u64 buckets_seen; - u64 skipped_open; - u64 skipped_need_journal_commit; - u64 need_journal_commit; - u64 skipped_nocow; - u64 skipped_nouse; - u64 skipped_mi_btree_bitmap; -}; - #define BCH_WATERMARKS() \ x(stripe) \ x(normal) \ diff --git a/fs/bcachefs/async_objs.c b/fs/bcachefs/async_objs.c new file mode 100644 index 000000000000..a7cd1f0f0964 --- /dev/null +++ b/fs/bcachefs/async_objs.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Async obj debugging: keep asynchronous objects on (very fast) lists, make + * them visibile in debugfs: + */ + +#include "bcachefs.h" +#include "async_objs.h" +#include "btree_io.h" +#include "debug.h" +#include "io_read.h" +#include "io_write.h" + +#include <linux/debugfs.h> + +static void promote_obj_to_text(struct printbuf *out, void *obj) +{ + bch2_promote_op_to_text(out, obj); +} + +static void rbio_obj_to_text(struct printbuf *out, void *obj) +{ + bch2_read_bio_to_text(out, obj); +} + +static void write_op_obj_to_text(struct printbuf *out, void *obj) +{ + 
bch2_write_op_to_text(out, obj); +} + +static void btree_read_bio_obj_to_text(struct printbuf *out, void *obj) +{ + struct btree_read_bio *rbio = obj; + bch2_btree_read_bio_to_text(out, rbio); +} + +static void btree_write_bio_obj_to_text(struct printbuf *out, void *obj) +{ + struct btree_write_bio *wbio = obj; + bch2_bio_to_text(out, &wbio->wbio.bio); +} + +static int bch2_async_obj_list_open(struct inode *inode, struct file *file) +{ + struct async_obj_list *list = inode->i_private; + struct dump_iter *i; + + i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL); + if (!i) + return -ENOMEM; + + file->private_data = i; + i->from = POS_MIN; + i->iter = 0; + i->c = container_of(list, struct bch_fs, async_objs[list->idx]); + i->list = list; + i->buf = PRINTBUF; + return 0; +} + +static ssize_t bch2_async_obj_list_read(struct file *file, char __user *buf, + size_t size, loff_t *ppos) +{ + struct dump_iter *i = file->private_data; + struct async_obj_list *list = i->list; + ssize_t ret = 0; + + i->ubuf = buf; + i->size = size; + i->ret = 0; + + struct genradix_iter iter; + void *obj; + fast_list_for_each_from(&list->list, iter, obj, i->iter) { + ret = bch2_debugfs_flush_buf(i); + if (ret) + return ret; + + if (!i->size) + break; + + list->obj_to_text(&i->buf, obj); + } + + if (i->buf.allocation_failure) + ret = -ENOMEM; + else + i->iter = iter.pos; + + if (!ret) + ret = bch2_debugfs_flush_buf(i); + + return ret ?: i->ret; +} + +static const struct file_operations async_obj_ops = { + .owner = THIS_MODULE, + .open = bch2_async_obj_list_open, + .release = bch2_dump_release, + .read = bch2_async_obj_list_read, +}; + +void bch2_fs_async_obj_debugfs_init(struct bch_fs *c) +{ + c->async_obj_dir = debugfs_create_dir("async_objs", c->fs_debug_dir); + +#define x(n) debugfs_create_file(#n, 0400, c->async_obj_dir, \ + &c->async_objs[BCH_ASYNC_OBJ_LIST_##n], &async_obj_ops); + BCH_ASYNC_OBJ_LISTS() +#undef x +} + +void bch2_fs_async_obj_exit(struct bch_fs *c) +{ + for (unsigned i = 0; i < ARRAY_SIZE(c->async_objs); i++) + fast_list_exit(&c->async_objs[i].list); +} + +int bch2_fs_async_obj_init(struct bch_fs *c) +{ + for (unsigned i = 0; i < ARRAY_SIZE(c->async_objs); i++) { + if (fast_list_init(&c->async_objs[i].list)) + return -BCH_ERR_ENOMEM_async_obj_init; + c->async_objs[i].idx = i; + } + +#define x(n) c->async_objs[BCH_ASYNC_OBJ_LIST_##n].obj_to_text = n##_obj_to_text; + BCH_ASYNC_OBJ_LISTS() +#undef x + + return 0; +} diff --git a/fs/bcachefs/async_objs.h b/fs/bcachefs/async_objs.h new file mode 100644 index 000000000000..cd6489b8cf76 --- /dev/null +++ b/fs/bcachefs/async_objs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_ASYNC_OBJS_H +#define _BCACHEFS_ASYNC_OBJS_H + +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS +static inline void __async_object_list_del(struct fast_list *head, unsigned idx) +{ + fast_list_remove(head, idx); +} + +static inline int __async_object_list_add(struct fast_list *head, void *obj, unsigned *idx) +{ + int ret = fast_list_add(head, obj); + *idx = ret > 0 ? ret : 0; + return ret < 0 ? 
ret : 0; +} + +#define async_object_list_del(_c, _list, idx) \ + __async_object_list_del(&(_c)->async_objs[BCH_ASYNC_OBJ_LIST_##_list].list, idx) + +#define async_object_list_add(_c, _list, obj, idx) \ + __async_object_list_add(&(_c)->async_objs[BCH_ASYNC_OBJ_LIST_##_list].list, obj, idx) + +void bch2_fs_async_obj_debugfs_init(struct bch_fs *); +void bch2_fs_async_obj_exit(struct bch_fs *); +int bch2_fs_async_obj_init(struct bch_fs *); + +#else /* CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS */ + +#define async_object_list_del(_c, _n, idx) do {} while (0) + +static inline int __async_object_list_add(void) +{ + return 0; +} +#define async_object_list_add(_c, _n, obj, idx) __async_object_list_add() + +static inline void bch2_fs_async_obj_debugfs_init(struct bch_fs *c) {} +static inline void bch2_fs_async_obj_exit(struct bch_fs *c) {} +static inline int bch2_fs_async_obj_init(struct bch_fs *c) { return 0; } + +#endif /* CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS */ + +#endif /* _BCACHEFS_ASYNC_OBJS_H */ diff --git a/fs/bcachefs/async_objs_types.h b/fs/bcachefs/async_objs_types.h new file mode 100644 index 000000000000..8d713c0f5841 --- /dev/null +++ b/fs/bcachefs/async_objs_types.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_ASYNC_OBJS_TYPES_H +#define _BCACHEFS_ASYNC_OBJS_TYPES_H + +#define BCH_ASYNC_OBJ_LISTS() \ + x(promote) \ + x(rbio) \ + x(write_op) \ + x(btree_read_bio) \ + x(btree_write_bio) + +enum bch_async_obj_lists { +#define x(n) BCH_ASYNC_OBJ_LIST_##n, + BCH_ASYNC_OBJ_LISTS() +#undef x + BCH_ASYNC_OBJ_NR +}; + +struct async_obj_list { + struct fast_list list; + void (*obj_to_text)(struct printbuf *, void *); + unsigned idx; +}; + +#endif /* _BCACHEFS_ASYNC_OBJS_TYPES_H */ diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index 5f195d2280a4..cde7dd115267 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -12,9 +12,20 @@ #include "disk_accounting.h" #include "error.h" #include "progress.h" +#include "recovery_passes.h" #include <linux/mm.h> +static int bch2_bucket_bitmap_set(struct bch_dev *, struct bucket_bitmap *, u64); + +static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp) +{ + return (struct bbpos) { + .btree = bp.btree_id, + .pos = bp.pos, + }; +} + int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k, struct bkey_validate_context from) { @@ -96,6 +107,8 @@ static noinline int backpointer_mod_err(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; + bool will_check = c->recovery.passes_to_run & + BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers); int ret = 0; if (insert) { @@ -110,9 +123,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans, prt_printf(&buf, "for "); bch2_bkey_val_to_text(&buf, c, orig_k); - - bch_err(c, "%s", buf.buf); - } else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) { + } else if (!will_check) { prt_printf(&buf, "backpointer not found when deleting\n"); printbuf_indent_add(&buf, 2); @@ -128,8 +139,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans, bch2_bkey_val_to_text(&buf, c, orig_k); } - if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers && - __bch2_inconsistent_error(c, &buf)) + if (!will_check && __bch2_inconsistent_error(c, &buf)) ret = -BCH_ERR_erofs_unfixed_errors; bch_err(c, "%s", buf.buf); @@ -174,7 +184,7 @@ err: static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos) { - return 
(likely(!bch2_backpointers_no_use_write_buffer) + return (!static_branch_unlikely(&bch2_backpointers_no_use_write_buffer) ? bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, pos) : bch2_btree_delete(trans, BTREE_ID_backpointers, pos, 0)) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); @@ -184,7 +194,7 @@ static inline int bch2_backpointers_maybe_flush(struct btree_trans *trans, struct bkey_s_c visiting_k, struct bkey_buf *last_flushed) { - return likely(!bch2_backpointers_no_use_write_buffer) + return !static_branch_unlikely(&bch2_backpointers_no_use_write_buffer) ? bch2_btree_write_buffer_maybe_flush(trans, visiting_k, last_flushed) : 0; } @@ -478,7 +488,8 @@ found: bytes = p.crc.compressed_size << 9; - struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ); + struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ, + BCH_DEV_READ_REF_check_extent_checksums); if (!ca) return false; @@ -515,7 +526,8 @@ err: if (bio) bio_put(bio); kvfree(data_buf); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_check_extent_checksums); printbuf_exit(&buf); return ret; } @@ -669,22 +681,33 @@ static int check_extent_to_backpointers(struct btree_trans *trans, rcu_read_lock(); struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev); - bool check = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_mismatches); - bool empty = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_empty); + if (!ca) { + rcu_read_unlock(); + continue; + } + + if (p.ptr.cached && dev_ptr_stale_rcu(ca, &p.ptr)) { + rcu_read_unlock(); + continue; + } + + u64 b = PTR_BUCKET_NR(ca, &p.ptr); + if (!bch2_bucket_bitmap_test(&ca->bucket_backpointer_mismatch, b)) { + rcu_read_unlock(); + continue; + } - bool stale = p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)); + bool empty = bch2_bucket_bitmap_test(&ca->bucket_backpointer_empty, b); rcu_read_unlock(); - if ((check || empty) && !stale) { - struct bkey_i_backpointer bp; - bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bp); + struct bkey_i_backpointer bp; + bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bp); - int ret = check - ? check_bp_exists(trans, s, &bp, k) - : bch2_bucket_backpointer_mod(trans, k, &bp, true); - if (ret) - return ret; - } + int ret = !empty + ? check_bp_exists(trans, s, &bp, k) + : bch2_bucket_backpointer_mod(trans, k, &bp, true); + if (ret) + return ret; } return 0; @@ -722,14 +745,6 @@ err: return ret; } -static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp) -{ - return (struct bbpos) { - .btree = bp.btree_id, - .pos = bp.pos, - }; -} - static u64 mem_may_pin_bytes(struct bch_fs *c) { struct sysinfo i; @@ -788,6 +803,13 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans, return ret; } +static inline int bch2_fs_going_ro(struct bch_fs *c) +{ + return test_bit(BCH_FS_going_ro, &c->flags) + ? 
-EROFS + : 0; +} + static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans, struct extents_to_bp_state *s) { @@ -815,6 +837,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans, ret = for_each_btree_key_continue(trans, iter, 0, k, ({ bch2_progress_update_iter(trans, &progress, &iter, "extents_to_backpointers"); + bch2_fs_going_ro(c) ?: check_extent_to_backpointers(trans, s, btree_id, level, k) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); })); @@ -854,6 +877,7 @@ static int data_type_to_alloc_counter(enum bch_data_type t) static int check_bucket_backpointers_to_extents(struct btree_trans *, struct bch_dev *, struct bpos); static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct bkey_s_c alloc_k, + bool *had_mismatch, struct bkey_buf *last_flushed) { struct bch_fs *c = trans->c; @@ -861,6 +885,8 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert); bool need_commit = false; + *had_mismatch = false; + if (a->data_type == BCH_DATA_sb || a->data_type == BCH_DATA_journal || a->data_type == BCH_DATA_parity) @@ -931,12 +957,18 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b goto err; } - if (!sectors[ALLOC_dirty] && - !sectors[ALLOC_stripe] && - !sectors[ALLOC_cached]) - __set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_empty); - else - __set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_mismatches); + bool empty = (sectors[ALLOC_dirty] + + sectors[ALLOC_stripe] + + sectors[ALLOC_cached]) == 0; + + ret = bch2_bucket_bitmap_set(ca, &ca->bucket_backpointer_mismatch, + alloc_k.k->p.offset) ?: + (empty + ? bch2_bucket_bitmap_set(ca, &ca->bucket_backpointer_empty, + alloc_k.k->p.offset) + : 0); + + *had_mismatch = true; } err: bch2_dev_put(ca); @@ -960,8 +992,14 @@ static bool backpointer_node_has_missing(struct bch_fs *c, struct bkey_s_c k) goto next; struct bpos bucket = bp_pos_to_bucket(ca, pos); - bucket.offset = find_next_bit(ca->bucket_backpointer_mismatches, - ca->mi.nbuckets, bucket.offset); + u64 next = ca->mi.nbuckets; + + unsigned long *bitmap = READ_ONCE(ca->bucket_backpointer_mismatch.buckets); + if (bitmap) + next = min_t(u64, next, + find_next_bit(bitmap, ca->mi.nbuckets, bucket.offset)); + + bucket.offset = next; if (bucket.offset == ca->mi.nbuckets) goto next; @@ -1070,28 +1108,6 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c) { int ret = 0; - /* - * Can't allow devices to come/go/resize while we have bucket bitmaps - * allocated - */ - down_read(&c->state_lock); - - for_each_member_device(c, ca) { - BUG_ON(ca->bucket_backpointer_mismatches); - ca->bucket_backpointer_mismatches = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets), - sizeof(unsigned long), - GFP_KERNEL); - ca->bucket_backpointer_empty = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets), - sizeof(unsigned long), - GFP_KERNEL); - if (!ca->bucket_backpointer_mismatches || - !ca->bucket_backpointer_empty) { - bch2_dev_put(ca); - ret = -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap; - goto err_free_bitmaps; - } - } - struct btree_trans *trans = bch2_trans_get(c); struct extents_to_bp_state s = { .bp_start = POS_MIN }; @@ -1100,23 +1116,24 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c) ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN, BTREE_ITER_prefetch, k, ({ - check_bucket_backpointer_mismatch(trans, k, &s.last_flushed); + bool had_mismatch; + bch2_fs_going_ro(c) ?: + 
check_bucket_backpointer_mismatch(trans, k, &had_mismatch, &s.last_flushed); })); if (ret) goto err; - u64 nr_buckets = 0, nr_mismatches = 0, nr_empty = 0; + u64 nr_buckets = 0, nr_mismatches = 0; for_each_member_device(c, ca) { nr_buckets += ca->mi.nbuckets; - nr_mismatches += bitmap_weight(ca->bucket_backpointer_mismatches, ca->mi.nbuckets); - nr_empty += bitmap_weight(ca->bucket_backpointer_empty, ca->mi.nbuckets); + nr_mismatches += ca->bucket_backpointer_mismatch.nr; } - if (!nr_mismatches && !nr_empty) + if (!nr_mismatches) goto err; bch_info(c, "scanning for missing backpointers in %llu/%llu buckets", - nr_mismatches + nr_empty, nr_buckets); + nr_mismatches, nr_buckets); while (1) { ret = bch2_pin_backpointer_nodes_with_missing(trans, s.bp_start, &s.bp_end); @@ -1147,23 +1164,71 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c) s.bp_start = bpos_successor(s.bp_end); } + + for_each_member_device(c, ca) { + bch2_bucket_bitmap_free(&ca->bucket_backpointer_mismatch); + bch2_bucket_bitmap_free(&ca->bucket_backpointer_empty); + } err: bch2_trans_put(trans); bch2_bkey_buf_exit(&s.last_flushed, c); bch2_btree_cache_unpin(c); -err_free_bitmaps: - for_each_member_device(c, ca) { - kvfree(ca->bucket_backpointer_empty); - ca->bucket_backpointer_empty = NULL; - kvfree(ca->bucket_backpointer_mismatches); - ca->bucket_backpointer_mismatches = NULL; - } - up_read(&c->state_lock); bch_err_fn(c, ret); return ret; } +static int check_bucket_backpointer_pos_mismatch(struct btree_trans *trans, + struct bpos bucket, + bool *had_mismatch, + struct bkey_buf *last_flushed) +{ + struct btree_iter alloc_iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &alloc_iter, + BTREE_ID_alloc, bucket, + BTREE_ITER_cached); + int ret = bkey_err(k); + if (ret) + return ret; + + ret = check_bucket_backpointer_mismatch(trans, k, had_mismatch, last_flushed); + bch2_trans_iter_exit(trans, &alloc_iter); + return ret; +} + +int bch2_check_bucket_backpointer_mismatch(struct btree_trans *trans, + struct bch_dev *ca, u64 bucket, + bool copygc, + struct bkey_buf *last_flushed) +{ + struct bch_fs *c = trans->c; + bool had_mismatch; + int ret = lockrestart_do(trans, + check_bucket_backpointer_pos_mismatch(trans, POS(ca->dev_idx, bucket), + &had_mismatch, last_flushed)); + if (ret || !had_mismatch) + return ret; + + u64 nr = ca->bucket_backpointer_mismatch.nr; + u64 allowed = copygc ? ca->mi.nbuckets >> 7 : 0; + + struct printbuf buf = PRINTBUF; + __bch2_log_msg_start(ca->name, &buf); + + prt_printf(&buf, "Detected missing backpointers in bucket %llu, now have %llu/%llu with missing\n", + bucket, nr, ca->mi.nbuckets); + + bch2_run_explicit_recovery_pass(c, &buf, + BCH_RECOVERY_PASS_check_extents_to_backpointers, + nr < allowed ? 
RUN_RECOVERY_PASS_ratelimit : 0); + + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + return 0; +} + +/* backpointers -> extents */ + static int check_one_backpointer(struct btree_trans *trans, struct bbpos start, struct bbpos end, @@ -1279,3 +1344,48 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c) bch_err_fn(c, ret); return ret; } + +static int bch2_bucket_bitmap_set(struct bch_dev *ca, struct bucket_bitmap *b, u64 bit) +{ + scoped_guard(mutex, &b->lock) { + if (!b->buckets) { + b->buckets = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets), + sizeof(unsigned long), GFP_KERNEL); + if (!b->buckets) + return -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap; + } + + b->nr += !__test_and_set_bit(bit, b->buckets); + } + + return 0; +} + +int bch2_bucket_bitmap_resize(struct bucket_bitmap *b, u64 old_size, u64 new_size) +{ + scoped_guard(mutex, &b->lock) { + if (!b->buckets) + return 0; + + unsigned long *n = kvcalloc(BITS_TO_LONGS(new_size), + sizeof(unsigned long), GFP_KERNEL); + if (!n) + return -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap; + + memcpy(n, b->buckets, + BITS_TO_LONGS(min(old_size, new_size)) * sizeof(unsigned long)); + kvfree(b->buckets); + b->buckets = n; + } + + return 0; +} + +void bch2_bucket_bitmap_free(struct bucket_bitmap *b) +{ + mutex_lock(&b->lock); + kvfree(b->buckets); + b->buckets = NULL; + b->nr = 0; + mutex_unlock(&b->lock); +} diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h index 16575dbc5736..6840561084ce 100644 --- a/fs/bcachefs/backpointers.h +++ b/fs/bcachefs/backpointers.h @@ -102,7 +102,7 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans, struct bkey_i_backpointer *bp, bool insert) { - if (unlikely(bch2_backpointers_no_use_write_buffer)) + if (static_branch_unlikely(&bch2_backpointers_no_use_write_buffer)) return bch2_bucket_backpointer_mod_nowritebuffer(trans, orig_k, bp, insert); if (!insert) { @@ -182,8 +182,20 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct bkey_s_c_b struct btree *bch2_backpointer_get_node(struct btree_trans *, struct bkey_s_c_backpointer, struct btree_iter *, struct bkey_buf *); +int bch2_check_bucket_backpointer_mismatch(struct btree_trans *, struct bch_dev *, u64, + bool, struct bkey_buf *); + int bch2_check_btree_backpointers(struct bch_fs *); int bch2_check_extents_to_backpointers(struct bch_fs *); int bch2_check_backpointers_to_extents(struct bch_fs *); +static inline bool bch2_bucket_bitmap_test(struct bucket_bitmap *b, u64 i) +{ + unsigned long *bitmap = READ_ONCE(b->buckets); + return bitmap && test_bit(i, bitmap); +} + +int bch2_bucket_bitmap_resize(struct bucket_bitmap *, u64, u64); +void bch2_bucket_bitmap_free(struct bucket_bitmap *); + #endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */ diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 75f7408da173..7824da2af9d0 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -209,17 +209,18 @@ #include "btree_journal_iter_types.h" #include "disk_accounting_types.h" #include "errcode.h" +#include "fast_list.h" #include "fifo.h" #include "nocow_locking_types.h" #include "opts.h" -#include "recovery_passes_types.h" #include "sb-errors_types.h" #include "seqmutex.h" +#include "snapshot_types.h" #include "time_stats.h" #include "util.h" #ifdef CONFIG_BCACHEFS_DEBUG -#define BCH_WRITE_REF_DEBUG +#define ENUMERATED_REF_DEBUG #endif #ifndef dynamic_fault @@ -269,7 +270,8 @@ do { \ #define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n") -void bch2_print_str(struct bch_fs *, const 
char *); +void bch2_print_str(struct bch_fs *, const char *, const char *); +void bch2_print_str_nonblocking(struct bch_fs *, const char *, const char *); __printf(2, 3) void bch2_print_opts(struct bch_opts *, const char *, ...); @@ -293,6 +295,16 @@ do { \ bch2_print(_c, __VA_ARGS__); \ } while (0) +#define bch2_print_str_ratelimited(_c, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + bch2_print_str(_c, __VA_ARGS__); \ +} while (0) + #define bch_info(c, fmt, ...) \ bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__) #define bch_info_ratelimited(c, fmt, ...) \ @@ -390,17 +402,20 @@ do { \ "compare them") \ BCH_DEBUG_PARAM(backpointers_no_use_write_buffer, \ "Don't use the write buffer for backpointers, enabling "\ - "extra runtime checks") - -/* Parameters that should only be compiled in debug mode: */ -#define BCH_DEBUG_PARAMS_DEBUG() \ - BCH_DEBUG_PARAM(expensive_debug_checks, \ - "Enables various runtime debugging checks that " \ - "significantly affect performance") \ + "extra runtime checks") \ + BCH_DEBUG_PARAM(debug_check_btree_locking, \ + "Enable additional asserts for btree locking") \ BCH_DEBUG_PARAM(debug_check_iterators, \ "Enables extra verification for btree iterators") \ + BCH_DEBUG_PARAM(debug_check_bset_lookups, \ + "Enables extra verification for bset lookups") \ BCH_DEBUG_PARAM(debug_check_btree_accounting, \ "Verify btree accounting for keys within a node") \ + BCH_DEBUG_PARAM(debug_check_bkey_unpack, \ + "Enables extra verification for bkey unpack") + +/* Parameters that should only be compiled in debug mode: */ +#define BCH_DEBUG_PARAMS_DEBUG() \ BCH_DEBUG_PARAM(journal_seq_verify, \ "Store the journal sequence number in the version " \ "number of every btree key, and verify that btree " \ @@ -427,22 +442,17 @@ do { \ #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS() #endif -#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name; -BCH_DEBUG_PARAMS() +#define BCH_DEBUG_PARAM(name, description) extern struct static_key_false bch2_##name; +BCH_DEBUG_PARAMS_ALL() #undef BCH_DEBUG_PARAM -#ifndef CONFIG_BCACHEFS_DEBUG -#define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name; -BCH_DEBUG_PARAMS_DEBUG() -#undef BCH_DEBUG_PARAM -#endif - #define BCH_TIME_STATS() \ x(btree_node_mem_alloc) \ x(btree_node_split) \ x(btree_node_compact) \ x(btree_node_merge) \ x(btree_node_sort) \ + x(btree_node_get) \ x(btree_node_read) \ x(btree_node_read_done) \ x(btree_node_write) \ @@ -450,6 +460,10 @@ BCH_DEBUG_PARAMS_DEBUG() x(btree_interior_update_total) \ x(btree_gc) \ x(data_write) \ + x(data_write_to_submit) \ + x(data_write_to_queue) \ + x(data_write_to_btree_update) \ + x(data_write_btree_update) \ x(data_read) \ x(data_promote) \ x(journal_flush_write) \ @@ -473,6 +487,7 @@ enum bch_time_stats { }; #include "alloc_types.h" +#include "async_objs_types.h" #include "btree_gc_types.h" #include "btree_types.h" #include "btree_node_scan_types.h" @@ -482,10 +497,12 @@ enum bch_time_stats { #include "clock_types.h" #include "disk_groups_types.h" #include "ec_types.h" +#include "enumerated_ref_types.h" #include "journal_types.h" #include "keylist_types.h" #include "quota_types.h" #include "rebalance_types.h" +#include "recovery_passes_types.h" #include "replicas_types.h" #include "sb-members_types.h" #include "subvolume_types.h" @@ -514,6 +531,57 @@ struct discard_in_flight { u64 bucket:63; }; +#define BCH_DEV_READ_REFS() \ + 
x(bch2_online_devs) \ + x(trans_mark_dev_sbs) \ + x(read_fua_test) \ + x(sb_field_resize) \ + x(write_super) \ + x(journal_read) \ + x(fs_journal_alloc) \ + x(fs_resize_on_mount) \ + x(btree_node_read) \ + x(btree_node_read_all_replicas) \ + x(btree_node_scrub) \ + x(btree_node_write) \ + x(btree_node_scan) \ + x(btree_verify_replicas) \ + x(btree_node_ondisk_to_text) \ + x(io_read) \ + x(check_extent_checksums) \ + x(ec_block) + +enum bch_dev_read_ref { +#define x(n) BCH_DEV_READ_REF_##n, + BCH_DEV_READ_REFS() +#undef x + BCH_DEV_READ_REF_NR, +}; + +#define BCH_DEV_WRITE_REFS() \ + x(journal_write) \ + x(journal_do_discards) \ + x(dev_do_discards) \ + x(discard_one_bucket_fast) \ + x(do_invalidates) \ + x(nocow_flush) \ + x(io_write) \ + x(ec_block) \ + x(ec_bucket_zero) + +enum bch_dev_write_ref { +#define x(n) BCH_DEV_WRITE_REF_##n, + BCH_DEV_WRITE_REFS() +#undef x + BCH_DEV_WRITE_REF_NR, +}; + +struct bucket_bitmap { + unsigned long *buckets; + u64 nr; + struct mutex lock; +}; + struct bch_dev { struct kobject kobj; #ifdef CONFIG_BCACHEFS_DEBUG @@ -524,8 +592,7 @@ struct bch_dev { struct percpu_ref ref; #endif struct completion ref_completion; - struct percpu_ref io_ref[2]; - struct completion io_ref_completion[2]; + struct enumerated_ref io_ref[2]; struct bch_fs *fs; @@ -559,8 +626,8 @@ struct bch_dev { u8 *oldest_gen; unsigned long *buckets_nouse; - unsigned long *bucket_backpointer_mismatches; - unsigned long *bucket_backpointer_empty; + struct bucket_bitmap bucket_backpointer_mismatch; + struct bucket_bitmap bucket_backpointer_empty; struct bch_dev_usage_full __percpu *usage; @@ -572,10 +639,6 @@ struct bch_dev { unsigned nr_partial_buckets; unsigned nr_btree_reserve; - size_t inc_gen_needs_gc; - size_t inc_gen_really_needs_gc; - size_t buckets_waiting_on_journal; - struct work_struct invalidate_work; struct work_struct discard_work; struct mutex discard_buckets_in_flight_lock; @@ -614,14 +677,15 @@ struct bch_dev { x(accounting_replay_done) \ x(may_go_rw) \ x(rw) \ + x(rw_init_done) \ x(was_rw) \ x(stopping) \ x(emergency_ro) \ x(going_ro) \ x(write_disable_complete) \ x(clean_shutdown) \ - x(recovery_running) \ - x(fsck_running) \ + x(in_recovery) \ + x(in_fsck) \ x(initial_gc_unfixed) \ x(need_delete_dead_snapshots) \ x(error) \ @@ -648,8 +712,10 @@ struct btree_transaction_stats { struct bch2_time_stats lock_hold_times; struct mutex lock; unsigned nr_max_paths; - unsigned journal_entries_size; unsigned max_mem; +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + darray_trans_kmalloc_trace trans_kmalloc_trace; +#endif char *max_paths_text; }; @@ -670,9 +736,6 @@ struct btree_trans_buf { struct btree_trans *trans; }; -#define BCACHEFS_ROOT_SUBVOL_INUM \ - ((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO }) - #define BCH_WRITE_REFS() \ x(journal) \ x(trans) \ @@ -694,7 +757,8 @@ struct btree_trans_buf { x(snapshot_delete_pagecache) \ x(sysfs) \ x(btree_write_buffer) \ - x(btree_node_scrub) + x(btree_node_scrub) \ + x(async_recovery_passes) enum bch_write_ref { #define x(n) BCH_WRITE_REF_##n, @@ -728,11 +792,7 @@ struct bch_fs { struct rw_semaphore state_lock; /* Counts outstanding writes, for clean transition to read-only */ -#ifdef BCH_WRITE_REF_DEBUG - atomic_long_t writes[BCH_WRITE_REF_NR]; -#else - struct percpu_ref writes; -#endif + struct enumerated_ref writes; /* * Certain operations are only allowed in single threaded mode, during * recovery, and we want to assert that this is the case: @@ -776,6 +836,7 @@ struct bch_fs { u8 nr_devices; u8 clean; + bool multi_device; /* 
true if we've ever had more than one device */ u8 encryption_type; @@ -785,6 +846,7 @@ struct bch_fs { unsigned nsec_per_time_unit; u64 features; u64 compat; + u64 recovery_passes_required; unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)]; u64 btrees_lost_data; } sb; @@ -809,7 +871,7 @@ struct bch_fs { struct mutex snapshot_table_lock; struct rw_semaphore snapshot_create_lock; - struct work_struct snapshot_delete_work; + struct snapshot_delete snapshot_delete; struct work_struct snapshot_wait_for_pagecache_and_delete_work; snapshot_id_list snapshots_unlinked; struct mutex snapshots_unlinked_lock; @@ -874,7 +936,7 @@ struct bch_fs { struct btree_write_buffer btree_write_buffer; struct workqueue_struct *btree_update_wq; - struct workqueue_struct *btree_io_complete_wq; + struct workqueue_struct *btree_write_complete_wq; /* copygc needs its own workqueue for index updates.. */ struct workqueue_struct *copygc_wq; /* @@ -885,6 +947,7 @@ struct bch_fs { struct workqueue_struct *write_ref_wq; /* ALLOCATION */ + struct bch_devs_mask online_devs; struct bch_devs_mask rw_devs[BCH_DATA_NR]; unsigned long rw_devs_change_count; @@ -979,6 +1042,10 @@ struct bch_fs { nocow_locks; struct rhashtable promote_table; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + struct async_obj_list async_objs[BCH_ASYNC_OBJ_NR]; +#endif + mempool_t compression_bounce[2]; mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR]; size_t zstd_workspace_size; @@ -1048,25 +1115,12 @@ struct bch_fs { /* RECOVERY */ u64 journal_replay_seq_start; u64 journal_replay_seq_end; - /* - * Two different uses: - * "Has this fsck pass?" - i.e. should this type of error be an - * emergency read-only - * And, in certain situations fsck will rewind to an earlier pass: used - * for signaling to the toplevel code which pass we want to run now. 
- */ - enum bch_recovery_pass curr_recovery_pass; - enum bch_recovery_pass next_recovery_pass; - /* bitmask of recovery passes that we actually ran */ - u64 recovery_passes_complete; - /* never rewinds version of curr_recovery_pass */ - enum bch_recovery_pass recovery_pass_done; - spinlock_t recovery_pass_lock; - struct semaphore online_fsck_mutex; + struct bch_fs_recovery recovery; /* DEBUG JUNK */ struct dentry *fs_debug_dir; struct dentry *btree_debug_dir; + struct dentry *async_obj_dir; struct btree_debug btree_debug[BTREE_ID_NR]; struct btree *verify_data; struct btree_node *verify_ondisk; @@ -1108,54 +1162,6 @@ struct bch_fs { extern struct wait_queue_head bch2_read_only_wait; -static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref) -{ -#ifdef BCH_WRITE_REF_DEBUG - atomic_long_inc(&c->writes[ref]); -#else - percpu_ref_get(&c->writes); -#endif -} - -static inline bool __bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref) -{ -#ifdef BCH_WRITE_REF_DEBUG - return !test_bit(BCH_FS_going_ro, &c->flags) && - atomic_long_inc_not_zero(&c->writes[ref]); -#else - return percpu_ref_tryget(&c->writes); -#endif -} - -static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref) -{ -#ifdef BCH_WRITE_REF_DEBUG - return !test_bit(BCH_FS_going_ro, &c->flags) && - atomic_long_inc_not_zero(&c->writes[ref]); -#else - return percpu_ref_tryget_live(&c->writes); -#endif -} - -static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref) -{ -#ifdef BCH_WRITE_REF_DEBUG - long v = atomic_long_dec_return(&c->writes[ref]); - - BUG_ON(v < 0); - if (v) - return; - for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) - if (atomic_long_read(&c->writes[i])) - return; - - set_bit(BCH_FS_write_disable_complete, &c->flags); - wake_up(&bch2_read_only_wait); -#else - percpu_ref_put(&c->writes); -#endif -} - static inline bool bch2_ro_ref_tryget(struct bch_fs *c) { if (test_bit(BCH_FS_stopping, &c->flags)) @@ -1256,4 +1262,17 @@ static inline unsigned data_replicas_required(struct bch_fs *c) #define BKEY_PADDED_ONSTACK(key, pad) \ struct { struct bkey_i key; __u64 key ## _pad[pad]; } +/* + * This is needed because discard is both a filesystem option and a device + * option, and mount options are supposed to apply to that mount and not be + * persisted, i.e. if it's set as a mount option we can't propagate it to the + * device. + */ +static inline bool bch2_discard_opt_enabled(struct bch_fs *c, struct bch_dev *ca) +{ + return test_bit(BCH_FS_discard_mount_opt_set, &c->flags) + ? 
c->opts.discard + : ca->mi.discard; +} + #endif /* _BCACHEFS_H */ diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h index d6e4a496f02b..b4a04df5ea95 100644 --- a/fs/bcachefs/bcachefs_format.h +++ b/fs/bcachefs/bcachefs_format.h @@ -497,7 +497,8 @@ struct bch_sb_field { x(members_v2, 11) \ x(errors, 12) \ x(ext, 13) \ - x(downgrade, 14) + x(downgrade, 14) \ + x(recovery_passes, 15) #include "alloc_background_format.h" #include "dirent_format.h" @@ -510,6 +511,7 @@ struct bch_sb_field { #include "logged_ops_format.h" #include "lru_format.h" #include "quota_format.h" +#include "recovery_passes_format.h" #include "reflink_format.h" #include "replicas_format.h" #include "snapshot_format.h" @@ -695,7 +697,10 @@ struct bch_sb_field_ext { x(stripe_backpointers, BCH_VERSION(1, 22)) \ x(stripe_lru, BCH_VERSION(1, 23)) \ x(casefolding, BCH_VERSION(1, 24)) \ - x(extent_flags, BCH_VERSION(1, 25)) + x(extent_flags, BCH_VERSION(1, 25)) \ + x(snapshot_deletion_v2, BCH_VERSION(1, 26)) \ + x(fast_device_removal, BCH_VERSION(1, 27)) \ + x(inode_has_case_insensitive, BCH_VERSION(1, 28)) enum bcachefs_metadata_version { bcachefs_metadata_version_min = 9, @@ -846,7 +851,7 @@ LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29); LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30); LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62); LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63); -/* one free bit */ +LE64_BITMASK(BCH_SB_MULTI_DEVICE, struct bch_sb, flags[3], 63, 64); LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32); LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33); LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34); @@ -867,7 +872,9 @@ LE64_BITMASK(BCH_SB_VERSION_INCOMPAT_ALLOWED, LE64_BITMASK(BCH_SB_SHARD_INUMS_NBITS, struct bch_sb, flags[6], 0, 4); LE64_BITMASK(BCH_SB_WRITE_ERROR_TIMEOUT,struct bch_sb, flags[6], 4, 14); LE64_BITMASK(BCH_SB_CSUM_ERR_RETRY_NR, struct bch_sb, flags[6], 14, 20); +LE64_BITMASK(BCH_SB_DEGRADED_ACTION, struct bch_sb, flags[6], 20, 22); LE64_BITMASK(BCH_SB_CASEFOLD, struct bch_sb, flags[6], 22, 23); +LE64_BITMASK(BCH_SB_REBALANCE_AC_ONLY, struct bch_sb, flags[6], 23, 24); static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb) { @@ -922,7 +929,9 @@ static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u x(alloc_v2, 17) \ x(extents_across_btree_nodes, 18) \ x(incompat_version_field, 19) \ - x(casefolding, 20) + x(casefolding, 20) \ + x(no_alloc_info, 21) \ + x(small_image, 22) #define BCH_SB_FEATURES_ALWAYS \ (BIT_ULL(BCH_FEATURE_new_extent_overwrite)| \ @@ -989,6 +998,19 @@ enum bch_error_actions { BCH_ON_ERROR_NR }; +#define BCH_DEGRADED_ACTIONS() \ + x(ask, 0) \ + x(yes, 1) \ + x(very, 2) \ + x(no, 3) + +enum bch_degraded_actions { +#define x(t, n) BCH_DEGRADED_##t = n, + BCH_DEGRADED_ACTIONS() +#undef x + BCH_DEGRADED_ACTIONS_NR +}; + #define BCH_STR_HASH_TYPES() \ x(crc32c, 0) \ x(crc64, 1) \ diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c index 995ba32e9b6e..ee823c640642 100644 --- a/fs/bcachefs/bkey.c +++ b/fs/bcachefs/bkey.c @@ -47,11 +47,9 @@ void bch2_bkey_packed_to_binary_text(struct printbuf *out, } } -#ifdef CONFIG_BCACHEFS_DEBUG - -static void bch2_bkey_pack_verify(const struct bkey_packed *packed, - const struct bkey *unpacked, - const struct bkey_format *format) +static void __bch2_bkey_pack_verify(const struct bkey_packed *packed, + const struct bkey 
*unpacked, + const struct bkey_format *format) { struct bkey tmp; @@ -95,11 +93,13 @@ static void bch2_bkey_pack_verify(const struct bkey_packed *packed, } } -#else static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed, - const struct bkey *unpacked, - const struct bkey_format *format) {} -#endif + const struct bkey *unpacked, + const struct bkey_format *format) +{ + if (static_branch_unlikely(&bch2_debug_check_bkey_unpack)) + __bch2_bkey_pack_verify(packed, unpacked, format); +} struct pack_state { const struct bkey_format *format; @@ -398,7 +398,6 @@ static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v) return ret; } -#ifdef CONFIG_BCACHEFS_DEBUG static bool bkey_packed_successor(struct bkey_packed *out, const struct btree *b, struct bkey_packed k) @@ -455,7 +454,6 @@ static bool bkey_format_has_too_big_fields(const struct bkey_format *f) return false; } -#endif /* * Returns a packed key that compares <= in @@ -472,9 +470,7 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out, const struct bkey_format *f = &b->format; struct pack_state state = pack_state_init(f, out); u64 *w = out->_data; -#ifdef CONFIG_BCACHEFS_DEBUG struct bpos orig = in; -#endif bool exact = true; unsigned i; @@ -527,18 +523,18 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out, out->format = KEY_FORMAT_LOCAL_BTREE; out->type = KEY_TYPE_deleted; -#ifdef CONFIG_BCACHEFS_DEBUG - if (exact) { - BUG_ON(bkey_cmp_left_packed(b, out, &orig)); - } else { - struct bkey_packed successor; + if (static_branch_unlikely(&bch2_debug_check_bkey_unpack)) { + if (exact) { + BUG_ON(bkey_cmp_left_packed(b, out, &orig)); + } else { + struct bkey_packed successor; - BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0); - BUG_ON(bkey_packed_successor(&successor, b, *out) && - bkey_cmp_left_packed(b, &successor, &orig) < 0 && - !bkey_format_has_too_big_fields(f)); + BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0); + BUG_ON(bkey_packed_successor(&successor, b, *out) && + bkey_cmp_left_packed(b, &successor, &orig) < 0 && + !bkey_format_has_too_big_fields(f)); + } } -#endif return exact ? 
BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER; } @@ -627,14 +623,13 @@ struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s) } } -#ifdef CONFIG_BCACHEFS_DEBUG - { + if (static_branch_unlikely(&bch2_debug_check_bkey_unpack)) { struct printbuf buf = PRINTBUF; BUG_ON(bch2_bkey_format_invalid(NULL, &ret, 0, &buf)); printbuf_exit(&buf); } -#endif + return ret; } diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h index 054e2d5e8448..3ccd521c190a 100644 --- a/fs/bcachefs/bkey.h +++ b/fs/bcachefs/bkey.h @@ -191,6 +191,7 @@ static inline struct bpos bkey_max(struct bpos l, struct bpos r) static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r) { return bpos_eq(l.k->p, r.k->p) && + l.k->size == r.k->size && bkey_bytes(l.k) == bkey_bytes(r.k) && !memcmp(l.v, r.v, bkey_val_bytes(l.k)); } @@ -397,8 +398,7 @@ __bkey_unpack_key_format_checked(const struct btree *b, compiled_unpack_fn unpack_fn = b->aux_data; unpack_fn(dst, src); - if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && - bch2_expensive_debug_checks) { + if (static_branch_unlikely(&bch2_debug_check_bkey_unpack)) { struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src); BUG_ON(memcmp(dst, &dst2, sizeof(*dst))); diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c index 00d05ccfaf73..fcd8c82cba4f 100644 --- a/fs/bcachefs/bkey_methods.c +++ b/fs/bcachefs/bkey_methods.c @@ -356,7 +356,7 @@ bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) return ops->key_merge && bch2_bkey_maybe_mergable(l.k, r.k) && (u64) l.k->size + r.k->size <= KEY_SIZE_MAX && - !bch2_key_merging_disabled && + !static_branch_unlikely(&bch2_key_merging_disabled) && ops->key_merge(c, l, r); } diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c index 9a4a83d6fd2d..32841f762eb2 100644 --- a/fs/bcachefs/bset.c +++ b/fs/bcachefs/bset.c @@ -144,8 +144,6 @@ struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b) return nr; } -#ifdef CONFIG_BCACHEFS_DEBUG - void __bch2_verify_btree_nr_keys(struct btree *b) { struct btree_nr_keys nr = bch2_btree_node_count_keys(b); @@ -153,7 +151,7 @@ void __bch2_verify_btree_nr_keys(struct btree *b) BUG_ON(memcmp(&nr, &b->nr, sizeof(nr))); } -static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter, +static void __bch2_btree_node_iter_next_check(struct btree_node_iter *_iter, struct btree *b) { struct btree_node_iter iter = *_iter; @@ -190,8 +188,8 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter, } } -void bch2_btree_node_iter_verify(struct btree_node_iter *iter, - struct btree *b) +void __bch2_btree_node_iter_verify(struct btree_node_iter *iter, + struct btree *b) { struct btree_node_iter_set *set, *s2; struct bkey_packed *k, *p; @@ -237,8 +235,8 @@ found: } } -void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, - struct bkey_packed *insert, unsigned clobber_u64s) +static void __bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, + struct bkey_packed *insert, unsigned clobber_u64s) { struct bset_tree *t = bch2_bkey_to_bset(b, where); struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where); @@ -285,12 +283,15 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where, #endif } -#else - -static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter, - struct btree *b) {} +static inline void bch2_verify_insert_pos(struct btree *b, + struct bkey_packed *where, + struct bkey_packed *insert, + unsigned clobber_u64s) +{ + if 
(static_branch_unlikely(&bch2_debug_check_bset_lookups)) + __bch2_verify_insert_pos(b, where, insert, clobber_u64s); +} -#endif /* Auxiliary search trees */ @@ -361,9 +362,8 @@ static struct bkey_float *bkey_float(const struct btree *b, return ro_aux_tree_base(b, t)->f + idx; } -static void bset_aux_tree_verify(struct btree *b) +static void __bset_aux_tree_verify(struct btree *b) { -#ifdef CONFIG_BCACHEFS_DEBUG for_each_bset(b, t) { if (t->aux_data_offset == U16_MAX) continue; @@ -375,7 +375,12 @@ static void bset_aux_tree_verify(struct btree *b) BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b)); BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b)); } -#endif +} + +static inline void bset_aux_tree_verify(struct btree *b) +{ + if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) + __bset_aux_tree_verify(b); } void bch2_btree_keys_init(struct btree *b) @@ -495,15 +500,11 @@ static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t, }; } -static void bch2_bset_verify_rw_aux_tree(struct btree *b, - struct bset_tree *t) +static void __bch2_bset_verify_rw_aux_tree(struct btree *b, struct bset_tree *t) { struct bkey_packed *k = btree_bkey_first(b, t); unsigned j = 0; - if (!bch2_expensive_debug_checks) - return; - BUG_ON(bset_has_ro_aux_tree(t)); if (!bset_has_rw_aux_tree(t)) @@ -530,6 +531,13 @@ start: } } +static inline void bch2_bset_verify_rw_aux_tree(struct btree *b, + struct bset_tree *t) +{ + if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) + __bch2_bset_verify_rw_aux_tree(b, t); +} + /* returns idx of first entry >= offset: */ static unsigned rw_aux_tree_bsearch(struct btree *b, struct bset_tree *t, @@ -869,7 +877,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b, k = p; } - if (bch2_expensive_debug_checks) { + if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) { BUG_ON(ret >= orig_k); for (i = ret @@ -1195,7 +1203,7 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b, bkey_iter_pos_cmp(b, m, search) < 0) m = bkey_p_next(m); - if (bch2_expensive_debug_checks) { + if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) { struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m); BUG_ON(prev && @@ -1435,9 +1443,9 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter, void bch2_btree_node_iter_advance(struct btree_node_iter *iter, struct btree *b) { - if (bch2_expensive_debug_checks) { - bch2_btree_node_iter_verify(iter, b); - bch2_btree_node_iter_next_check(iter, b); + if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) { + __bch2_btree_node_iter_verify(iter, b); + __bch2_btree_node_iter_next_check(iter, b); } __bch2_btree_node_iter_advance(iter, b); @@ -1453,8 +1461,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter, struct btree_node_iter_set *set; unsigned end = 0; - if (bch2_expensive_debug_checks) - bch2_btree_node_iter_verify(iter, b); + bch2_btree_node_iter_verify(iter, b); for_each_bset(b, t) { k = bch2_bkey_prev_all(b, t, @@ -1489,8 +1496,7 @@ found: iter->data[0].k = __btree_node_key_to_offset(b, prev); iter->data[0].end = end; - if (bch2_expensive_debug_checks) - bch2_btree_node_iter_verify(iter, b); + bch2_btree_node_iter_verify(iter, b); return prev; } diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h index 6953d55b72cc..a15ecf9d006e 100644 --- a/fs/bcachefs/bset.h +++ b/fs/bcachefs/bset.h @@ -517,27 +517,19 @@ void bch2_dump_bset(struct bch_fs *, struct btree *, struct bset *, unsigned); void bch2_dump_btree_node(struct bch_fs *, 
struct btree *); void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *); -#ifdef CONFIG_BCACHEFS_DEBUG - void __bch2_verify_btree_nr_keys(struct btree *); -void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *); -void bch2_verify_insert_pos(struct btree *, struct bkey_packed *, - struct bkey_packed *, unsigned); - -#else +void __bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *); -static inline void __bch2_verify_btree_nr_keys(struct btree *b) {} static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter, - struct btree *b) {} -static inline void bch2_verify_insert_pos(struct btree *b, - struct bkey_packed *where, - struct bkey_packed *insert, - unsigned clobber_u64s) {} -#endif + struct btree *b) +{ + if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) + __bch2_btree_node_iter_verify(iter, b); +} static inline void bch2_verify_btree_nr_keys(struct btree *b) { - if (bch2_debug_check_btree_accounting) + if (static_branch_unlikely(&bch2_debug_check_btree_accounting)) __bch2_verify_btree_nr_keys(b); } diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 899891295797..8557cbd3d818 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -17,12 +17,6 @@ #include <linux/sched/mm.h> #include <linux/swap.h> -#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \ -do { \ - if (shrinker_counter) \ - bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++; \ -} while (0) - const char * const bch2_btree_node_flags[] = { "typebit", "typebit", @@ -350,115 +344,118 @@ static inline struct btree *btree_cache_find(struct btree_cache *bc, return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params); } -/* - * this version is for btree nodes that have already been freed (we're not - * reaping a real btree node) - */ -static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter) +static int __btree_node_reclaim_checks(struct bch_fs *c, struct btree *b, + bool flush, bool locked) { struct btree_cache *bc = &c->btree_cache; - int ret = 0; lockdep_assert_held(&bc->lock); -wait_on_io: - if (b->flags & ((1U << BTREE_NODE_dirty)| - (1U << BTREE_NODE_read_in_flight)| + + if (btree_node_noevict(b)) { + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_noevict]++; + return -BCH_ERR_ENOMEM_btree_node_reclaim; + } + if (btree_node_write_blocked(b)) { + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_blocked]++; + return -BCH_ERR_ENOMEM_btree_node_reclaim; + } + if (btree_node_will_make_reachable(b)) { + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_will_make_reachable]++; + return -BCH_ERR_ENOMEM_btree_node_reclaim; + } + + if (btree_node_dirty(b)) { + if (!flush) { + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_dirty]++; + return -BCH_ERR_ENOMEM_btree_node_reclaim; + } + + if (locked) { + /* + * Using the underscore version because we don't want to compact + * bsets after the write, since this node is about to be evicted + * - unless btree verify mode is enabled, since it runs out of + * the post write cleanup: + */ + if (static_branch_unlikely(&bch2_verify_btree_ondisk)) + bch2_btree_node_write(c, b, SIX_LOCK_intent, + BTREE_WRITE_cache_reclaim); + else + __bch2_btree_node_write(c, b, + BTREE_WRITE_cache_reclaim); + } + } + + if (b->flags & ((1U << BTREE_NODE_read_in_flight)| (1U << BTREE_NODE_write_in_flight))) { if (!flush) { - if (btree_node_dirty(b)) - BTREE_CACHE_NOT_FREED_INCREMENT(dirty); - else if (btree_node_read_in_flight(b)) - 
BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight); + if (btree_node_read_in_flight(b)) + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_read_in_flight]++; else if (btree_node_write_in_flight(b)) - BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight); + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_in_flight]++; return -BCH_ERR_ENOMEM_btree_node_reclaim; } + if (locked) + return -EINTR; + /* XXX: waiting on IO with btree cache lock held */ bch2_btree_node_wait_on_read(b); bch2_btree_node_wait_on_write(b); } + return 0; +} + +/* + * this version is for btree nodes that have already been freed (we're not + * reaping a real btree node) + */ +static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush) +{ + struct btree_cache *bc = &c->btree_cache; + int ret = 0; + + lockdep_assert_held(&bc->lock); +retry_unlocked: + ret = __btree_node_reclaim_checks(c, b, flush, false); + if (ret) + return ret; + if (!six_trylock_intent(&b->c.lock)) { - BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent); + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_intent]++; return -BCH_ERR_ENOMEM_btree_node_reclaim; } if (!six_trylock_write(&b->c.lock)) { - BTREE_CACHE_NOT_FREED_INCREMENT(lock_write); - goto out_unlock_intent; + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_write]++; + six_unlock_intent(&b->c.lock); + return -BCH_ERR_ENOMEM_btree_node_reclaim; } /* recheck under lock */ - if (b->flags & ((1U << BTREE_NODE_read_in_flight)| - (1U << BTREE_NODE_write_in_flight))) { - if (!flush) { - if (btree_node_read_in_flight(b)) - BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight); - else if (btree_node_write_in_flight(b)) - BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight); - goto out_unlock; - } + ret = __btree_node_reclaim_checks(c, b, flush, true); + if (ret) { six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); - goto wait_on_io; - } - - if (btree_node_noevict(b)) { - BTREE_CACHE_NOT_FREED_INCREMENT(noevict); - goto out_unlock; - } - if (btree_node_write_blocked(b)) { - BTREE_CACHE_NOT_FREED_INCREMENT(write_blocked); - goto out_unlock; - } - if (btree_node_will_make_reachable(b)) { - BTREE_CACHE_NOT_FREED_INCREMENT(will_make_reachable); - goto out_unlock; + if (ret == -EINTR) + goto retry_unlocked; + return ret; } - if (btree_node_dirty(b)) { - if (!flush) { - BTREE_CACHE_NOT_FREED_INCREMENT(dirty); - goto out_unlock; - } - /* - * Using the underscore version because we don't want to compact - * bsets after the write, since this node is about to be evicted - * - unless btree verify mode is enabled, since it runs out of - * the post write cleanup: - */ - if (bch2_verify_btree_ondisk) - bch2_btree_node_write(c, b, SIX_LOCK_intent, - BTREE_WRITE_cache_reclaim); - else - __bch2_btree_node_write(c, b, - BTREE_WRITE_cache_reclaim); - - six_unlock_write(&b->c.lock); - six_unlock_intent(&b->c.lock); - goto wait_on_io; - } -out: if (b->hash_val && !ret) trace_and_count(c, btree_cache_reap, c, b); - return ret; -out_unlock: - six_unlock_write(&b->c.lock); -out_unlock_intent: - six_unlock_intent(&b->c.lock); - ret = -BCH_ERR_ENOMEM_btree_node_reclaim; - goto out; + return 0; } -static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter) +static int btree_node_reclaim(struct bch_fs *c, struct btree *b) { - return __btree_node_reclaim(c, b, false, shrinker_counter); + return __btree_node_reclaim(c, b, false); } static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b) { - return __btree_node_reclaim(c, b, true, false); + return __btree_node_reclaim(c, b, true); } static unsigned 
long bch2_btree_cache_scan(struct shrinker *shrink, @@ -476,7 +473,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, unsigned long ret = SHRINK_STOP; bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4; - if (bch2_btree_shrinker_disabled) + if (static_branch_unlikely(&bch2_btree_shrinker_disabled)) return SHRINK_STOP; mutex_lock(&bc->lock); @@ -490,7 +487,10 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, * IO can always make forward progress: */ can_free = btree_cache_can_free(list); - nr = min_t(unsigned long, nr, can_free); + if (nr > can_free) { + bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_cache_reserve] += nr - can_free; + nr = can_free; + } i = 0; list_for_each_entry_safe(b, t, &bc->freeable, list) { @@ -506,7 +506,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, if (touched >= nr) goto out; - if (!btree_node_reclaim(c, b, true)) { + if (!btree_node_reclaim(c, b)) { btree_node_data_free(bc, b); six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); @@ -522,7 +522,7 @@ restart: clear_btree_node_accessed(b); bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; --touched;; - } else if (!btree_node_reclaim(c, b, true)) { + } else if (!btree_node_reclaim(c, b)) { __bch2_btree_node_hash_remove(bc, b); __btree_node_data_free(bc, b); @@ -569,7 +569,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink, { struct btree_cache_list *list = shrink->private_data; - if (bch2_btree_shrinker_disabled) + if (static_branch_unlikely(&bch2_btree_shrinker_disabled)) return 0; return btree_cache_can_free(list); @@ -755,7 +755,7 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c) for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) list_for_each_entry_reverse(b, &bc->live[i].list, list) - if (!btree_node_reclaim(c, b, false)) + if (!btree_node_reclaim(c, b)) return b; while (1) { @@ -790,7 +790,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea * disk node. Check the freed list before allocating a new one: */ list_for_each_entry(b, freed, list) - if (!btree_node_reclaim(c, b, false)) { + if (!btree_node_reclaim(c, b)) { list_del_init(&b->list); goto got_node; } @@ -817,7 +817,7 @@ got_node: * the list. 
Check if there's any freed nodes there: */ list_for_each_entry(b2, &bc->freeable, list) - if (!btree_node_reclaim(c, b2, false)) { + if (!btree_node_reclaim(c, b2)) { swap(b->data, b2->data); swap(b->aux_data, b2->aux_data); @@ -977,7 +977,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, /* Unlock before doing IO: */ six_unlock_intent(&b->c.lock); - bch2_trans_unlock_noassert(trans); + bch2_trans_unlock(trans); bch2_btree_node_read(trans, b, sync); @@ -1003,7 +1003,7 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) { struct printbuf buf = PRINTBUF; - if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) + if (c->recovery.pass_done < BCH_RECOVERY_PASS_check_allocations) return; prt_printf(&buf, @@ -1492,9 +1492,10 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc prt_btree_cache_line(out, c, "live:", bc->live[0].nr); prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr); - prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable); + prt_btree_cache_line(out, c, "reserve:", bc->nr_reserve); + prt_btree_cache_line(out, c, "freed:", bc->nr_freeable); prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty)); - prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock); + prt_printf(out, "cannibalize lock:\t%s\n", bc->alloc_lock ? "held" : "not held"); prt_newline(out); for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) { @@ -1505,6 +1506,7 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc } prt_newline(out); + prt_printf(out, "counters since mount:\n"); prt_printf(out, "freed:\t%zu\n", bc->nr_freed); prt_printf(out, "not freed:\n"); diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 37b69d89341f..91b6395421df 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -22,6 +22,7 @@ #include "debug.h" #include "disk_accounting.h" #include "ec.h" +#include "enumerated_ref.h" #include "error.h" #include "extents.h" #include "journal.h" @@ -370,20 +371,13 @@ again: prt_char(&buf, ' '); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k)); - if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), - trans, btree_node_read_error, - "Topology repair: unreadable btree node at\n%s", - buf.buf)) { + if (bch2_err_matches(ret, EIO)) { bch2_btree_node_evict(trans, cur_k.k); cur = NULL; ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level, cur_k.k->k.p); if (ret) break; - - ret = bch2_btree_lost_data(c, b->c.btree_id); - if (ret) - break; continue; } @@ -545,9 +539,6 @@ int bch2_check_topology(struct bch_fs *c) bch2_btree_id_to_text(&buf, i); if (r->error) { - ret = bch2_btree_lost_data(c, i); - if (ret) - break; reconstruct_root: bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf); @@ -628,7 +619,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id, deleted.p = k.k->p; if (initial) { - BUG_ON(bch2_journal_seq_verify && + BUG_ON(static_branch_unlikely(&bch2_journal_seq_verify) && k.k->bversion.lo > atomic64_read(&c->journal.seq)); if (fsck_err_on(btree_id != BTREE_ID_accounting && @@ -1088,6 +1079,10 @@ out: * allocator thread - issue wakeup in case they blocked on gc_lock: */ closure_wake_up(&c->freelist_wait); + + if (!ret && !test_bit(BCH_FS_errors_not_fixed, &c->flags)) + bch2_sb_members_clean_deleted(c); + bch_err_fn(c, ret); return ret; } @@ -1256,26 +1251,21 @@ static void bch2_gc_gens_work(struct work_struct *work) { struct bch_fs *c = container_of(work, 
struct bch_fs, gc_gens_work); bch2_gc_gens(c); - bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_gc_gens); } void bch2_gc_gens_async(struct bch_fs *c) { - if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) && + if (enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_gc_gens) && !queue_work(c->write_ref_wq, &c->gc_gens_work)) - bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_gc_gens); } -void bch2_fs_btree_gc_exit(struct bch_fs *c) -{ -} - -int bch2_fs_btree_gc_init(struct bch_fs *c) +void bch2_fs_btree_gc_init_early(struct bch_fs *c) { seqcount_init(&c->gc_pos_lock); INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work); init_rwsem(&c->gc_lock); mutex_init(&c->gc_gens_lock); - return 0; } diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h index 9693a90a48a2..ec77662369a2 100644 --- a/fs/bcachefs/btree_gc.h +++ b/fs/bcachefs/btree_gc.h @@ -83,7 +83,6 @@ void bch2_gc_pos_to_text(struct printbuf *, struct gc_pos *); int bch2_gc_gens(struct bch_fs *); void bch2_gc_gens_async(struct bch_fs *); -void bch2_fs_btree_gc_exit(struct bch_fs *); -int bch2_fs_btree_gc_init(struct bch_fs *); +void bch2_fs_btree_gc_init_early(struct bch_fs *); #endif /* _BCACHEFS_BTREE_GC_H */ diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 60782f3e5aec..34018296053a 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "async_objs.h" #include "bkey_buf.h" #include "bkey_methods.h" #include "bkey_sort.h" @@ -13,6 +14,7 @@ #include "buckets.h" #include "checksum.h" #include "debug.h" +#include "enumerated_ref.h" #include "error.h" #include "extents.h" #include "io_write.h" @@ -514,19 +516,23 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b) static void btree_err_msg(struct printbuf *out, struct bch_fs *c, struct bch_dev *ca, + bool print_pos, struct btree *b, struct bset *i, struct bkey_packed *k, - unsigned offset, int write) + unsigned offset, int rw) { - prt_printf(out, bch2_log_msg(c, "%s"), - write == READ - ? "error validating btree node " - : "corrupt btree node before write "); + if (print_pos) { + prt_str(out, rw == READ + ? "error validating btree node " + : "corrupt btree node before write "); + prt_printf(out, "at btree "); + bch2_btree_pos_to_text(out, c, b); + prt_newline(out); + } + if (ca) - prt_printf(out, "on %s ", ca->name); - prt_printf(out, "at btree "); - bch2_btree_pos_to_text(out, c, b); + prt_printf(out, "%s ", ca->name); - prt_printf(out, "\nnode offset %u/%u", + prt_printf(out, "node offset %u/%u", b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key))); if (i) prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s)); @@ -537,75 +543,110 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c, prt_str(out, ": "); } -__printf(10, 11) +__printf(11, 12) static int __btree_err(int ret, struct bch_fs *c, struct bch_dev *ca, struct btree *b, struct bset *i, struct bkey_packed *k, - int write, - bool have_retry, + int rw, enum bch_sb_error_id err_type, + struct bch_io_failures *failed, + struct printbuf *err_msg, const char *fmt, ...) 
{ - bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes; + if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes) + return -BCH_ERR_fsck_fix; + + bool have_retry = false; + int ret2; + + if (ca) { + bch2_mark_btree_validate_failure(failed, ca->dev_idx); + + struct extent_ptr_decoded pick; + have_retry = !bch2_bkey_pick_read_device(c, + bkey_i_to_s_c(&b->key), + failed, &pick, -1); + } if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry) ret = -BCH_ERR_btree_node_read_err_fixable; if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry) ret = -BCH_ERR_btree_node_read_err_bad_node; - if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable) - bch2_sb_error_count(c, err_type); + bch2_sb_error_count(c, err_type); + + bool print_deferred = err_msg && + rw == READ && + !(test_bit(BCH_FS_in_fsck, &c->flags) && + c->opts.fix_errors == FSCK_FIX_ask); struct printbuf out = PRINTBUF; - if (write != WRITE && ret != -BCH_ERR_btree_node_read_err_fixable) { - printbuf_indent_add_nextline(&out, 2); -#ifdef BCACHEFS_LOG_PREFIX - prt_printf(&out, bch2_log_msg(c, "")); -#endif - } + bch2_log_msg_start(c, &out); - btree_err_msg(&out, c, ca, b, i, k, b->written, write); + if (!print_deferred) + err_msg = &out; + + btree_err_msg(err_msg, c, ca, !print_deferred, b, i, k, b->written, rw); va_list args; va_start(args, fmt); - prt_vprintf(&out, fmt, args); + prt_vprintf(err_msg, fmt, args); va_end(args); - if (write == WRITE) { + if (print_deferred) { + prt_newline(err_msg); + + switch (ret) { + case -BCH_ERR_btree_node_read_err_fixable: + ret2 = bch2_fsck_err_opt(c, FSCK_CAN_FIX, err_type); + if (ret2 != -BCH_ERR_fsck_fix && + ret2 != -BCH_ERR_fsck_ignore) { + ret = ret2; + goto fsck_err; + } + + if (!have_retry) + ret = -BCH_ERR_fsck_fix; + goto out; + case -BCH_ERR_btree_node_read_err_bad_node: + prt_str(&out, ", "); + ret = __bch2_topology_error(c, &out); + break; + } + + goto out; + } + + if (rw == WRITE) { prt_str(&out, ", "); ret = __bch2_inconsistent_error(c, &out) ? -BCH_ERR_fsck_errors_not_fixed : 0; - silent = false; + goto print; } switch (ret) { case -BCH_ERR_btree_node_read_err_fixable: - ret = !silent - ? __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf) - : -BCH_ERR_fsck_fix; - if (ret != -BCH_ERR_fsck_fix && - ret != -BCH_ERR_fsck_ignore) + ret2 = __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf); + if (ret2 != -BCH_ERR_fsck_fix && + ret2 != -BCH_ERR_fsck_ignore) { + ret = ret2; goto fsck_err; - ret = -BCH_ERR_fsck_fix; + } + + if (!have_retry) + ret = -BCH_ERR_fsck_fix; goto out; case -BCH_ERR_btree_node_read_err_bad_node: prt_str(&out, ", "); ret = __bch2_topology_error(c, &out); - if (ret) - silent = false; - break; - case -BCH_ERR_btree_node_read_err_incompatible: - ret = -BCH_ERR_fsck_errors_not_fixed; - silent = false; break; } - - if (!silent) - bch2_print_string_as_lines(KERN_ERR, out.buf); +print: + bch2_print_str(c, KERN_ERR, out.buf); out: fsck_err: printbuf_exit(&out); @@ -614,8 +655,9 @@ fsck_err: #define btree_err(type, c, ca, b, i, k, _err_type, msg, ...) \ ({ \ - int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry, \ + int _ret = __btree_err(type, c, ca, b, i, k, write, \ BCH_FSCK_ERR_##_err_type, \ + failed, err_msg, \ msg, ##__VA_ARGS__); \ \ if (_ret != -BCH_ERR_fsck_fix) { \ @@ -623,7 +665,7 @@ fsck_err: goto fsck_err; \ } \ \ - *saw_error = true; \ + true; \ }) #define btree_err_on(cond, ...) ((cond) ? 
btree_err(__VA_ARGS__) : false) @@ -681,8 +723,9 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b) static int validate_bset(struct bch_fs *c, struct bch_dev *ca, struct btree *b, struct bset *i, - unsigned offset, unsigned sectors, - int write, bool have_retry, bool *saw_error) + unsigned offset, unsigned sectors, int write, + struct bch_io_failures *failed, + struct printbuf *err_msg) { unsigned version = le16_to_cpu(i->version); unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)); @@ -895,7 +938,8 @@ static inline int btree_node_read_bkey_cmp(const struct btree *b, static int validate_bset_keys(struct bch_fs *c, struct btree *b, struct bset *i, int write, - bool have_retry, bool *saw_error) + struct bch_io_failures *failed, + struct printbuf *err_msg) { unsigned version = le16_to_cpu(i->version); struct bkey_packed *k, *prev = NULL; @@ -1008,7 +1052,9 @@ fsck_err: } int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, - struct btree *b, bool have_retry, bool *saw_error) + struct btree *b, + struct bch_io_failures *failed, + struct printbuf *err_msg) { struct btree_node_entry *bne; struct sort_iter *iter; @@ -1018,11 +1064,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, bool used_mempool, blacklisted; bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); - unsigned u64s; unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)); u64 max_journal_seq = 0; struct printbuf buf = PRINTBUF; - int ret = 0, retry_read = 0, write = READ; + int ret = 0, write = READ; u64 start_time = local_clock(); b->version_ondisk = U16_MAX; @@ -1156,15 +1201,14 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, b->version_ondisk = min(b->version_ondisk, le16_to_cpu(i->version)); - ret = validate_bset(c, ca, b, i, b->written, sectors, - READ, have_retry, saw_error); + ret = validate_bset(c, ca, b, i, b->written, sectors, READ, failed, err_msg); if (ret) goto fsck_err; if (!b->written) btree_node_set_format(b, b->data->format); - ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error); + ret = validate_bset_keys(c, b, i, READ, failed, err_msg); if (ret) goto fsck_err; @@ -1225,23 +1269,20 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool); sorted->keys.u64s = 0; - set_btree_bset(b, b->set, &b->data->keys); - b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter); memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0, btree_buf_bytes(b) - sizeof(struct btree_node) - b->nr.live_u64s * sizeof(u64)); - u64s = le16_to_cpu(sorted->keys.u64s); + b->data->keys.u64s = sorted->keys.u64s; *sorted = *b->data; - sorted->keys.u64s = cpu_to_le16(u64s); swap(sorted, b->data); set_btree_bset(b, b->set, &b->data->keys); b->nsets = 1; b->data->keys.journal_seq = cpu_to_le64(max_journal_seq); - BUG_ON(b->nr.live_u64s != u64s); + BUG_ON(b->nr.live_u64s != le16_to_cpu(b->data->keys.u64s)); btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted); @@ -1255,7 +1296,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, ret = btree_node_bkey_val_validate(c, b, u.s_c, READ); if (ret == -BCH_ERR_fsck_delete_bkey || - (bch2_inject_invalid_keys && + (static_branch_unlikely(&bch2_inject_invalid_keys) && !bversion_cmp(u.k->bversion, MAX_VERSION))) { btree_keys_account_key_drop(&b->nr, 0, k); @@ -1295,20 +1336,11 @@ int 
bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, if (!ptr_written) set_btree_node_need_rewrite(b); -out: +fsck_err: mempool_free(iter, &c->fill_iter); printbuf_exit(&buf); bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time); - return retry_read; -fsck_err: - if (ret == -BCH_ERR_btree_node_read_err_want_retry || - ret == -BCH_ERR_btree_node_read_err_must_retry) { - retry_read = 1; - } else { - set_btree_node_read_error(b); - bch2_btree_lost_data(c, b->c.btree_id); - } - goto out; + return ret; } static void btree_node_read_work(struct work_struct *work) @@ -1320,16 +1352,26 @@ static void btree_node_read_work(struct work_struct *work) struct btree *b = rb->b; struct bio *bio = &rb->bio; struct bch_io_failures failed = { .nr = 0 }; + int ret = 0; + struct printbuf buf = PRINTBUF; - bool saw_error = false; - bool retry = false; - bool can_retry; + bch2_log_msg_start(c, &buf); + + prt_printf(&buf, "btree node read error at btree "); + bch2_btree_pos_to_text(&buf, c, b); + prt_newline(&buf); goto start; while (1) { - retry = true; - bch_info(c, "retrying read"); - ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ); + ret = bch2_bkey_pick_read_device(c, + bkey_i_to_s_c(&b->key), + &failed, &rb->pick, -1); + if (ret) { + set_btree_node_read_error(b); + break; + } + + ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read); rb->have_ioref = ca != NULL; rb->start_time = local_clock(); bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META); @@ -1346,59 +1388,59 @@ static void btree_node_read_work(struct work_struct *work) bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, rb->start_time, !bio->bi_status); start: - printbuf_reset(&buf); - bch2_btree_pos_to_text(&buf, c, b); - - if (ca && bio->bi_status) - bch_err_dev_ratelimited(ca, - "btree read error %s for %s", - bch2_blk_status_to_str(bio->bi_status), buf.buf); if (rb->have_ioref) - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_read); rb->have_ioref = false; - bch2_mark_io_failure(&failed, &rb->pick, false); - - can_retry = bch2_bkey_pick_read_device(c, - bkey_i_to_s_c(&b->key), - &failed, &rb->pick, -1) > 0; - - if (!bio->bi_status && - !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) { - if (retry) - bch_info(c, "retry success"); - break; + if (bio->bi_status) { + bch2_mark_io_failure(&failed, &rb->pick, false); + continue; } - saw_error = true; + ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf); + if (ret == -BCH_ERR_btree_node_read_err_want_retry || + ret == -BCH_ERR_btree_node_read_err_must_retry) + continue; - if (!can_retry) { + if (ret) set_btree_node_read_error(b); - bch2_btree_lost_data(c, b->c.btree_id); - break; - } + + break; } - bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read], - rb->start_time); - bio_put(&rb->bio); + bch2_io_failures_to_text(&buf, c, &failed); + + if (btree_node_read_error(b)) + bch2_btree_lost_data(c, &buf, b->c.btree_id); + + /* + * only print retry success if we read from a replica with no errors + */ + if (btree_node_read_error(b)) + prt_printf(&buf, "ret %s", bch2_err_str(ret)); + else if (failed.nr) { + if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev)) + prt_printf(&buf, "retry success"); + else + prt_printf(&buf, "repair success"); + } - if ((saw_error || + if ((failed.nr || btree_node_need_rewrite(b)) && !btree_node_read_error(b) && - c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) { - if (saw_error) { - printbuf_reset(&buf); - 
bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); - prt_str(&buf, " "); - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); - bch_err_ratelimited(c, "%s: rewriting btree node at due to error\n %s", - __func__, buf.buf); - } - + c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) { + prt_printf(&buf, " (rewriting node)"); bch2_btree_node_rewrite_async(c, b); } + prt_newline(&buf); + + if (failed.nr) + bch2_print_str_ratelimited(c, KERN_ERR, buf.buf); + async_object_list_del(c, btree_read_bio, rb->list_idx); + bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read], + rb->start_time); + bio_put(&rb->bio); printbuf_exit(&buf); clear_btree_node_read_in_flight(b); smp_mb__after_atomic(); @@ -1419,6 +1461,11 @@ static void btree_node_read_endio(struct bio *bio) queue_work(c->btree_read_complete_wq, &rb->work); } +void bch2_btree_read_bio_to_text(struct printbuf *out, struct btree_read_bio *rbio) +{ + bch2_bio_to_text(out, &rbio->bio); +} + struct btree_node_read_all { struct closure cl; struct bch_fs *c; @@ -1478,12 +1525,13 @@ static CLOSURE_CALLBACK(btree_node_read_all_replicas_done) struct btree *b = ra->b; struct printbuf buf = PRINTBUF; bool dump_bset_maps = false; - bool have_retry = false; int ret = 0, best = -1, write = READ; unsigned i, written = 0, written2 = 0; __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0; bool _saw_error = false, *saw_error = &_saw_error; + struct printbuf *err_msg = NULL; + struct bch_io_failures *failed = NULL; for (i = 0; i < ra->nr; i++) { struct btree_node *bn = ra->buf[i]; @@ -1576,14 +1624,19 @@ fsck_err: if (best >= 0) { memcpy(b->data, ra->buf[best], btree_buf_bytes(b)); - ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error); + ret = bch2_btree_node_read_done(c, NULL, b, NULL, NULL); } else { ret = -1; } if (ret) { set_btree_node_read_error(b); - bch2_btree_lost_data(c, b->c.btree_id); + + struct printbuf buf = PRINTBUF; + bch2_btree_lost_data(c, &buf, b->c.btree_id); + if (buf.pos) + bch_err(c, "%s", buf.buf); + printbuf_exit(&buf); } else if (*saw_error) bch2_btree_node_rewrite_async(c, b); @@ -1612,7 +1665,8 @@ static void btree_node_read_all_replicas_endio(struct bio *bio) struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev); bch2_latency_acct(ca, rb->start_time, READ); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_btree_node_read_all_replicas); } ra->err[rb->idx] = bio->bi_status; @@ -1652,7 +1706,8 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool i = 0; bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) { - struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); + struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, + BCH_DEV_READ_REF_btree_node_read_all_replicas); struct btree_read_bio *rb = container_of(ra->bio[i], struct btree_read_bio, bio); rb->c = c; @@ -1703,7 +1758,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, trace_and_count(c, btree_node_read, trans, b); - if (bch2_verify_all_btree_replicas && + if (static_branch_unlikely(&bch2_verify_all_btree_replicas) && !btree_node_read_all_replicas(c, b, sync)) return; @@ -1711,26 +1766,34 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, NULL, &pick, -1); if (ret <= 0) { + bool ratelimit = true; struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); prt_str(&buf, "btree node read error: no device to read from\n at "); bch2_btree_pos_to_text(&buf, 
c, b); - bch_err_ratelimited(c, "%s", buf.buf); - - if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) && - c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) - bch2_fatal_error(c); + prt_newline(&buf); + bch2_btree_lost_data(c, &buf, b->c.btree_id); + + if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology) && + bch2_fs_emergency_read_only2(c, &buf)) + ratelimit = false; + + static DEFINE_RATELIMIT_STATE(rs, + DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + if (!ratelimit || __ratelimit(&rs)) + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); set_btree_node_read_error(b); - bch2_btree_lost_data(c, b->c.btree_id); clear_btree_node_read_in_flight(b); smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); - printbuf_exit(&buf); return; } - ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); + ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read); bio = bio_alloc_bioset(NULL, buf_pages(b->data, btree_buf_bytes(b)), @@ -1749,6 +1812,8 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, bio->bi_end_io = btree_node_read_endio; bch2_bio_map(bio, b->data, btree_buf_bytes(b)); + async_object_list_add(c, btree_read_bio, rb, &rb->list_idx); + if (rb->have_ioref) { this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree], bio_sectors(bio)); @@ -1922,7 +1987,7 @@ static void btree_node_scrub_work(struct work_struct *work) bch_err(c, "error validating btree node during scrub on %s at btree %s", scrub->ca->name, err.buf); - ret = bch2_btree_node_rewrite(trans, &iter, b, 0); + ret = bch2_btree_node_rewrite(trans, &iter, b, 0, 0); } err: bch2_trans_iter_exit(trans, &iter); @@ -1933,9 +1998,9 @@ err: printbuf_exit(&err); bch2_bkey_buf_exit(&scrub->key, c);; btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf); - percpu_ref_put(&scrub->ca->io_ref[READ]); + enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub); kfree(scrub); - bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub); } static void btree_node_scrub_endio(struct bio *bio) @@ -1954,7 +2019,7 @@ int bch2_btree_node_scrub(struct btree_trans *trans, struct bch_fs *c = trans->c; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_node_scrub)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_node_scrub)) return -BCH_ERR_erofs_no_writes; struct extent_ptr_decoded pick; @@ -1962,7 +2027,8 @@ int bch2_btree_node_scrub(struct btree_trans *trans, if (ret <= 0) goto err; - struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); + struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, + BCH_DEV_READ_REF_btree_node_scrub); if (!ca) { ret = -BCH_ERR_device_offline; goto err; @@ -2002,9 +2068,9 @@ int bch2_btree_node_scrub(struct btree_trans *trans, return 0; err_free: btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub); err: - bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub); return ret; } @@ -2121,6 +2187,7 @@ static void btree_node_write_work(struct work_struct *work) goto err; } out: + async_object_list_del(c, btree_write_bio, wbio->list_idx); bio_put(&wbio->wbio.bio); btree_node_write_done(c, b, start_time); return; @@ -2172,7 +2239,8 @@ static void btree_node_write_endio(struct bio *bio) * 
btree writes yet (due to device removal/ro): */ if (wbio->have_ioref) - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_btree_node_write); if (parent) { bio_put(bio); @@ -2184,14 +2252,12 @@ static void btree_node_write_endio(struct bio *bio) smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner); INIT_WORK(&wb->work, btree_node_write_work); - queue_work(c->btree_io_complete_wq, &wb->work); + queue_work(c->btree_write_complete_wq, &wb->work); } static int validate_bset_for_write(struct bch_fs *c, struct btree *b, struct bset *i, unsigned sectors) { - bool saw_error; - int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key), (struct bkey_validate_context) { .from = BKEY_VALIDATE_btree_node, @@ -2204,8 +2270,8 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b, return ret; } - ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?: - validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error); + ret = validate_bset_keys(c, b, i, WRITE, NULL, NULL) ?: + validate_bset(c, NULL, b, i, b->written, sectors, WRITE, NULL, NULL); if (ret) { bch2_inconsistent_error(c); dump_stack(); @@ -2472,6 +2538,8 @@ do_write: atomic64_inc(&c->btree_write_stats[type].nr); atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes); + async_object_list_add(c, btree_write_bio, wbio, &wbio->list_idx); + INIT_WORK(&wbio->work, btree_write_submit); queue_work(c->btree_write_submit_wq, &wbio->work); return; diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h index dbf76d22c660..30a5180532c8 100644 --- a/fs/bcachefs/btree_io.h +++ b/fs/bcachefs/btree_io.h @@ -41,6 +41,9 @@ struct btree_read_bio { u64 start_time; unsigned have_ioref:1; unsigned idx:7; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + unsigned list_idx; +#endif struct extent_ptr_decoded pick; struct work_struct work; struct bio bio; @@ -53,6 +56,9 @@ struct btree_write_bio { unsigned data_bytes; unsigned sector_offset; u64 start_time; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + unsigned list_idx; +#endif struct bch_write_bio wbio; }; @@ -128,11 +134,15 @@ void bch2_btree_build_aux_trees(struct btree *); void bch2_btree_init_next(struct btree_trans *, struct btree *); int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *, - struct btree *, bool, bool *); + struct btree *, + struct bch_io_failures *, + struct printbuf *); void bch2_btree_node_read(struct btree_trans *, struct btree *, bool); int bch2_btree_root_read(struct bch_fs *, enum btree_id, const struct bkey_i *, unsigned); +void bch2_btree_read_bio_to_text(struct printbuf *, struct btree_read_bio *); + int bch2_btree_node_scrub(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, unsigned); diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index a873ec1baf58..b4bf4217a3fa 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -16,6 +16,7 @@ #include "journal_io.h" #include "replicas.h" #include "snapshot.h" +#include "super.h" #include "trace.h" #include <linux/random.h> @@ -114,11 +115,9 @@ static inline bool btree_path_pos_in_node(struct btree_path *path, !btree_path_pos_after_node(path, b); } -/* Btree iterator: */ +/* Debug: */ -#ifdef CONFIG_BCACHEFS_DEBUG - -static void bch2_btree_path_verify_cached(struct btree_trans *trans, +static void __bch2_btree_path_verify_cached(struct btree_trans *trans, struct btree_path *path) { struct bkey_cached *ck; @@ -135,7 +134,7 @@ static void bch2_btree_path_verify_cached(struct 
btree_trans *trans, btree_node_unlock(trans, path, 0); } -static void bch2_btree_path_verify_level(struct btree_trans *trans, +static void __bch2_btree_path_verify_level(struct btree_trans *trans, struct btree_path *path, unsigned level) { struct btree_path_level *l; @@ -147,16 +146,13 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans, struct printbuf buf3 = PRINTBUF; const char *msg; - if (!bch2_debug_check_iterators) - return; - l = &path->l[level]; tmp = l->iter; locked = btree_node_locked(path, level); if (path->cached) { if (!level) - bch2_btree_path_verify_cached(trans, path); + __bch2_btree_path_verify_cached(trans, path); return; } @@ -217,7 +213,7 @@ err: msg, level, buf1.buf, buf2.buf, buf3.buf); } -static void bch2_btree_path_verify(struct btree_trans *trans, +static void __bch2_btree_path_verify(struct btree_trans *trans, struct btree_path *path) { struct bch_fs *c = trans->c; @@ -229,22 +225,22 @@ static void bch2_btree_path_verify(struct btree_trans *trans, break; } - bch2_btree_path_verify_level(trans, path, i); + __bch2_btree_path_verify_level(trans, path, i); } - bch2_btree_path_verify_locks(path); + bch2_btree_path_verify_locks(trans, path); } -void bch2_trans_verify_paths(struct btree_trans *trans) +void __bch2_trans_verify_paths(struct btree_trans *trans) { struct btree_path *path; unsigned iter; trans_for_each_path(trans, path, iter) - bch2_btree_path_verify(trans, path); + __bch2_btree_path_verify(trans, path); } -static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter) +static void __bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter) { BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached); @@ -256,11 +252,11 @@ static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter !btree_type_has_snapshot_field(iter->btree_id)); if (iter->update_path) - bch2_btree_path_verify(trans, &trans->paths[iter->update_path]); - bch2_btree_path_verify(trans, btree_iter_path(trans, iter)); + __bch2_btree_path_verify(trans, &trans->paths[iter->update_path]); + __bch2_btree_path_verify(trans, btree_iter_path(trans, iter)); } -static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) +static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) { BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && !iter->pos.snapshot); @@ -274,16 +270,13 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) bkey_gt(iter->pos, iter->k.p))); } -static int bch2_btree_iter_verify_ret(struct btree_trans *trans, - struct btree_iter *iter, struct bkey_s_c k) +static int __bch2_btree_iter_verify_ret(struct btree_trans *trans, + struct btree_iter *iter, struct bkey_s_c k) { struct btree_iter copy; struct bkey_s_c prev; int ret = 0; - if (!bch2_debug_check_iterators) - return 0; - if (!(iter->flags & BTREE_ITER_filter_snapshots)) return 0; @@ -324,7 +317,7 @@ out: return ret; } -void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, +void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, struct bpos pos) { bch2_trans_verify_not_unlocked_or_in_restart(trans); @@ -357,19 +350,40 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf); } -#else - static inline void bch2_btree_path_verify_level(struct btree_trans *trans, - struct btree_path *path, unsigned l) {} + struct btree_path *path, unsigned l) +{ + if 
(static_branch_unlikely(&bch2_debug_check_iterators)) + __bch2_btree_path_verify_level(trans, path, l); +} + static inline void bch2_btree_path_verify(struct btree_trans *trans, - struct btree_path *path) {} + struct btree_path *path) +{ + if (static_branch_unlikely(&bch2_debug_check_iterators)) + __bch2_btree_path_verify(trans, path); +} + static inline void bch2_btree_iter_verify(struct btree_trans *trans, - struct btree_iter *iter) {} -static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {} -static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_s_c k) { return 0; } + struct btree_iter *iter) +{ + if (static_branch_unlikely(&bch2_debug_check_iterators)) + __bch2_btree_iter_verify(trans, iter); +} -#endif +static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) +{ + if (static_branch_unlikely(&bch2_debug_check_iterators)) + __bch2_btree_iter_verify_entry_exit(iter); +} + +static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter, + struct bkey_s_c k) +{ + return static_branch_unlikely(&bch2_debug_check_iterators) + ? __bch2_btree_iter_verify_ret(trans, iter, k) + : 0; +} /* Btree path: fixups after btree updates */ @@ -523,7 +537,7 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans, __bch2_btree_node_iter_fix(path, b, node_iter, t, where, clobber_u64s, new_u64s); - if (bch2_debug_check_iterators) + if (static_branch_unlikely(&bch2_debug_check_iterators)) bch2_btree_node_iter_verify(node_iter, b); } @@ -977,7 +991,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans, path->level = level; bch2_btree_path_level_init(trans, path, b); - bch2_btree_path_verify_locks(path); + bch2_btree_path_verify_locks(trans, path); err: bch2_bkey_buf_exit(&tmp, c); return ret; @@ -1089,7 +1103,7 @@ static void btree_path_set_level_down(struct btree_trans *trans, if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED) btree_node_unlock(trans, path, l); - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE); bch2_btree_path_verify(trans, path); } @@ -1162,7 +1176,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans, } if (path->cached) { - ret = bch2_btree_path_traverse_cached(trans, path, flags); + ret = bch2_btree_path_traverse_cached(trans, path_idx, flags); goto out; } @@ -1287,7 +1301,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans, if (unlikely(path->cached)) { btree_node_unlock(trans, path, 0); path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up); - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE); goto out; } @@ -1316,7 +1330,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans, } if (unlikely(level != path->level)) { - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE); __bch2_btree_path_unlock(trans, path); } out: @@ -1385,45 +1399,45 @@ static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_p void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent) { - struct btree_path *path = trans->paths + path_idx, *dup; + struct btree_path *path = trans->paths + path_idx, *dup = NULL; if (!__btree_path_put(trans, path, intent)) return; + if (!path->preserve && !path->should_be_locked) + goto free; + dup = path->preserve ? 
have_path_at_pos(trans, path) : have_node_at_pos(trans, path); - - trace_btree_path_free(trans, path_idx, dup); - - if (!dup && !(!path->preserve && !is_btree_node(path, path->level))) + if (!dup) return; - if (path->should_be_locked && !trans->restarted) { - if (!dup) - return; - + /* + * If we need this path locked, the duplicate also has to be locked + * before we free this one: + */ + if (path->should_be_locked && + !dup->should_be_locked && + !trans->restarted) { if (!(trans->locked ? bch2_btree_path_relock_norestart(trans, dup) : bch2_btree_path_can_relock(trans, dup))) return; - } - if (dup) { - dup->preserve |= path->preserve; - dup->should_be_locked |= path->should_be_locked; + dup->should_be_locked = true; } - __bch2_path_free(trans, path_idx); -} - -static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path, - bool intent) -{ - if (!__btree_path_put(trans, trans->paths + path, intent)) - return; + BUG_ON(path->should_be_locked && + !trans->restarted && + trans->locked && + !btree_node_locked(dup, dup->level)); - __bch2_path_free(trans, path); + path->should_be_locked = false; + dup->preserve |= path->preserve; +free: + trace_btree_path_free(trans, path_idx, dup); + __bch2_path_free(trans, path_idx); } void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count) @@ -1485,7 +1499,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans) prt_newline(buf); } - for (struct jset_entry *e = trans->journal_entries; + for (struct jset_entry *e = btree_trans_journal_entries_start(trans); e != btree_trans_journal_entries_top(trans); e = vstruct_next(e)) { bch2_journal_entry_to_text(buf, trans->c, e); @@ -1591,7 +1605,7 @@ void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort) __bch2_trans_paths_to_text(&buf, trans, nosort); bch2_trans_updates_to_text(&buf, trans); - bch2_print_str(trans->c, buf.buf); + bch2_print_str(trans->c, KERN_ERR, buf.buf); printbuf_exit(&buf); } @@ -1735,6 +1749,10 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans, btree_trans_sort_paths(trans); + if (intent) + locks_want = max(locks_want, level + 1); + locks_want = min(locks_want, BTREE_MAX_DEPTH); + trans_for_each_path_inorder(trans, path, iter) { if (__btree_path_cmp(path, btree_id, @@ -1749,7 +1767,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans, if (path_pos && trans->paths[path_pos].cached == cached && trans->paths[path_pos].btree_id == btree_id && - trans->paths[path_pos].level == level) { + trans->paths[path_pos].level == level && + bch2_btree_path_upgrade_norestart(trans, trans->paths + path_pos, locks_want)) { trace_btree_path_get(trans, trans->paths + path_pos, &pos); __btree_path_get(trans, trans->paths + path_pos, intent); @@ -1781,9 +1800,6 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans, if (!(flags & BTREE_ITER_nopreserve)) path->preserve = true; - if (path->intent_ref) - locks_want = max(locks_want, level + 1); - /* * If the path has locks_want greater than requested, we don't downgrade * it here - on transaction restart because btree node split needs to @@ -1792,10 +1808,6 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans, * a successful transaction commit. */ - locks_want = min(locks_want, BTREE_MAX_DEPTH); - if (locks_want > path->locks_want) - bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL); - return path_idx; } @@ -1967,6 +1979,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_ /* got to end? 
*/ if (!btree_path_node(path, path->level + 1)) { + path->should_be_locked = false; btree_path_set_level_up(trans, path); return NULL; } @@ -1978,12 +1991,12 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_ bch2_btree_path_downgrade(trans, path); if (!bch2_btree_node_relock(trans, path, path->level + 1)) { + trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path); + ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock); __bch2_btree_path_unlock(trans, path); path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock); path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock); - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); - trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path); - ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE); goto err; } @@ -2344,8 +2357,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree } if (iter->update_path) { - bch2_path_put_nokeep(trans, iter->update_path, - iter->flags & BTREE_ITER_intent); + bch2_path_put(trans, iter->update_path, iter->flags & BTREE_ITER_intent); iter->update_path = 0; } @@ -2374,8 +2386,8 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree if (iter->update_path && !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) { - bch2_path_put_nokeep(trans, iter->update_path, - iter->flags & BTREE_ITER_intent); + bch2_path_put(trans, iter->update_path, + iter->flags & BTREE_ITER_intent); iter->update_path = 0; } @@ -2634,7 +2646,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct * the last possible snapshot overwrite, return * it: */ - bch2_path_put_nokeep(trans, iter->path, + bch2_path_put(trans, iter->path, iter->flags & BTREE_ITER_intent); iter->path = saved_path; saved_path = 0; @@ -2664,8 +2676,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct * our previous saved candidate: */ if (saved_path) { - bch2_path_put_nokeep(trans, saved_path, - iter->flags & BTREE_ITER_intent); + bch2_path_put(trans, saved_path, + iter->flags & BTREE_ITER_intent); saved_path = 0; } @@ -2708,7 +2720,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct iter->pos.snapshot = iter->snapshot; out_no_locked: if (saved_path) - bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent); + bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_intent); bch2_btree_iter_verify_entry_exit(iter); bch2_btree_iter_verify(trans, iter); @@ -2929,7 +2941,7 @@ static void btree_trans_verify_sorted(struct btree_trans *trans) struct btree_path *path, *prev = NULL; struct trans_for_each_path_inorder_iter iter; - if (!bch2_debug_check_iterators) + if (!static_branch_unlikely(&bch2_debug_check_iterators)) return; trans_for_each_path_inorder(trans, path, iter) { @@ -3031,7 +3043,7 @@ static inline void btree_path_list_add(struct btree_trans *trans, void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) { if (iter->update_path) - bch2_path_put_nokeep(trans, iter->update_path, + bch2_path_put(trans, iter->update_path, iter->flags & BTREE_ITER_intent); if (iter->path) bch2_path_put(trans, iter->path, @@ -3095,7 +3107,19 @@ void bch2_trans_copy_iter(struct btree_trans *trans, dst->key_cache_path = 0; } -void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size) +#ifdef 
CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE +void bch2_trans_kmalloc_trace_to_text(struct printbuf *out, + darray_trans_kmalloc_trace *trace) +{ + printbuf_tabstops_reset(out); + printbuf_tabstop_push(out, 60); + + darray_for_each(*trace, i) + prt_printf(out, "%pS\t%zu\n", (void *) i->ip, i->bytes); +} +#endif + +void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long ip) { struct bch_fs *c = trans->c; unsigned new_top = trans->mem_top + size; @@ -3105,14 +3129,35 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size) void *new_mem; void *p; - WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX); + if (WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX)) { +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + struct printbuf buf = PRINTBUF; + bch2_trans_kmalloc_trace_to_text(&buf, &trans->trans_kmalloc_trace); + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); +#endif + } ret = trans_maybe_inject_restart(trans, _RET_IP_); if (ret) return ERR_PTR(ret); struct btree_transaction_stats *s = btree_trans_stats(trans); - s->max_mem = max(s->max_mem, new_bytes); + if (new_bytes > s->max_mem) { + mutex_lock(&s->lock); +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + darray_resize(&s->trans_kmalloc_trace, trans->trans_kmalloc_trace.nr); + s->trans_kmalloc_trace.nr = min(s->trans_kmalloc_trace.size, + trans->trans_kmalloc_trace.nr); + + memcpy(s->trans_kmalloc_trace.data, + trans->trans_kmalloc_trace.data, + sizeof(s->trans_kmalloc_trace.data[0]) * + s->trans_kmalloc_trace.nr); +#endif + s->max_mem = new_bytes; + mutex_unlock(&s->lock); + } if (trans->used_mempool) { if (trans->mem_bytes >= new_bytes) @@ -3172,6 +3217,8 @@ out_new_mem: BCH_ERR_transaction_restart_mem_realloced, _RET_IP_)); } out_change_top: + bch2_trans_kmalloc_trace(trans, size, ip); + p = trans->mem + trans->mem_top; trans->mem_top += size; memset(p, 0, size); @@ -3231,7 +3278,6 @@ u32 bch2_trans_begin(struct btree_trans *trans) trans->restart_count++; trans->mem_top = 0; - trans->journal_entries = NULL; trans_for_each_path(trans, path, i) { path->should_be_locked = false; @@ -3285,6 +3331,10 @@ u32 bch2_trans_begin(struct btree_trans *trans) } #endif +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + trans->trans_kmalloc_trace.nr = 0; +#endif + trans_set_locked(trans, false); if (trans->restarted) { @@ -3385,7 +3435,6 @@ got_trans: } trans->nr_paths_max = s->nr_max_paths; - trans->journal_entries_size = s->journal_entries_size; } trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); @@ -3397,28 +3446,44 @@ got_trans: return trans; } -static void check_btree_paths_leaked(struct btree_trans *trans) -{ #ifdef CONFIG_BCACHEFS_DEBUG - struct bch_fs *c = trans->c; + +static bool btree_paths_leaked(struct btree_trans *trans) +{ struct btree_path *path; unsigned i; trans_for_each_path(trans, path, i) if (path->ref) - goto leaked; - return; -leaked: - bch_err(c, "btree paths leaked from %s!", trans->fn); - trans_for_each_path(trans, path, i) - if (path->ref) - printk(KERN_ERR " btree %s %pS\n", - bch2_btree_id_str(path->btree_id), - (void *) path->ip_allocated); - /* Be noisy about this: */ - bch2_fatal_error(c); -#endif + return true; + return false; +} + +static void check_btree_paths_leaked(struct btree_trans *trans) +{ + if (btree_paths_leaked(trans)) { + struct bch_fs *c = trans->c; + struct btree_path *path; + unsigned i; + + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + prt_printf(&buf, "btree paths leaked from %s!\n", trans->fn); + trans_for_each_path(trans, path, i) + if (path->ref) + 
prt_printf(&buf, "btree %s %pS\n", + bch2_btree_id_str(path->btree_id), + (void *) path->ip_allocated); + + bch2_fs_emergency_read_only2(c, &buf); + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + } } +#else +static inline void check_btree_paths_leaked(struct btree_trans *trans) {} +#endif void bch2_trans_put(struct btree_trans *trans) __releases(&c->btree_trans_barrier) @@ -3454,6 +3519,9 @@ void bch2_trans_put(struct btree_trans *trans) #ifdef CONFIG_BCACHEFS_DEBUG darray_exit(&trans->last_restarted_trace); #endif +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + darray_exit(&trans->trans_kmalloc_trace); +#endif unsigned long *paths_allocated = trans->paths_allocated; trans->paths_allocated = NULL; @@ -3608,6 +3676,9 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c) for (s = c->btree_transaction_stats; s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats); s++) { +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + darray_exit(&s->trans_kmalloc_trace); +#endif kfree(s->max_paths_text); bch2_time_stats_exit(&s->lock_hold_times); } diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 9d2cccf5d21a..2cabb5f0f484 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -46,9 +46,11 @@ static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path return --path->ref == 0; } -static inline void btree_path_set_dirty(struct btree_path *path, +static inline void btree_path_set_dirty(struct btree_trans *trans, + struct btree_path *path, enum btree_path_uptodate u) { + BUG_ON(path->should_be_locked && trans->locked && !trans->restarted); path->uptodate = max_t(unsigned, path->uptodate, u); } @@ -285,14 +287,23 @@ static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex : __bch2_trans_mutex_lock(trans, lock); } -#ifdef CONFIG_BCACHEFS_DEBUG -void bch2_trans_verify_paths(struct btree_trans *); -void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos); -#else -static inline void bch2_trans_verify_paths(struct btree_trans *trans) {} -static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, - struct bpos pos) {} -#endif +/* Debug: */ + +void __bch2_trans_verify_paths(struct btree_trans *); +void __bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos); + +static inline void bch2_trans_verify_paths(struct btree_trans *trans) +{ + if (static_branch_unlikely(&bch2_debug_check_iterators)) + __bch2_trans_verify_paths(trans); +} + +static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id btree, + struct bpos pos) +{ + if (static_branch_unlikely(&bch2_debug_check_iterators)) + __bch2_assert_pos_locked(trans, btree, pos); +} void bch2_btree_path_fix_key_modified(struct btree_trans *trans, struct btree *, struct bkey_packed *); @@ -543,43 +554,73 @@ void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btre void bch2_set_btree_iter_dontneed(struct btree_trans *, struct btree_iter *); -void *__bch2_trans_kmalloc(struct btree_trans *, size_t); +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE +void bch2_trans_kmalloc_trace_to_text(struct printbuf *, + darray_trans_kmalloc_trace *); +#endif + +void *__bch2_trans_kmalloc(struct btree_trans *, size_t, unsigned long); -/** - * bch2_trans_kmalloc - allocate memory for use by the current transaction - * - * Must be called after bch2_trans_begin, which on second and further calls - * frees all memory allocated in this transaction - */ -static inline void 
*bch2_trans_kmalloc(struct btree_trans *trans, size_t size) +static inline void bch2_trans_kmalloc_trace(struct btree_trans *trans, size_t size, + unsigned long ip) +{ +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + darray_push(&trans->trans_kmalloc_trace, + ((struct trans_kmalloc_trace) { .ip = ip, .bytes = size })); +#endif +} + +static __always_inline void *bch2_trans_kmalloc_nomemzero_ip(struct btree_trans *trans, size_t size, + unsigned long ip) { size = roundup(size, 8); + bch2_trans_kmalloc_trace(trans, size, ip); + if (likely(trans->mem_top + size <= trans->mem_bytes)) { void *p = trans->mem + trans->mem_top; trans->mem_top += size; - memset(p, 0, size); return p; } else { - return __bch2_trans_kmalloc(trans, size); + return __bch2_trans_kmalloc(trans, size, ip); } } -static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size) +static __always_inline void *bch2_trans_kmalloc_ip(struct btree_trans *trans, size_t size, + unsigned long ip) { - size = round_up(size, 8); + size = roundup(size, 8); + + bch2_trans_kmalloc_trace(trans, size, ip); if (likely(trans->mem_top + size <= trans->mem_bytes)) { void *p = trans->mem + trans->mem_top; trans->mem_top += size; + memset(p, 0, size); return p; } else { - return __bch2_trans_kmalloc(trans, size); + return __bch2_trans_kmalloc(trans, size, ip); } } +/** + * bch2_trans_kmalloc - allocate memory for use by the current transaction + * + * Must be called after bch2_trans_begin, which on second and further calls + * frees all memory allocated in this transaction + */ +static __always_inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size) +{ + return bch2_trans_kmalloc_ip(trans, size, _THIS_IP_); +} + +static __always_inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size) +{ + return bch2_trans_kmalloc_nomemzero_ip(trans, size, _THIS_IP_); +} + static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 2b186584a291..9da950e7eb7d 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -101,8 +101,8 @@ static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu kmem_cache_free(bch2_key_cache, ck); } -static void bkey_cached_free(struct btree_key_cache *bc, - struct bkey_cached *ck) +static inline void bkey_cached_free_noassert(struct btree_key_cache *bc, + struct bkey_cached *ck) { kfree(ck->k); ck->k = NULL; @@ -116,6 +116,19 @@ static void bkey_cached_free(struct btree_key_cache *bc, this_cpu_inc(*bc->nr_pending); } +static void bkey_cached_free(struct btree_trans *trans, + struct btree_key_cache *bc, + struct bkey_cached *ck) +{ + /* + * we'll hit strange issues in the SRCU code if we aren't holding an + * SRCU read lock... 
+ */ + EBUG_ON(!trans->srcu_held); + + bkey_cached_free_noassert(bc, ck); +} + static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp) { gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE; @@ -281,7 +294,7 @@ static int btree_key_cache_create(struct btree_trans *trans, ck_path->uptodate = BTREE_ITER_UPTODATE; return 0; err: - bkey_cached_free(bc, ck); + bkey_cached_free(trans, bc, ck); mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED); return ret; @@ -301,9 +314,11 @@ static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans } static noinline int btree_key_cache_fill(struct btree_trans *trans, - struct btree_path *ck_path, + btree_path_idx_t ck_path_idx, unsigned flags) { + struct btree_path *ck_path = trans->paths + ck_path_idx; + if (flags & BTREE_ITER_cached_nofill) { ck_path->l[0].b = NULL; return 0; @@ -325,6 +340,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, goto err; /* Recheck after btree lookup, before allocating: */ + ck_path = trans->paths + ck_path_idx; ret = bch2_btree_key_cache_find(c, ck_path->btree_id, ck_path->pos) ? -EEXIST : 0; if (unlikely(ret)) goto out; @@ -344,10 +360,11 @@ err: } static inline int btree_path_traverse_cached_fast(struct btree_trans *trans, - struct btree_path *path) + btree_path_idx_t path_idx) { struct bch_fs *c = trans->c; struct bkey_cached *ck; + struct btree_path *path = trans->paths + path_idx; retry: ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos); if (!ck) @@ -373,27 +390,32 @@ retry: return 0; } -int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path, +int bch2_btree_path_traverse_cached(struct btree_trans *trans, + btree_path_idx_t path_idx, unsigned flags) { - EBUG_ON(path->level); - - path->l[1].b = NULL; + EBUG_ON(trans->paths[path_idx].level); int ret; do { - ret = btree_path_traverse_cached_fast(trans, path); + ret = btree_path_traverse_cached_fast(trans, path_idx); if (unlikely(ret == -ENOENT)) - ret = btree_key_cache_fill(trans, path, flags); + ret = btree_key_cache_fill(trans, path_idx, flags); } while (ret == -EEXIST); + struct btree_path *path = trans->paths + path_idx; + if (unlikely(ret)) { path->uptodate = BTREE_ITER_NEED_TRAVERSE; if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) { btree_node_unlock(trans, path, 0); path->l[0].b = ERR_PTR(ret); } + } else { + BUG_ON(path->uptodate); + BUG_ON(!path->nodes_locked); } + return ret; } @@ -502,7 +524,7 @@ evict: mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED); if (bkey_cached_evict(&c->btree_key_cache, ck)) { - bkey_cached_free(&c->btree_key_cache, ck); + bkey_cached_free(trans, &c->btree_key_cache, ck); } else { six_unlock_write(&ck->c.lock); six_unlock_intent(&ck->c.lock); @@ -616,7 +638,7 @@ void bch2_btree_key_cache_drop(struct btree_trans *trans, } bkey_cached_evict(bc, ck); - bkey_cached_free(bc, ck); + bkey_cached_free(trans, bc, ck); mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED); @@ -624,10 +646,17 @@ void bch2_btree_key_cache_drop(struct btree_trans *trans, unsigned i; trans_for_each_path(trans, path2, i) if (path2->l[0].b == (void *) ck) { + /* + * It's safe to clear should_be_locked here because + * we're evicting from the key cache, and we still have + * the underlying btree locked: filling into the key + * cache would require taking a write lock on the btree + * node + */ + path2->should_be_locked = false; __bch2_btree_path_unlock(trans, path2); path2->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_drop); - 
path2->should_be_locked = false; - btree_path_set_dirty(path2, BTREE_ITER_NEED_TRAVERSE); + btree_path_set_dirty(trans, path2, BTREE_ITER_NEED_TRAVERSE); } bch2_trans_verify_locks(trans); @@ -684,7 +713,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, } else if (!bkey_cached_lock_for_evict(ck)) { bc->skipped_lock_fail++; } else if (bkey_cached_evict(bc, ck)) { - bkey_cached_free(bc, ck); + bkey_cached_free_noassert(bc, ck); bc->freed++; freed++; } else { diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h index 51d6289b8dee..82d8c72512a9 100644 --- a/fs/bcachefs/btree_key_cache.h +++ b/fs/bcachefs/btree_key_cache.h @@ -40,8 +40,7 @@ int bch2_btree_key_cache_journal_flush(struct journal *, struct bkey_cached * bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos); -int bch2_btree_path_traverse_cached(struct btree_trans *, struct btree_path *, - unsigned); +int bch2_btree_path_traverse_cached(struct btree_trans *, btree_path_idx_t, unsigned); bool bch2_btree_insert_key_cached(struct btree_trans *, unsigned, struct btree_insert_entry *); diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c index 94eb2b73a843..2f2aed0c9916 100644 --- a/fs/bcachefs/btree_locking.c +++ b/fs/bcachefs/btree_locking.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "btree_cache.h" #include "btree_locking.h" #include "btree_types.h" @@ -236,7 +237,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle, prt_newline(&buf); } - bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf); + bch2_print_str_nonblocking(g->g->trans->c, KERN_ERR, buf.buf); printbuf_exit(&buf); BUG(); } @@ -450,13 +451,13 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans, /* relock */ -static inline bool btree_path_get_locks(struct btree_trans *trans, - struct btree_path *path, - bool upgrade, - struct get_locks_fail *f) +static int btree_path_get_locks(struct btree_trans *trans, + struct btree_path *path, + bool upgrade, + struct get_locks_fail *f, + int restart_err) { unsigned l = path->level; - int fail_idx = -1; do { if (!btree_path_node(path, l)) @@ -464,39 +465,49 @@ static inline bool btree_path_get_locks(struct btree_trans *trans, if (!(upgrade ? bch2_btree_node_upgrade(trans, path, l) - : bch2_btree_node_relock(trans, path, l))) { - fail_idx = l; - - if (f) { - f->l = l; - f->b = path->l[l].b; - } - } + : bch2_btree_node_relock(trans, path, l))) + goto err; l++; } while (l < path->locks_want); + if (path->uptodate == BTREE_ITER_NEED_RELOCK) + path->uptodate = BTREE_ITER_UPTODATE; + + return path->uptodate < BTREE_ITER_NEED_RELOCK ? 0 : -1; +err: + if (f) { + f->l = l; + f->b = path->l[l].b; + } + + /* + * Do transaction restart before unlocking, so we don't pop + * should_be_locked asserts + */ + if (restart_err) { + btree_trans_restart(trans, restart_err); + } else if (path->should_be_locked && !trans->restarted) { + if (upgrade) + path->locks_want = l; + return -1; + } + + __bch2_btree_path_unlock(trans, path); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE); + /* * When we fail to get a lock, we have to ensure that any child nodes * can't be relocked so bch2_btree_path_traverse has to walk back up to * the node that we failed to relock: */ - if (fail_idx >= 0) { - __bch2_btree_path_unlock(trans, path); - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); - - do { - path->l[fail_idx].b = upgrade - ? 
ERR_PTR(-BCH_ERR_no_btree_node_upgrade) - : ERR_PTR(-BCH_ERR_no_btree_node_relock); - --fail_idx; - } while (fail_idx >= 0); - } - - if (path->uptodate == BTREE_ITER_NEED_RELOCK) - path->uptodate = BTREE_ITER_UPTODATE; + do { + path->l[l].b = upgrade + ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade) + : ERR_PTR(-BCH_ERR_no_btree_node_relock); + } while (l--); - return path->uptodate < BTREE_ITER_NEED_RELOCK; + return -restart_err ?: -1; } bool __bch2_btree_node_relock(struct btree_trans *trans, @@ -583,7 +594,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans, l++) { if (!bch2_btree_node_relock(trans, path, l)) { __bch2_btree_path_unlock(trans, path); - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE); trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path); return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent); } @@ -595,9 +606,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans, __flatten bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path) { - struct get_locks_fail f; - - bool ret = btree_path_get_locks(trans, path, false, &f); + bool ret = !btree_path_get_locks(trans, path, false, NULL, 0); bch2_trans_verify_locks(trans); return ret; } @@ -613,27 +622,37 @@ int __bch2_btree_path_relock(struct btree_trans *trans, return 0; } -bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans, - struct btree_path *path, - unsigned new_locks_want, - struct get_locks_fail *f) +bool __bch2_btree_path_upgrade_norestart(struct btree_trans *trans, + struct btree_path *path, + unsigned new_locks_want) { - EBUG_ON(path->locks_want >= new_locks_want); - path->locks_want = new_locks_want; - bool ret = btree_path_get_locks(trans, path, true, f); - bch2_trans_verify_locks(trans); + /* + * If we need it locked, we can't touch it. 
Otherwise, we can return + * success - bch2_path_get() will use this path, and it'll just be + * retraversed: + */ + bool ret = !btree_path_get_locks(trans, path, true, NULL, 0) || + !path->should_be_locked; + + bch2_btree_path_verify_locks(trans, path); return ret; } -bool __bch2_btree_path_upgrade(struct btree_trans *trans, - struct btree_path *path, - unsigned new_locks_want, - struct get_locks_fail *f) +int __bch2_btree_path_upgrade(struct btree_trans *trans, + struct btree_path *path, + unsigned new_locks_want) { - bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f); - if (ret) + unsigned old_locks = path->nodes_locked; + unsigned old_locks_want = path->locks_want; + + path->locks_want = max_t(unsigned, path->locks_want, new_locks_want); + + struct get_locks_fail f = {}; + int ret = btree_path_get_locks(trans, path, true, &f, + BCH_ERR_transaction_restart_upgrade); + if (!ret) goto out; /* @@ -665,9 +684,30 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans, linked->btree_id == path->btree_id && linked->locks_want < new_locks_want) { linked->locks_want = new_locks_want; - btree_path_get_locks(trans, linked, true, NULL); + btree_path_get_locks(trans, linked, true, NULL, 0); } } + + count_event(trans->c, trans_restart_upgrade); + if (trace_trans_restart_upgrade_enabled()) { + struct printbuf buf = PRINTBUF; + + prt_printf(&buf, "%s %pS\n", trans->fn, (void *) _RET_IP_); + prt_printf(&buf, "btree %s pos\n", bch2_btree_id_str(path->btree_id)); + bch2_bpos_to_text(&buf, path->pos); + prt_printf(&buf, "locks want %u -> %u level %u\n", + old_locks_want, new_locks_want, f.l); + prt_printf(&buf, "nodes_locked %x -> %x\n", + old_locks, path->nodes_locked); + prt_printf(&buf, "node %s ", IS_ERR(f.b) ? bch2_err_str(PTR_ERR(f.b)) : + !f.b ? "(null)" : "(node)"); + prt_printf(&buf, "path seq %u node seq %u\n", + IS_ERR_OR_NULL(f.b) ? 
0 : f.b->c.lock.seq, + path->l[f.l].lock_seq); + + trace_trans_restart_upgrade(trans->c, buf.buf); + printbuf_exit(&buf); + } out: bch2_trans_verify_locks(trans); return ret; @@ -699,7 +739,7 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans, } } - bch2_btree_path_verify_locks(path); + bch2_btree_path_verify_locks(trans, path); trace_path_downgrade(trans, _RET_IP_, path, old_locks_want); } @@ -728,7 +768,7 @@ static inline void __bch2_trans_unlock(struct btree_trans *trans) __bch2_btree_path_unlock(trans, path); } -static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path, +static noinline __cold void bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path, struct get_locks_fail *f, bool trace) { if (!trace) @@ -738,7 +778,9 @@ static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, str struct printbuf buf = PRINTBUF; bch2_bpos_to_text(&buf, path->pos); - prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq); + prt_printf(&buf, " %s l=%u seq=%u node seq=", + bch2_btree_id_str(path->btree_id), + f->l, path->l[f->l].lock_seq); if (IS_ERR_OR_NULL(f->b)) { prt_str(&buf, bch2_err_str(PTR_ERR(f->b))); } else { @@ -760,7 +802,6 @@ static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, str out: __bch2_trans_unlock(trans); bch2_trans_verify_locks(trans); - return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock); } static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace) @@ -777,10 +818,14 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace) trans_for_each_path(trans, path, i) { struct get_locks_fail f; + int ret; if (path->should_be_locked && - !btree_path_get_locks(trans, path, false, &f)) - return bch2_trans_relock_fail(trans, path, &f, trace); + (ret = btree_path_get_locks(trans, path, false, &f, + BCH_ERR_transaction_restart_relock))) { + bch2_trans_relock_fail(trans, path, &f, trace); + return ret; + } } trans_set_locked(trans, true); @@ -799,18 +844,11 @@ int bch2_trans_relock_notrace(struct btree_trans *trans) return __bch2_trans_relock(trans, false); } -void bch2_trans_unlock_noassert(struct btree_trans *trans) +void bch2_trans_unlock(struct btree_trans *trans) { - __bch2_trans_unlock(trans); - trans_set_unlocked(trans); -} -void bch2_trans_unlock(struct btree_trans *trans) -{ __bch2_trans_unlock(trans); - - trans_set_unlocked(trans); } void bch2_trans_unlock_long(struct btree_trans *trans) @@ -842,32 +880,28 @@ int __bch2_trans_mutex_lock(struct btree_trans *trans, /* Debug */ -#ifdef CONFIG_BCACHEFS_DEBUG - -void bch2_btree_path_verify_locks(struct btree_path *path) +void __bch2_btree_path_verify_locks(struct btree_trans *trans, struct btree_path *path) { - /* - * A path may be uptodate and yet have nothing locked if and only if - * there is no node at path->level, which generally means we were - * iterating over all nodes and got to the end of the btree - */ - BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && - btree_path_node(path, path->level) && - !path->nodes_locked); + if (!path->nodes_locked && btree_path_node(path, path->level)) { + /* + * A path may be uptodate and yet have nothing locked if and only if + * there is no node at path->level, which generally means we were + * iterating over all nodes and got to the end of the btree + */ + BUG_ON(path->uptodate == BTREE_ITER_UPTODATE); + BUG_ON(path->should_be_locked && trans->locked && !trans->restarted); + } if (!path->nodes_locked) return; 
for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) { int want = btree_lock_want(path, l); - int have = btree_node_locked_type(path, l); + int have = btree_node_locked_type_nowrite(path, l); BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED); - BUG_ON(is_btree_node(path, l) && - (want == BTREE_NODE_UNLOCKED || - have != BTREE_NODE_WRITE_LOCKED) && - want != have); + BUG_ON(is_btree_node(path, l) && want != have); BUG_ON(btree_node_locked(path, l) && path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock)); @@ -885,7 +919,7 @@ static bool bch2_trans_locked(struct btree_trans *trans) return false; } -void bch2_trans_verify_locks(struct btree_trans *trans) +void __bch2_trans_verify_locks(struct btree_trans *trans) { if (!trans->locked) { BUG_ON(bch2_trans_locked(trans)); @@ -896,7 +930,5 @@ void bch2_trans_verify_locks(struct btree_trans *trans) unsigned i; trans_for_each_path(trans, path, i) - bch2_btree_path_verify_locks(path); + __bch2_btree_path_verify_locks(trans, path); } - -#endif diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h index b33ab7af8440..9adca77e2580 100644 --- a/fs/bcachefs/btree_locking.h +++ b/fs/bcachefs/btree_locking.h @@ -15,7 +15,6 @@ void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags, gfp_t gfp); -void bch2_trans_unlock_noassert(struct btree_trans *); void bch2_trans_unlock_write(struct btree_trans *); static inline bool is_btree_node(struct btree_path *path, unsigned l) @@ -44,6 +43,15 @@ static inline int btree_node_locked_type(struct btree_path *path, return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3); } +static inline int btree_node_locked_type_nowrite(struct btree_path *path, + unsigned level) +{ + int have = btree_node_locked_type(path, level); + return have == BTREE_NODE_WRITE_LOCKED + ? BTREE_NODE_INTENT_LOCKED + : have; +} + static inline bool btree_node_write_locked(struct btree_path *path, unsigned l) { return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED; @@ -152,7 +160,7 @@ static inline int btree_path_highest_level_locked(struct btree_path *path) static inline void __bch2_btree_path_unlock(struct btree_trans *trans, struct btree_path *path) { - btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_RELOCK); while (path->nodes_locked) btree_node_unlock(trans, path, btree_path_lowest_level_locked(path)); @@ -367,8 +375,8 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans, struct btree_path *path, unsigned level) { EBUG_ON(btree_node_locked(path, level) && - !btree_node_write_locked(path, level) && - btree_node_locked_type(path, level) != __btree_lock_want(path, level)); + btree_node_locked_type_nowrite(path, level) != + __btree_lock_want(path, level)); return likely(btree_node_locked(path, level)) || (!IS_ERR_OR_NULL(path->l[level].b) && @@ -377,31 +385,29 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans, /* upgrade */ -bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *, - struct btree_path *, unsigned, - struct get_locks_fail *); +bool __bch2_btree_path_upgrade_norestart(struct btree_trans *, struct btree_path *, unsigned); -bool __bch2_btree_path_upgrade(struct btree_trans *, - struct btree_path *, unsigned, - struct get_locks_fail *); +static inline bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans, + struct btree_path *path, + unsigned new_locks_want) +{ + return new_locks_want > path->locks_want + ? 
__bch2_btree_path_upgrade_norestart(trans, path, new_locks_want) + : true; +} + +int __bch2_btree_path_upgrade(struct btree_trans *, + struct btree_path *, unsigned); static inline int bch2_btree_path_upgrade(struct btree_trans *trans, struct btree_path *path, unsigned new_locks_want) { - struct get_locks_fail f = {}; - unsigned old_locks_want = path->locks_want; - new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH); - if (path->locks_want < new_locks_want - ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f) - : path->nodes_locked) - return 0; - - trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path, - old_locks_want, new_locks_want, &f); - return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade); + return likely(path->locks_want >= new_locks_want && path->nodes_locked) + ? 0 + : __bch2_btree_path_upgrade(trans, path, new_locks_want); } /* misc: */ @@ -427,7 +433,7 @@ static inline void btree_path_set_level_up(struct btree_trans *trans, struct btree_path *path) { __btree_path_set_level_up(trans, path, path->level++); - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); + btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE); } /* debug */ @@ -439,12 +445,20 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *, int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *); -#ifdef CONFIG_BCACHEFS_DEBUG -void bch2_btree_path_verify_locks(struct btree_path *); -void bch2_trans_verify_locks(struct btree_trans *); -#else -static inline void bch2_btree_path_verify_locks(struct btree_path *path) {} -static inline void bch2_trans_verify_locks(struct btree_trans *trans) {} -#endif +void __bch2_btree_path_verify_locks(struct btree_trans *, struct btree_path *); +void __bch2_trans_verify_locks(struct btree_trans *); + +static inline void bch2_btree_path_verify_locks(struct btree_trans *trans, + struct btree_path *path) +{ + if (static_branch_unlikely(&bch2_debug_check_btree_locking)) + __bch2_btree_path_verify_locks(trans, path); +} + +static inline void bch2_trans_verify_locks(struct btree_trans *trans) +{ + if (static_branch_unlikely(&bch2_debug_check_btree_locking)) + __bch2_trans_verify_locks(trans); +} #endif /* _BCACHEFS_BTREE_LOCKING_H */ diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c index 86acf037590c..5a97a6b8a757 100644 --- a/fs/bcachefs/btree_node_scan.c +++ b/fs/bcachefs/btree_node_scan.c @@ -271,7 +271,7 @@ static int read_btree_nodes_worker(void *p) err: bio_put(bio); free_page((unsigned long) buf); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan); closure_put(w->cl); kfree(w); return 0; @@ -285,13 +285,13 @@ static int read_btree_nodes(struct find_btree_nodes *f) closure_init_stack(&cl); - for_each_online_member(c, ca) { + for_each_online_member(c, ca, BCH_DEV_READ_REF_btree_node_scan) { if (!(ca->mi.data_allowed & BIT(BCH_DATA_btree))) continue; struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL); if (!w) { - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan); ret = -ENOMEM; goto err; } @@ -303,14 +303,14 @@ static int read_btree_nodes(struct find_btree_nodes *f) struct task_struct *t = kthread_create(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name); ret = PTR_ERR_OR_ZERO(t); if (ret) { - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan); kfree(w); bch_err_msg(c, ret, 
"starting kthread"); break; } closure_get(&cl); - percpu_ref_get(&ca->io_ref[READ]); + enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan); wake_up_process(t); } err: @@ -395,7 +395,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c) printbuf_reset(&buf); prt_printf(&buf, "%s: nodes found:\n", __func__); found_btree_nodes_to_text(&buf, c, f->nodes); - bch2_print_string_as_lines(KERN_INFO, buf.buf); + bch2_print_str(c, KERN_INFO, buf.buf); } sort_nonatomic(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL); @@ -424,7 +424,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c) printbuf_reset(&buf); prt_printf(&buf, "%s: nodes after merging replicas:\n", __func__); found_btree_nodes_to_text(&buf, c, f->nodes); - bch2_print_string_as_lines(KERN_INFO, buf.buf); + bch2_print_str(c, KERN_INFO, buf.buf); } swap(nodes_heap, f->nodes); @@ -470,7 +470,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c) printbuf_reset(&buf); prt_printf(&buf, "%s: nodes found after overwrites:\n", __func__); found_btree_nodes_to_text(&buf, c, f->nodes); - bch2_print_string_as_lines(KERN_INFO, buf.buf); + bch2_print_str(c, KERN_INFO, buf.buf); } else { bch_info(c, "btree node scan found %zu nodes after overwrites", f->nodes.nr); } @@ -541,7 +541,7 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree, struct find_btree_nodes *f = &c->found_btree_nodes; - int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes); + int ret = bch2_run_print_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes); if (ret) return ret; diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c index 7d7e52ddde02..1c03c965d836 100644 --- a/fs/bcachefs/btree_trans_commit.c +++ b/fs/bcachefs/btree_trans_commit.c @@ -11,6 +11,7 @@ #include "btree_write_buffer.h" #include "buckets.h" #include "disk_accounting.h" +#include "enumerated_ref.h" #include "errcode.h" #include "error.h" #include "journal.h" @@ -20,6 +21,7 @@ #include "snapshot.h" #include <linux/prefetch.h> +#include <linux/string_helpers.h> static const char * const trans_commit_flags_strs[] = { #define x(n, ...) 
#n, @@ -366,7 +368,8 @@ static noinline void journal_transaction_name(struct btree_trans *trans) struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry); - strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64)); + memcpy_and_pad(l->d, JSET_ENTRY_LOG_U64s * sizeof(u64), + trans->fn, strlen(trans->fn), 0); } static inline int btree_key_can_insert(struct btree_trans *trans, @@ -644,10 +647,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && !(flags & BCH_TRANS_COMMIT_no_journal_res)) { - if (bch2_journal_seq_verify) + if (static_branch_unlikely(&bch2_journal_seq_verify)) trans_for_each_update(trans, i) i->k->k.bversion.lo = trans->journal_res.seq; - else if (bch2_inject_invalid_keys) + else if (static_branch_unlikely(&bch2_inject_invalid_keys)) trans_for_each_update(trans, i) i->k->k.bversion = MAX_VERSION; } @@ -660,18 +663,17 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, h = h->next; } - struct jset_entry *entry = trans->journal_entries; + struct bkey_i *accounting; percpu_down_read(&c->mark_lock); - for (entry = trans->journal_entries; - entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); - entry = vstruct_next(entry)) - if (entry->type == BCH_JSET_ENTRY_write_buffer_keys && - entry->start->k.type == KEY_TYPE_accounting) { - ret = bch2_accounting_trans_commit_hook(trans, bkey_i_to_accounting(entry->start), flags); - if (ret) - goto revert_fs_usage; - } + for (accounting = btree_trans_subbuf_base(trans, &trans->accounting); + accounting != btree_trans_subbuf_top(trans, &trans->accounting); + accounting = bkey_next(accounting)) { + ret = bch2_accounting_trans_commit_hook(trans, + bkey_i_to_accounting(accounting), flags); + if (ret) + goto revert_fs_usage; + } percpu_up_read(&c->mark_lock); /* XXX: we only want to run this if deltas are nonzero */ @@ -695,8 +697,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, if (!(flags & BCH_TRANS_COMMIT_no_journal_res)) validate_context.flags = BCH_VALIDATE_write|BCH_VALIDATE_commit; - for (struct jset_entry *i = trans->journal_entries; - i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); + for (struct jset_entry *i = btree_trans_journal_entries_start(trans); + i != btree_trans_journal_entries_top(trans); i = vstruct_next(i)) { ret = bch2_journal_entry_validate(c, NULL, i, bcachefs_metadata_version_current, @@ -751,11 +753,18 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, } memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res), - trans->journal_entries, - trans->journal_entries_u64s); + btree_trans_journal_entries_start(trans), + trans->journal_entries.u64s); + + trans->journal_res.offset += trans->journal_entries.u64s; + trans->journal_res.u64s -= trans->journal_entries.u64s; - trans->journal_res.offset += trans->journal_entries_u64s; - trans->journal_res.u64s -= trans->journal_entries_u64s; + memcpy_u64s_small(bch2_journal_add_entry(j, &trans->journal_res, + BCH_JSET_ENTRY_write_buffer_keys, + BTREE_ID_accounting, 0, + trans->accounting.u64s)->_data, + btree_trans_subbuf_base(trans, &trans->accounting), + trans->accounting.u64s); if (trans->journal_seq) *trans->journal_seq = trans->journal_res.seq; @@ -777,13 +786,10 @@ fatal_err: bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret)); percpu_down_read(&c->mark_lock); revert_fs_usage: - for (struct jset_entry *entry2 = 
trans->journal_entries; - entry2 != entry; - entry2 = vstruct_next(entry2)) - if (entry2->type == BCH_JSET_ENTRY_write_buffer_keys && - entry2->start->k.type == KEY_TYPE_accounting) - bch2_accounting_trans_commit_revert(trans, - bkey_i_to_accounting(entry2->start), flags); + for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting); + i != accounting; + i = bkey_next(i)) + bch2_accounting_trans_commit_revert(trans, bkey_i_to_accounting(i), flags); percpu_up_read(&c->mark_lock); return ret; } @@ -958,8 +964,8 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans) return ret; } - for (struct jset_entry *i = trans->journal_entries; - i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); + for (struct jset_entry *i = btree_trans_journal_entries_start(trans); + i != btree_trans_journal_entries_top(trans); i = vstruct_next(i)) if (i->type == BCH_JSET_ENTRY_btree_keys || i->type == BCH_JSET_ENTRY_write_buffer_keys) { @@ -968,6 +974,14 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans) return ret; } + for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting); + i != btree_trans_subbuf_top(trans, &trans->accounting); + i = bkey_next(i)) { + int ret = bch2_journal_key_insert(c, BTREE_ID_accounting, 0, i); + if (ret) + return ret; + } + return 0; } @@ -984,7 +998,8 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) goto out_reset; if (!trans->nr_updates && - !trans->journal_entries_u64s) + !trans->journal_entries.u64s && + !trans->accounting.u64s) goto out_reset; ret = bch2_trans_commit_run_triggers(trans); @@ -992,7 +1007,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) goto out_reset; if (!(flags & BCH_TRANS_COMMIT_no_check_rw) && - unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) { + unlikely(!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_trans))) { if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) ret = do_bch2_trans_commit_to_journal_replay(trans); else @@ -1002,7 +1017,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags)); - trans->journal_u64s = trans->journal_entries_u64s; + trans->journal_u64s = trans->journal_entries.u64s + jset_u64s(trans->accounting.u64s); trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names); if (trans->journal_transaction_names) trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s); @@ -1058,7 +1073,7 @@ retry: trace_and_count(c, transaction_commit, trans, _RET_IP_); out: if (likely(!(flags & BCH_TRANS_COMMIT_no_check_rw))) - bch2_write_ref_put(c, BCH_WRITE_REF_trans); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_trans); out_reset: if (!ret) bch2_trans_downgrade(trans); diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 023c472dc9ee..9d641bf9d2a2 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -139,6 +139,7 @@ struct btree { }; #define BCH_BTREE_CACHE_NOT_FREED_REASONS() \ + x(cache_reserve) \ x(lock_intent) \ x(lock_write) \ x(dirty) \ @@ -257,9 +258,6 @@ struct btree_node_iter { * * BTREE_TRIGGER_insert - @new is entering the btree * BTREE_TRIGGER_overwrite - @old is leaving the btree - * - * BTREE_TRIGGER_bucket_invalidate - signal from bucket invalidate path to alloc - * trigger */ #define BTREE_TRIGGER_FLAGS() \ x(norun) \ @@ -269,8 +267,7 @@ struct btree_node_iter { x(gc) \ x(insert) \ x(overwrite) \ - x(is_root) \ - x(bucket_invalidate) + x(is_root) enum { #define x(n) 
BTREE_ITER_FLAG_BIT_##n, @@ -477,6 +474,18 @@ struct btree_trans_paths { struct btree_path paths[]; }; +struct trans_kmalloc_trace { + unsigned long ip; + size_t bytes; +}; +typedef DARRAY(struct trans_kmalloc_trace) darray_trans_kmalloc_trace; + +struct btree_trans_subbuf { + u16 base; + u16 u64s; + u16 size; +}; + struct btree_trans { struct bch_fs *c; @@ -488,6 +497,9 @@ struct btree_trans { void *mem; unsigned mem_top; unsigned mem_bytes; +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + darray_trans_kmalloc_trace trans_kmalloc_trace; +#endif btree_path_idx_t nr_sorted; btree_path_idx_t nr_paths; @@ -528,9 +540,8 @@ struct btree_trans { int srcu_idx; /* update path: */ - u16 journal_entries_u64s; - u16 journal_entries_size; - struct jset_entry *journal_entries; + struct btree_trans_subbuf journal_entries; + struct btree_trans_subbuf accounting; struct btree_trans_commit_hook *hooks; struct journal_entry_pin *journal_pin; @@ -647,13 +658,13 @@ static inline struct bset_tree *bset_tree_last(struct btree *b) static inline void * __btree_node_offset_to_ptr(const struct btree *b, u16 offset) { - return (void *) ((u64 *) b->data + 1 + offset); + return (void *) ((u64 *) b->data + offset); } static inline u16 __btree_node_ptr_to_offset(const struct btree *b, const void *p) { - u16 ret = (u64 *) p - 1 - (u64 *) b->data; + u16 ret = (u64 *) p - (u64 *) b->data; EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p); return ret; diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c index 1e6b7836cc01..5dac09c98026 100644 --- a/fs/bcachefs/btree_update.c +++ b/fs/bcachefs/btree_update.c @@ -14,6 +14,8 @@ #include "snapshot.h" #include "trace.h" +#include <linux/string_helpers.h> + static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l, const struct btree_insert_entry *r) { @@ -509,8 +511,9 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans, return 0; } -int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_i *k, enum btree_iter_update_trigger_flags flags) +int __must_check bch2_trans_update_ip(struct btree_trans *trans, struct btree_iter *iter, + struct bkey_i *k, enum btree_iter_update_trigger_flags flags, + unsigned long ip) { kmsan_check_memory(k, bkey_bytes(&k->k)); @@ -546,7 +549,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter path_idx = iter->key_cache_path; } - return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_); + return bch2_trans_update_by_path(trans, path_idx, k, flags, ip); } int bch2_btree_insert_clone_trans(struct btree_trans *trans, @@ -562,30 +565,29 @@ int bch2_btree_insert_clone_trans(struct btree_trans *trans, return bch2_btree_insert_trans(trans, btree, n, 0); } -struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s) +void *__bch2_trans_subbuf_alloc(struct btree_trans *trans, + struct btree_trans_subbuf *buf, + unsigned u64s) { - unsigned new_top = trans->journal_entries_u64s + u64s; - unsigned old_size = trans->journal_entries_size; - - if (new_top > trans->journal_entries_size) { - trans->journal_entries_size = roundup_pow_of_two(new_top); + unsigned new_top = buf->u64s + u64s; + unsigned old_size = buf->size; - btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size; - } + if (new_top > buf->size) + buf->size = roundup_pow_of_two(new_top); - struct jset_entry *n = - bch2_trans_kmalloc_nomemzero(trans, - trans->journal_entries_size * sizeof(u64)); + void *n = 
bch2_trans_kmalloc_nomemzero(trans, buf->size * sizeof(u64)); if (IS_ERR(n)) - return ERR_CAST(n); + return n; - if (trans->journal_entries) - memcpy(n, trans->journal_entries, old_size * sizeof(u64)); - trans->journal_entries = n; + if (buf->u64s) + memcpy(n, + btree_trans_subbuf_base(trans, buf), + old_size * sizeof(u64)); + buf->base = (u64 *) n - (u64 *) trans->mem; - struct jset_entry *e = btree_trans_journal_entries_top(trans); - trans->journal_entries_u64s = new_top; - return e; + void *p = btree_trans_subbuf_top(trans, buf); + buf->u64s = new_top; + return p; } int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter, @@ -826,26 +828,35 @@ int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree, return bch2_trans_update_buffered(trans, btree, &k); } -int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf) +static int __bch2_trans_log_str(struct btree_trans *trans, const char *str, unsigned len) { - unsigned u64s = DIV_ROUND_UP(buf->pos, sizeof(u64)); - prt_chars(buf, '\0', u64s * sizeof(u64) - buf->pos); - - int ret = buf->allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0; - if (ret) - return ret; + unsigned u64s = DIV_ROUND_UP(len, sizeof(u64)); struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s)); - ret = PTR_ERR_OR_ZERO(e); + int ret = PTR_ERR_OR_ZERO(e); if (ret) return ret; struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry); journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s); - memcpy(l->d, buf->buf, buf->pos); + memcpy_and_pad(l->d, u64s * sizeof(u64), str, len, 0); return 0; } +int bch2_trans_log_str(struct btree_trans *trans, const char *str) +{ + return __bch2_trans_log_str(trans, str, strlen(str)); +} + +int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf) +{ + int ret = buf->allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0; + if (ret) + return ret; + + return __bch2_trans_log_str(trans, buf->buf, buf->pos); +} + int bch2_trans_log_bkey(struct btree_trans *trans, enum btree_id btree, unsigned level, struct bkey_i *k) { @@ -868,7 +879,6 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt, prt_vprintf(&buf, fmt, args); unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64)); - prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos); int ret = buf.allocation_failure ? 
-BCH_ERR_ENOMEM_trans_log_msg : 0; if (ret) @@ -881,7 +891,7 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt, struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries); journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s); - memcpy(l->d, buf.buf, buf.pos); + memcpy_and_pad(l->d, u64s * sizeof(u64), buf.buf, buf.pos, 0); c->journal.early_journal_entries.nr += jset_u64s(u64s); } else { ret = bch2_trans_commit_do(c, NULL, NULL, commit_flags, diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 568e56c91190..f907eaa8b185 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -102,26 +102,60 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter * int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *, enum btree_id, struct bpos); -int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *, - struct bkey_i *, enum btree_iter_update_trigger_flags); +int __must_check bch2_trans_update_ip(struct btree_trans *, struct btree_iter *, + struct bkey_i *, enum btree_iter_update_trigger_flags, + unsigned long); -struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *, unsigned); +static inline int __must_check +bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter, + struct bkey_i *k, enum btree_iter_update_trigger_flags flags) +{ + return bch2_trans_update_ip(trans, iter, k, flags, _THIS_IP_); +} + +static inline void *btree_trans_subbuf_base(struct btree_trans *trans, + struct btree_trans_subbuf *buf) +{ + return (u64 *) trans->mem + buf->base; +} + +static inline void *btree_trans_subbuf_top(struct btree_trans *trans, + struct btree_trans_subbuf *buf) +{ + return (u64 *) trans->mem + buf->base + buf->u64s; +} + +void *__bch2_trans_subbuf_alloc(struct btree_trans *, + struct btree_trans_subbuf *, + unsigned); + +static inline void * +bch2_trans_subbuf_alloc(struct btree_trans *trans, + struct btree_trans_subbuf *buf, + unsigned u64s) +{ + if (buf->u64s + u64s > buf->size) + return __bch2_trans_subbuf_alloc(trans, buf, u64s); + + void *p = btree_trans_subbuf_top(trans, buf); + buf->u64s += u64s; + return p; +} + +static inline struct jset_entry *btree_trans_journal_entries_start(struct btree_trans *trans) +{ + return btree_trans_subbuf_base(trans, &trans->journal_entries); +} static inline struct jset_entry *btree_trans_journal_entries_top(struct btree_trans *trans) { - return (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); + return btree_trans_subbuf_top(trans, &trans->journal_entries); } static inline struct jset_entry * bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s) { - if (!trans->journal_entries || - trans->journal_entries_u64s + u64s > trans->journal_entries_size) - return __bch2_trans_jset_entry_alloc(trans, u64s); - - struct jset_entry *e = btree_trans_journal_entries_top(trans); - trans->journal_entries_u64s += u64s; - return e; + return bch2_trans_subbuf_alloc(trans, &trans->journal_entries, u64s); } int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bkey_i *); @@ -135,6 +169,8 @@ static inline int __must_check bch2_trans_update_buffered(struct btree_trans *tr { kmsan_check_memory(k, bkey_bytes(&k->k)); + EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX); + if (unlikely(!btree_type_uses_write_buffer(btree))) { int ret = bch2_btree_write_buffer_insert_err(trans, btree, k); dump_stack(); @@ -169,6 +205,7 @@ void 
bch2_trans_commit_hook(struct btree_trans *, struct btree_trans_commit_hook *); int __bch2_trans_commit(struct btree_trans *, unsigned); +int bch2_trans_log_str(struct btree_trans *, const char *); int bch2_trans_log_msg(struct btree_trans *, struct printbuf *); int bch2_trans_log_bkey(struct btree_trans *, enum btree_id, unsigned, struct bkey_i *); @@ -217,12 +254,15 @@ static inline void bch2_trans_reset_updates(struct btree_trans *trans) bch2_path_put(trans, i->path, true); trans->nr_updates = 0; - trans->journal_entries_u64s = 0; + trans->journal_entries.u64s = 0; + trans->journal_entries.size = 0; + trans->accounting.u64s = 0; + trans->accounting.size = 0; trans->hooks = NULL; trans->extra_disk_res = 0; } -static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k, +static __always_inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k, unsigned type, unsigned min_bytes) { unsigned bytes = max_t(unsigned, min_bytes, bkey_bytes(k.k)); @@ -245,7 +285,7 @@ static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *t return mut; } -static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k) +static __always_inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k) { return __bch2_bkey_make_mut_noupdate(trans, k, 0, 0); } diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 00307356d7c8..74e65714fecd 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -14,6 +14,7 @@ #include "btree_locking.h" #include "buckets.h" #include "clock.h" +#include "enumerated_ref.h" #include "error.h" #include "extents.h" #include "io_write.h" @@ -284,6 +285,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, struct disk_reservation *res, struct closure *cl, bool interior_node, + unsigned target, unsigned flags) { struct bch_fs *c = trans->c; @@ -317,6 +319,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, mutex_unlock(&c->btree_reserve_cache_lock); retry: ret = bch2_alloc_sectors_start_trans(trans, + target ?: c->opts.metadata_target ?: c->opts.foreground_target, 0, @@ -325,7 +328,9 @@ retry: res->nr_replicas, min(res->nr_replicas, c->opts.metadata_replicas_required), - watermark, 0, cl, &wp); + watermark, + target ? 
BCH_WRITE_only_specified_devs : 0, + cl, &wp); if (unlikely(ret)) goto err; @@ -505,6 +510,7 @@ static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans * static int bch2_btree_reserve_get(struct btree_trans *trans, struct btree_update *as, unsigned nr_nodes[2], + unsigned target, unsigned flags, struct closure *cl) { @@ -527,7 +533,7 @@ static int bch2_btree_reserve_get(struct btree_trans *trans, while (p->nr < nr_nodes[interior]) { b = __bch2_btree_node_alloc(trans, &as->disk_res, cl, - interior, flags); + interior, target, flags); if (IS_ERR(b)) { ret = PTR_ERR(b); goto err; @@ -1116,7 +1122,8 @@ static void bch2_btree_update_done(struct btree_update *as, struct btree_trans * static struct btree_update * bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, - unsigned level_start, bool split, unsigned flags) + unsigned level_start, bool split, + unsigned target, unsigned flags) { struct bch_fs *c = trans->c; struct btree_update *as; @@ -1226,7 +1233,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, if (ret) goto err; - ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL); + ret = bch2_btree_reserve_get(trans, as, nr_nodes, target, flags, NULL); if (bch2_err_matches(ret, ENOSPC) || bch2_err_matches(ret, ENOMEM)) { struct closure cl; @@ -1245,7 +1252,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, closure_init_stack(&cl); do { - ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl); + ret = bch2_btree_reserve_get(trans, as, nr_nodes, target, flags, &cl); bch2_trans_unlock(trans); bch2_wait_on_allocator(c, &cl); @@ -1806,10 +1813,10 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t __func__, b->c.level); bch2_btree_update_to_text(&buf, as); bch2_btree_path_to_text(&buf, trans, path_idx); + bch2_fs_emergency_read_only2(c, &buf); - bch2_print_string_as_lines(KERN_ERR, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); - bch2_fs_emergency_read_only(c); return -EIO; } @@ -1878,7 +1885,7 @@ int bch2_btree_split_leaf(struct btree_trans *trans, as = bch2_btree_update_start(trans, trans->paths + path, trans->paths[path].level, - true, flags); + true, 0, flags); if (IS_ERR(as)) return PTR_ERR(as); @@ -1948,7 +1955,8 @@ int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path, return bch2_btree_split_leaf(trans, path, flags); struct btree_update *as = - bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, flags); + bch2_btree_update_start(trans, trans->paths + path, b->c.level, + true, 0, flags); if (IS_ERR(as)) return PTR_ERR(as); @@ -2077,7 +2085,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, parent = btree_node_parent(trans->paths + path, b); as = bch2_btree_update_start(trans, trans->paths + path, level, false, - BCH_TRANS_COMMIT_no_enospc|flags); + 0, BCH_TRANS_COMMIT_no_enospc|flags); ret = PTR_ERR_OR_ZERO(as); if (ret) goto err; @@ -2184,6 +2192,7 @@ err: int bch2_btree_node_rewrite(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, + unsigned target, unsigned flags) { struct bch_fs *c = trans->c; @@ -2196,7 +2205,8 @@ int bch2_btree_node_rewrite(struct btree_trans *trans, struct btree_path *path = btree_iter_path(trans, iter); parent = btree_node_parent(path, b); - as = bch2_btree_update_start(trans, path, b->c.level, false, flags); + as = bch2_btree_update_start(trans, path, b->c.level, + false, target, flags); ret = PTR_ERR_OR_ZERO(as); 
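
Editorial note, not part of the patch: the hunks above thread a new target argument from bch2_btree_node_rewrite() and bch2_btree_node_rewrite_pos() down through bch2_btree_update_start() and bch2_btree_reserve_get() into __bch2_btree_node_alloc(). A target of 0 keeps the old behaviour (metadata_target, then foreground_target), while a non-zero target also sets BCH_WRITE_only_specified_devs so the allocation cannot spill onto other devices. A hypothetical caller, purely illustrative:

	/* Illustrative only: rewrite one btree node onto an explicit target. */
	static int rewrite_node_to_target(struct btree_trans *trans,
					  struct btree_iter *iter,
					  struct btree *b,
					  unsigned target)
	{
		/* target == 0 would fall back to the filesystem-wide options */
		return bch2_btree_node_rewrite(trans, iter, b, target, 0);
	}
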
if (ret) goto out; @@ -2261,7 +2271,7 @@ static int bch2_btree_node_rewrite_key(struct btree_trans *trans, bool found = b && btree_ptr_hash_val(&b->key) == btree_ptr_hash_val(k); ret = found - ? bch2_btree_node_rewrite(trans, &iter, b, flags) + ? bch2_btree_node_rewrite(trans, &iter, b, 0, flags) : -ENOENT; out: bch2_trans_iter_exit(trans, &iter); @@ -2270,7 +2280,9 @@ out: int bch2_btree_node_rewrite_pos(struct btree_trans *trans, enum btree_id btree, unsigned level, - struct bpos pos, unsigned flags) + struct bpos pos, + unsigned target, + unsigned flags) { BUG_ON(!level); @@ -2282,7 +2294,7 @@ int bch2_btree_node_rewrite_pos(struct btree_trans *trans, if (ret) goto err; - ret = bch2_btree_node_rewrite(trans, &iter, b, flags); + ret = bch2_btree_node_rewrite(trans, &iter, b, target, flags); err: bch2_trans_iter_exit(trans, &iter); return ret; @@ -2296,7 +2308,7 @@ int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *trans, if (ret) return ret == -BCH_ERR_btree_node_dying ? 0 : ret; - ret = bch2_btree_node_rewrite(trans, &iter, b, flags); + ret = bch2_btree_node_rewrite(trans, &iter, b, 0, flags); bch2_trans_iter_exit(trans, &iter); return ret; } @@ -2330,7 +2342,7 @@ static void async_btree_node_rewrite_work(struct work_struct *work) closure_wake_up(&c->btree_node_rewrites_wait); bch2_bkey_buf_exit(&a->key, c); - bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_node_rewrite); kfree(a); } @@ -2351,8 +2363,8 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) bool now = false, pending = false; spin_lock(&c->btree_node_rewrites_lock); - if (c->curr_recovery_pass > BCH_RECOVERY_PASS_journal_replay && - bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { + if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_journal_replay) && + enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_node_rewrite)) { list_add(&a->list, &c->btree_node_rewrites); now = true; } else if (!test_bit(BCH_FS_may_go_rw, &c->flags)) { @@ -2391,7 +2403,7 @@ void bch2_do_pending_node_rewrites(struct bch_fs *c) if (!a) break; - bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); + enumerated_ref_get(&c->writes, BCH_WRITE_REF_node_rewrite); queue_work(c->btree_node_rewrite_worker, &a->work); } } diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h index be71cd73b864..7fe793788a79 100644 --- a/fs/bcachefs/btree_update_interior.h +++ b/fs/bcachefs/btree_update_interior.h @@ -144,7 +144,7 @@ static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans, EBUG_ON(!btree_node_locked(path, level)); - if (bch2_btree_node_merging_disabled) + if (static_branch_unlikely(&bch2_btree_node_merging_disabled)) return 0; b = path->l[level].b; @@ -168,10 +168,10 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans, } int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *, - struct btree *, unsigned); + struct btree *, unsigned, unsigned); int bch2_btree_node_rewrite_pos(struct btree_trans *, enum btree_id, unsigned, - struct bpos, unsigned); + struct bpos, unsigned, unsigned); int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *, struct btree *, unsigned); diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 0941fb2c026d..efb0c64d0aac 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -7,6 +7,7 @@ #include "btree_update_interior.h" #include "btree_write_buffer.h" #include "disk_accounting.h" 
+#include "enumerated_ref.h" #include "error.h" #include "extents.h" #include "journal.h" @@ -181,6 +182,8 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite return wb_flush_one_slowpath(trans, iter, wb); } + EBUG_ON(!bpos_eq(wb->k.k.p, path->pos)); + bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq); (*fast)++; return 0; @@ -629,11 +632,11 @@ int bch2_btree_write_buffer_tryflush(struct btree_trans *trans) { struct bch_fs *c = trans->c; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_write_buffer)) return -BCH_ERR_erofs_no_writes; int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans); - bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer); return ret; } @@ -692,7 +695,7 @@ static void bch2_btree_write_buffer_flush_work(struct work_struct *work) } while (!ret && bch2_btree_write_buffer_should_flush(c)); mutex_unlock(&wb->flushing.lock); - bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer); } static void wb_accounting_sort(struct btree_write_buffer *wb) @@ -821,9 +824,9 @@ int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_ bch2_journal_pin_drop(&c->journal, &dst->wb->pin); if (bch2_btree_write_buffer_should_flush(c) && - __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) && + __enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_write_buffer) && !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work)) - bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer); if (dst->wb == &wb->flushing) mutex_unlock(&wb->flushing.lock); @@ -866,13 +869,18 @@ void bch2_fs_btree_write_buffer_exit(struct bch_fs *c) darray_exit(&wb->inc.keys); } -int bch2_fs_btree_write_buffer_init(struct bch_fs *c) +void bch2_fs_btree_write_buffer_init_early(struct bch_fs *c) { struct btree_write_buffer *wb = &c->btree_write_buffer; mutex_init(&wb->inc.lock); mutex_init(&wb->flushing.lock); INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work); +} + +int bch2_fs_btree_write_buffer_init(struct bch_fs *c) +{ + struct btree_write_buffer *wb = &c->btree_write_buffer; /* Will be resized by journal as needed: */ unsigned initial_size = 1 << 16; diff --git a/fs/bcachefs/btree_write_buffer.h b/fs/bcachefs/btree_write_buffer.h index d535cea28bde..05f56fd1eed0 100644 --- a/fs/bcachefs/btree_write_buffer.h +++ b/fs/bcachefs/btree_write_buffer.h @@ -101,6 +101,7 @@ int bch2_journal_keys_to_write_buffer_end(struct bch_fs *, struct journal_keys_t int bch2_btree_write_buffer_resize(struct bch_fs *, size_t); void bch2_fs_btree_write_buffer_exit(struct bch_fs *); +void bch2_fs_btree_write_buffer_init_early(struct bch_fs *); int bch2_fs_btree_write_buffer_init(struct bch_fs *); #endif /* _BCACHEFS_BTREE_WRITE_BUFFER_H */ diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 31fbc2716d8b..09eb5a543ae4 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -156,10 +156,14 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, g->gen_valid = true; g->gen = p.ptr.gen; } else { + /* this pointer will be dropped */ *do_update = true; + goto out; } } + /* g->gen_valid == true */ + if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, trans, ptr_gen_newer_than_bucket_gen, "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" 
@@ -172,15 +176,13 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, if (!p.ptr.cached && (g->data_type != BCH_DATA_btree || data_type == BCH_DATA_btree)) { - g->gen_valid = true; - g->gen = p.ptr.gen; - g->data_type = 0; + g->data_type = data_type; g->stripe_sectors = 0; g->dirty_sectors = 0; g->cached_sectors = 0; - } else { - *do_update = true; } + + *do_update = true; } if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, @@ -217,9 +219,8 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, bch2_data_type_str(data_type), (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - if (data_type == BCH_DATA_btree) { - g->gen_valid = true; - g->gen = p.ptr.gen; + if (!p.ptr.cached && + data_type == BCH_DATA_btree) { g->data_type = data_type; g->stripe_sectors = 0; g->dirty_sectors = 0; @@ -392,29 +393,24 @@ static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf struct bkey_s_c k, bool insert, enum bch_sb_error_id id) { struct bch_fs *c = trans->c; - bool repeat = false, print = true, suppress = false; prt_printf(buf, "\nwhile marking "); bch2_bkey_val_to_text(buf, c, k); prt_newline(buf); - __bch2_count_fsck_err(c, id, buf->buf, &repeat, &print, &suppress); + bool print = __bch2_count_fsck_err(c, id, buf); - int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + int ret = bch2_run_explicit_recovery_pass(c, buf, + BCH_RECOVERY_PASS_check_allocations, 0); if (insert) { - print = true; - suppress = false; - bch2_trans_updates_to_text(buf, trans); __bch2_inconsistent_error(c, buf); ret = -BCH_ERR_bucket_ref_update; } - if (suppress) - prt_printf(buf, "Ratelimiting new instances of previous error\n"); - if (print) - bch2_print_string_as_lines(KERN_ERR, buf->buf); + if (print || insert) + bch2_print_str(c, KERN_ERR, buf->buf); return ret; } @@ -711,7 +707,7 @@ err: (u64) p.ec.idx); bch2_bkey_val_to_text(&buf, c, k); __bch2_inconsistent_error(c, &buf); - bch2_print_string_as_lines(KERN_ERR, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); return -BCH_ERR_trigger_stripe_pointer; } @@ -966,14 +962,23 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, return PTR_ERR(a); if (a->v.data_type && type && a->v.data_type != type) { - bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); - log_fsck_err(trans, bucket_metadata_type_mismatch, - "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n" - "while marking %s", - iter.pos.inode, iter.pos.offset, a->v.gen, - bch2_data_type_str(a->v.data_type), - bch2_data_type_str(type), - bch2_data_type_str(type)); + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + prt_printf(&buf, "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n" + "while marking %s\n", + iter.pos.inode, iter.pos.offset, a->v.gen, + bch2_data_type_str(a->v.data_type), + bch2_data_type_str(type), + bch2_data_type_str(type)); + + bool print = bch2_count_fsck_err(c, bucket_metadata_type_mismatch, &buf); + + bch2_run_explicit_recovery_pass(c, &buf, + BCH_RECOVERY_PASS_check_allocations, 0); + + if (print) + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); ret = -BCH_ERR_metadata_bucket_inconsistency; goto err; } @@ -985,7 +990,6 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, &a->k_i, 0); } err: -fsck_err: bch2_trans_iter_exit(trans, &iter); return ret; } @@ -1143,10 +1147,10 @@ int 
bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca, int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c, enum btree_iter_update_trigger_flags flags) { - for_each_online_member(c, ca) { + for_each_online_member(c, ca, BCH_DEV_READ_REF_trans_mark_dev_sbs) { int ret = bch2_trans_mark_dev_sb(c, ca, flags); if (ret) { - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_trans_mark_dev_sbs); return ret; } } @@ -1321,6 +1325,11 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) sizeof(bucket_gens->b[0]) * copy); } + ret = bch2_bucket_bitmap_resize(&ca->bucket_backpointer_mismatch, + ca->mi.nbuckets, nbuckets) ?: + bch2_bucket_bitmap_resize(&ca->bucket_backpointer_empty, + ca->mi.nbuckets, nbuckets); + rcu_assign_pointer(ca->bucket_gens, bucket_gens); bucket_gens = old_bucket_gens; diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c index 5891b3a1e61c..4066946b26bc 100644 --- a/fs/bcachefs/chardev.c +++ b/fs/bcachefs/chardev.c @@ -613,11 +613,13 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c, if (!dev) return -EINVAL; - for_each_online_member(c, ca) + rcu_read_lock(); + for_each_online_member_rcu(c, ca) if (ca->dev == dev) { - percpu_ref_put(&ca->io_ref[READ]); + rcu_read_unlock(); return ca->dev_idx; } + rcu_read_unlock(); return -BCH_ERR_ENOENT_dev_idx_not_found; } diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c index d0a34a097b80..d3e2e4f776c6 100644 --- a/fs/bcachefs/checksum.c +++ b/fs/bcachefs/checksum.c @@ -91,7 +91,7 @@ static void bch2_checksum_update(struct bch2_checksum_state *state, const void * } } -static void bch2_chacha20_init(u32 state[CHACHA_STATE_WORDS], +static void bch2_chacha20_init(struct chacha_state *state, const struct bch_key *key, struct nonce nonce) { u32 key_words[CHACHA_KEY_SIZE / sizeof(u32)]; @@ -106,14 +106,14 @@ static void bch2_chacha20_init(u32 state[CHACHA_STATE_WORDS], memzero_explicit(key_words, sizeof(key_words)); } -static void bch2_chacha20(const struct bch_key *key, struct nonce nonce, - void *data, size_t len) +void bch2_chacha20(const struct bch_key *key, struct nonce nonce, + void *data, size_t len) { - u32 state[CHACHA_STATE_WORDS]; + struct chacha_state state; - bch2_chacha20_init(state, key, nonce); - chacha20_crypt(state, data, data, len); - memzero_explicit(state, sizeof(state)); + bch2_chacha20_init(&state, key, nonce); + chacha20_crypt(&state, data, data, len); + chacha_zeroize_state(&state); } static void bch2_poly1305_init(struct poly1305_desc_ctx *desc, @@ -257,14 +257,14 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type, { struct bio_vec bv; struct bvec_iter iter; - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; int ret = 0; if (bch2_fs_inconsistent_on(!c->chacha20_key_set, c, "attempting to encrypt without encryption key")) return -BCH_ERR_no_encryption_key; - bch2_chacha20_init(chacha_state, &c->chacha20_key, nonce); + bch2_chacha20_init(&chacha_state, &c->chacha20_key, nonce); bio_for_each_segment(bv, bio, iter) { void *p; @@ -280,10 +280,10 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type, } p = bvec_kmap_local(&bv); - chacha20_crypt(chacha_state, p, p, bv.bv_len); + chacha20_crypt(&chacha_state, p, p, bv.bv_len); kunmap_local(p); } - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); return ret; } diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h index 1310782d3ae9..7bd9cf6104ca 100644 --- a/fs/bcachefs/checksum.h +++ 
b/fs/bcachefs/checksum.h @@ -69,6 +69,8 @@ static inline void bch2_csum_err_msg(struct printbuf *out, bch2_csum_to_text(out, type, expected); } +void bch2_chacha20(const struct bch_key *, struct nonce, void *, size_t); + int bch2_request_key(struct bch_sb *, struct bch_key *); #ifndef __KERNEL__ int bch2_revoke_key(struct bch_sb *); diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c index 28ed32449913..1bca61d17092 100644 --- a/fs/bcachefs/compress.c +++ b/fs/bcachefs/compress.c @@ -714,7 +714,7 @@ int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res, ret = match_string(bch2_compression_opts, -1, type_str); if (ret < 0 && err) - prt_str(err, "invalid compression type"); + prt_printf(err, "invalid compression type\n"); if (ret < 0) goto err; @@ -729,7 +729,7 @@ int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res, if (!ret && level > 15) ret = -EINVAL; if (ret < 0 && err) - prt_str(err, "invalid compression level"); + prt_printf(err, "invalid compression level\n"); if (ret < 0) goto err; diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h index c6151495985f..50ec3decfe8c 100644 --- a/fs/bcachefs/darray.h +++ b/fs/bcachefs/darray.h @@ -20,7 +20,18 @@ struct { \ #define DARRAY(_type) DARRAY_PREALLOCATED(_type, 0) typedef DARRAY(char) darray_char; -typedef DARRAY(char *) darray_str; +typedef DARRAY(char *) darray_str; +typedef DARRAY(const char *) darray_const_str; + +typedef DARRAY(u8) darray_u8; +typedef DARRAY(u16) darray_u16; +typedef DARRAY(u32) darray_u32; +typedef DARRAY(u64) darray_u64; + +typedef DARRAY(s8) darray_s8; +typedef DARRAY(s16) darray_s16; +typedef DARRAY(s32) darray_s32; +typedef DARRAY(s64) darray_s64; int __bch2_darray_resize_noprof(darray_char *, size_t, size_t, gfp_t); diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index b211c97238ab..c34e5b88ba9d 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -100,9 +100,10 @@ static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struc return true; } -static noinline void trace_io_move_finish2(struct data_update *u, - struct bkey_i *new, - struct bkey_i *insert) +noinline_for_stack +static void trace_io_move_finish2(struct data_update *u, + struct bkey_i *new, + struct bkey_i *insert) { struct bch_fs *c = u->op.c; struct printbuf buf = PRINTBUF; @@ -124,6 +125,7 @@ static noinline void trace_io_move_finish2(struct data_update *u, printbuf_exit(&buf); } +noinline_for_stack static void trace_io_move_fail2(struct data_update *m, struct bkey_s_c new, struct bkey_s_c wrote, @@ -179,24 +181,84 @@ static void trace_io_move_fail2(struct data_update *m, printbuf_exit(&buf); } +noinline_for_stack +static void trace_data_update2(struct data_update *m, + struct bkey_s_c old, struct bkey_s_c k, + struct bkey_i *insert) +{ + struct bch_fs *c = m->op.c; + struct printbuf buf = PRINTBUF; + + prt_str(&buf, "\nold: "); + bch2_bkey_val_to_text(&buf, c, old); + prt_str(&buf, "\nk: "); + bch2_bkey_val_to_text(&buf, c, k); + prt_str(&buf, "\nnew: "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); + + trace_data_update(c, buf.buf); + printbuf_exit(&buf); +} + +noinline_for_stack +static void trace_io_move_created_rebalance2(struct data_update *m, + struct bkey_s_c old, struct bkey_s_c k, + struct bkey_i *insert) +{ + struct bch_fs *c = m->op.c; + struct printbuf buf = PRINTBUF; + + bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts); + + prt_str(&buf, "\nold: "); + bch2_bkey_val_to_text(&buf, c, old); + 
prt_str(&buf, "\nk: "); + bch2_bkey_val_to_text(&buf, c, k); + prt_str(&buf, "\nnew: "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); + + trace_io_move_created_rebalance(c, buf.buf); + printbuf_exit(&buf); + + this_cpu_inc(c->counters[BCH_COUNTER_io_move_created_rebalance]); +} + +noinline_for_stack +static int data_update_invalid_bkey(struct data_update *m, + struct bkey_s_c old, struct bkey_s_c k, + struct bkey_i *insert) +{ + struct bch_fs *c = m->op.c; + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + prt_str(&buf, "about to insert invalid key in data update path"); + prt_printf(&buf, "\nop.nonce: %u", m->op.nonce); + prt_str(&buf, "\nold: "); + bch2_bkey_val_to_text(&buf, c, old); + prt_str(&buf, "\nk: "); + bch2_bkey_val_to_text(&buf, c, k); + prt_str(&buf, "\nnew: "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); + + bch2_fs_emergency_read_only2(c, &buf); + + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + + return -BCH_ERR_invalid_bkey; +} + static int __bch2_data_update_index_update(struct btree_trans *trans, struct bch_write_op *op) { struct bch_fs *c = op->c; struct btree_iter iter; - struct data_update *m = - container_of(op, struct data_update, op); - struct keylist *keys = &op->insert_keys; - struct bkey_buf _new, _insert; - struct printbuf journal_msg = PRINTBUF; + struct data_update *m = container_of(op, struct data_update, op); int ret = 0; - bch2_bkey_buf_init(&_new); - bch2_bkey_buf_init(&_insert); - bch2_bkey_buf_realloc(&_insert, c, U8_MAX); - bch2_trans_iter_init(trans, &iter, m->btree_id, - bkey_start_pos(&bch2_keylist_front(keys)->k), + bkey_start_pos(&bch2_keylist_front(&op->insert_keys)->k), BTREE_ITER_slots|BTREE_ITER_intent); while (1) { @@ -221,19 +283,30 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, if (ret) goto err; - new = bkey_i_to_extent(bch2_keylist_front(keys)); + new = bkey_i_to_extent(bch2_keylist_front(&op->insert_keys)); if (!bch2_extents_match(k, old)) { trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), - NULL, "no match:"); + NULL, "no match:"); goto nowork; } - bkey_reassemble(_insert.k, k); - insert = _insert.k; + insert = bch2_trans_kmalloc(trans, + bkey_bytes(k.k) + + bkey_val_bytes(&new->k) + + sizeof(struct bch_extent_rebalance)); + ret = PTR_ERR_OR_ZERO(insert); + if (ret) + goto err; + + bkey_reassemble(insert, k); - bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys)); - new = bkey_i_to_extent(_new.k); + new = bch2_trans_kmalloc(trans, bkey_bytes(&new->k)); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + goto err; + + bkey_copy(&new->k_i, bch2_keylist_front(&op->insert_keys)); bch2_cut_front(iter.pos, &new->k_i); bch2_cut_front(iter.pos, insert); @@ -346,44 +419,12 @@ restart_drop_extra_replicas: .btree = m->btree_id, .flags = BCH_VALIDATE_commit, }); - if (invalid) { - struct printbuf buf = PRINTBUF; - - prt_str(&buf, "about to insert invalid key in data update path"); - prt_printf(&buf, "\nop.nonce: %u", m->op.nonce); - prt_str(&buf, "\nold: "); - bch2_bkey_val_to_text(&buf, c, old); - prt_str(&buf, "\nk: "); - bch2_bkey_val_to_text(&buf, c, k); - prt_str(&buf, "\nnew: "); - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); - - bch2_print_string_as_lines(KERN_ERR, buf.buf); - printbuf_exit(&buf); - - bch2_fatal_error(c); - ret = -BCH_ERR_invalid_bkey; + if (unlikely(invalid)) { + ret = data_update_invalid_bkey(m, old, k, insert); goto out; } - if (trace_data_update_enabled()) { - struct printbuf buf = PRINTBUF; - - prt_str(&buf, "\nold: "); - 
bch2_bkey_val_to_text(&buf, c, old); - prt_str(&buf, "\nk: "); - bch2_bkey_val_to_text(&buf, c, k); - prt_str(&buf, "\nnew: "); - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); - - trace_data_update(c, buf.buf); - printbuf_exit(&buf); - } - - printbuf_reset(&journal_msg); - prt_str(&journal_msg, bch2_data_update_type_strs[m->type]); - - ret = bch2_trans_log_msg(trans, &journal_msg) ?: + ret = bch2_trans_log_str(trans, bch2_data_update_type_strs[m->type]) ?: bch2_trans_log_bkey(trans, m->btree_id, 0, m->k.k) ?: bch2_insert_snapshot_whiteouts(trans, m->btree_id, k.k->p, bkey_start_pos(&insert->k)) ?: @@ -391,28 +432,39 @@ restart_drop_extra_replicas: k.k->p, insert->k.p) ?: bch2_bkey_set_needs_rebalance(c, &op->opts, insert) ?: bch2_trans_update(trans, &iter, insert, - BTREE_UPDATE_internal_snapshot_node) ?: - bch2_trans_commit(trans, &op->res, + BTREE_UPDATE_internal_snapshot_node); + if (ret) + goto err; + + if (trace_data_update_enabled()) + trace_data_update2(m, old, k, insert); + + if (bch2_bkey_sectors_need_rebalance(c, bkey_i_to_s_c(insert)) * k.k->size > + bch2_bkey_sectors_need_rebalance(c, k) * insert->k.size) + trace_io_move_created_rebalance2(m, old, k, insert); + + ret = bch2_trans_commit(trans, &op->res, NULL, BCH_TRANS_COMMIT_no_check_rw| BCH_TRANS_COMMIT_no_enospc| m->data_opts.btree_insert_flags); - if (!ret) { - bch2_btree_iter_set_pos(trans, &iter, next_pos); + if (ret) + goto err; - this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size); - if (trace_io_move_finish_enabled()) - trace_io_move_finish2(m, &new->k_i, insert); - } + bch2_btree_iter_set_pos(trans, &iter, next_pos); + + this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size); + if (trace_io_move_finish_enabled()) + trace_io_move_finish2(m, &new->k_i, insert); err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) ret = 0; if (ret) break; next: - while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) { - bch2_keylist_pop_front(keys); - if (bch2_keylist_empty(keys)) + while (bkey_ge(iter.pos, bch2_keylist_front(&op->insert_keys)->k.p)) { + bch2_keylist_pop_front(&op->insert_keys); + if (bch2_keylist_empty(&op->insert_keys)) goto out; } continue; @@ -430,10 +482,7 @@ nowork: goto next; } out: - printbuf_exit(&journal_msg); bch2_trans_iter_exit(trans, &iter); - bch2_bkey_buf_exit(&_insert, c); - bch2_bkey_buf_exit(&_new, c); BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart)); return ret; } @@ -587,6 +636,10 @@ void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c, prt_str_indented(out, "extra replicas:\t"); prt_u64(out, data_opts->extra_replicas); + prt_newline(out); + + prt_str_indented(out, "scrub:\t"); + prt_u64(out, data_opts->scrub); } void bch2_data_update_to_text(struct printbuf *out, struct data_update *m) @@ -607,9 +660,17 @@ void bch2_data_update_inflight_to_text(struct printbuf *out, struct data_update prt_newline(out); printbuf_indent_add(out, 2); bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts); - prt_printf(out, "read_done:\t%u\n", m->read_done); - bch2_write_op_to_text(out, &m->op); - printbuf_indent_sub(out, 2); + + if (!m->read_done) { + prt_printf(out, "read:\n"); + printbuf_indent_add(out, 2); + bch2_read_bio_to_text(out, &m->rbio); + } else { + prt_printf(out, "write:\n"); + printbuf_indent_add(out, 2); + bch2_write_op_to_text(out, &m->op); + } + printbuf_indent_sub(out, 4); } int bch2_extent_drop_ptrs(struct btree_trans *trans, @@ -707,7 +768,9 @@ static int can_write_extent(struct bch_fs *c, struct 
data_update *m) rcu_read_lock(); unsigned nr_replicas = 0, i; for_each_set_bit(i, devs.d, BCH_SB_MEMBERS_MAX) { - struct bch_dev *ca = bch2_dev_rcu(c, i); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, i); + if (!ca) + continue; struct bch_dev_usage usage; bch2_dev_usage_read_fast(ca, &usage); diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h index ed05125867da..5e14d13568de 100644 --- a/fs/bcachefs/data_update.h +++ b/fs/bcachefs/data_update.h @@ -50,6 +50,21 @@ struct data_update { struct bio_vec *bvecs; }; +struct promote_op { + struct rcu_head rcu; + u64 start_time; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + unsigned list_idx; +#endif + + struct rhash_head hash; + struct bpos pos; + + struct work_struct work; + struct data_update write; + struct bio_vec bi_inline_vecs[]; /* must be last */ +}; + void bch2_data_update_to_text(struct printbuf *, struct data_update *); void bch2_data_update_inflight_to_text(struct printbuf *, struct data_update *); diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c index 5a8bc7013512..4fa70634c90e 100644 --- a/fs/bcachefs/debug.c +++ b/fs/bcachefs/debug.c @@ -8,6 +8,7 @@ #include "bcachefs.h" #include "alloc_foreground.h" +#include "async_objs.h" #include "bkey_methods.h" #include "btree_cache.h" #include "btree_io.h" @@ -16,6 +17,7 @@ #include "btree_update.h" #include "btree_update_interior.h" #include "buckets.h" +#include "data_update.h" #include "debug.h" #include "error.h" #include "extents.h" @@ -40,9 +42,10 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b, struct btree_node *n_sorted = c->verify_data->data; struct bset *sorted, *inmemory = &b->data->keys; struct bio *bio; - bool failed = false, saw_error = false; + bool failed = false; - struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); + struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, + BCH_DEV_READ_REF_btree_verify_replicas); if (!ca) return false; @@ -57,12 +60,13 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b, submit_bio_wait(bio); bio_put(bio); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_btree_verify_replicas); memcpy(n_ondisk, n_sorted, btree_buf_bytes(b)); v->written = 0; - if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error) + if (bch2_btree_node_read_done(c, ca, v, NULL, NULL)) return false; n_sorted = c->verify_data->data; @@ -196,7 +200,8 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c, return; } - ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); + ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, + BCH_DEV_READ_REF_btree_node_ondisk_to_text); if (!ca) { prt_printf(out, "error getting device to read from: not online\n"); return; @@ -297,28 +302,13 @@ out: if (bio) bio_put(bio); kvfree(n_ondisk); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_btree_node_ondisk_to_text); } #ifdef CONFIG_DEBUG_FS -/* XXX: bch_fs refcounting */ - -struct dump_iter { - struct bch_fs *c; - enum btree_id id; - struct bpos from; - struct bpos prev_node; - u64 iter; - - struct printbuf buf; - - char __user *ubuf; /* destination user buffer */ - size_t size; /* size of requested read */ - ssize_t ret; /* bytes read so far */ -}; - -static ssize_t flush_buf(struct dump_iter *i) +ssize_t bch2_debugfs_flush_buf(struct dump_iter *i) { if (i->buf.pos) { size_t bytes = min_t(size_t, i->buf.pos, i->size); @@ -330,6 +320,11 @@ static ssize_t flush_buf(struct dump_iter *i) 
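
Editorial note, not part of the patch: the conversions above (and throughout this series) replace bare percpu references, bch2_write_ref_get()/put() on c->writes and percpu_ref_put() on ca->io_ref[], with enumerated refs where every get/put names its user via a BCH_WRITE_REF_*, BCH_DEV_READ_REF_* or BCH_DEV_WRITE_REF_* constant, so an unreleased reference can be attributed to its call site. A minimal sketch of the pattern, assuming each call site has its own enum value; the helper below is illustrative, not from the patch:

	static bool read_replica_example(struct bch_fs *c, unsigned dev)
	{
		struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ,
					BCH_DEV_READ_REF_btree_verify_replicas);
		if (!ca)
			return false;	/* device not online, ref not taken */

		/* ... submit and wait on read I/O against the device ... */

		enumerated_ref_put(&ca->io_ref[READ],
				   BCH_DEV_READ_REF_btree_verify_replicas);
		return true;
	}
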
i->buf.pos -= copied; memmove(i->buf.buf, i->buf.buf + copied, i->buf.pos); + if (i->buf.last_newline >= copied) + i->buf.last_newline -= copied; + if (i->buf.last_field >= copied) + i->buf.last_field -= copied; + if (copied != bytes) return -EFAULT; } @@ -356,7 +351,7 @@ static int bch2_dump_open(struct inode *inode, struct file *file) return 0; } -static int bch2_dump_release(struct inode *inode, struct file *file) +int bch2_dump_release(struct inode *inode, struct file *file) { struct dump_iter *i = file->private_data; @@ -374,7 +369,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, i->size = size; i->ret = 0; - return flush_buf(i) ?: + return bch2_debugfs_flush_buf(i) ?: bch2_trans_run(i->c, for_each_btree_key(trans, iter, i->id, i->from, BTREE_ITER_prefetch| @@ -383,7 +378,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, prt_newline(&i->buf); bch2_trans_unlock(trans); i->from = bpos_successor(iter.pos); - flush_buf(i); + bch2_debugfs_flush_buf(i); }))) ?: i->ret; } @@ -404,7 +399,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, i->size = size; i->ret = 0; - ssize_t ret = flush_buf(i); + ssize_t ret = bch2_debugfs_flush_buf(i); if (ret) return ret; @@ -418,7 +413,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, ? bpos_successor(b->key.k.p) : b->key.k.p; - drop_locks_do(trans, flush_buf(i)); + drop_locks_do(trans, bch2_debugfs_flush_buf(i)); }))) ?: i->ret; } @@ -438,7 +433,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, i->size = size; i->ret = 0; - return flush_buf(i) ?: + return bch2_debugfs_flush_buf(i) ?: bch2_trans_run(i->c, for_each_btree_key(trans, iter, i->id, i->from, BTREE_ITER_prefetch| @@ -456,7 +451,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, bch2_bfloat_to_text(&i->buf, l->b, _k); bch2_trans_unlock(trans); i->from = bpos_successor(iter.pos); - flush_buf(i); + bch2_debugfs_flush_buf(i); }))) ?: i->ret; } @@ -517,7 +512,7 @@ static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf, struct rhash_head *pos; struct btree *b; - ret = flush_buf(i); + ret = bch2_debugfs_flush_buf(i); if (ret) return ret; @@ -540,7 +535,7 @@ static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf, ret = -ENOMEM; if (!ret) - ret = flush_buf(i); + ret = bch2_debugfs_flush_buf(i); return ret ?: i->ret; } @@ -614,7 +609,7 @@ restart: closure_put(&trans->ref); - ret = flush_buf(i); + ret = bch2_debugfs_flush_buf(i); if (ret) goto unlocked; @@ -627,7 +622,7 @@ unlocked: ret = -ENOMEM; if (!ret) - ret = flush_buf(i); + ret = bch2_debugfs_flush_buf(i); return ret ?: i->ret; } @@ -652,7 +647,7 @@ static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf, i->ret = 0; while (1) { - err = flush_buf(i); + err = bch2_debugfs_flush_buf(i); if (err) return err; @@ -695,7 +690,7 @@ static ssize_t bch2_btree_updates_read(struct file *file, char __user *buf, i->iter++; } - err = flush_buf(i); + err = bch2_debugfs_flush_buf(i); if (err) return err; @@ -753,7 +748,7 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf, while (1) { struct btree_transaction_stats *s = &c->btree_transaction_stats[i->iter]; - err = flush_buf(i); + err = bch2_debugfs_flush_buf(i); if (err) return err; @@ -770,6 +765,12 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf, mutex_lock(&s->lock); prt_printf(&i->buf, "Max mem used: %u\n", 
s->max_mem); +#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE + printbuf_indent_add(&i->buf, 2); + bch2_trans_kmalloc_trace_to_text(&i->buf, &s->trans_kmalloc_trace); + printbuf_indent_sub(&i->buf, 2); +#endif + prt_printf(&i->buf, "Transaction duration:\n"); printbuf_indent_add(&i->buf, 2); @@ -868,7 +869,7 @@ static ssize_t bch2_simple_print(struct file *file, char __user *buf, ret = -ENOMEM; if (!ret) - ret = flush_buf(i); + ret = bch2_debugfs_flush_buf(i); return ret ?: i->ret; } @@ -927,7 +928,11 @@ void bch2_fs_debug_init(struct bch_fs *c) if (IS_ERR_OR_NULL(bch_debug)) return; - snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b); + if (c->sb.multi_device) + snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b); + else + strscpy(name, c->name, sizeof(name)); + c->fs_debug_dir = debugfs_create_dir(name, bch_debug); if (IS_ERR_OR_NULL(c->fs_debug_dir)) return; @@ -953,6 +958,8 @@ void bch2_fs_debug_init(struct bch_fs *c) debugfs_create_file("write_points", 0400, c->fs_debug_dir, c->btree_debug, &write_points_ops); + bch2_fs_async_obj_debugfs_init(c); + c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir); if (IS_ERR_OR_NULL(c->btree_debug_dir)) return; diff --git a/fs/bcachefs/debug.h b/fs/bcachefs/debug.h index 2c37143b5fd1..d88b1194b8ac 100644 --- a/fs/bcachefs/debug.h +++ b/fs/bcachefs/debug.h @@ -14,11 +14,29 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *, struct bch_fs *, static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b) { - if (bch2_verify_btree_ondisk) + if (static_branch_unlikely(&bch2_verify_btree_ondisk)) __bch2_btree_verify(c, b); } #ifdef CONFIG_DEBUG_FS +struct dump_iter { + struct bch_fs *c; + struct async_obj_list *list; + enum btree_id id; + struct bpos from; + struct bpos prev_node; + u64 iter; + + struct printbuf buf; + + char __user *ubuf; /* destination user buffer */ + size_t size; /* size of requested read */ + ssize_t ret; /* bytes read so far */ +}; + +ssize_t bch2_debugfs_flush_buf(struct dump_iter *); +int bch2_dump_release(struct inode *, struct file *); + void bch2_fs_debug_exit(struct bch_fs *); void bch2_fs_debug_init(struct bch_fs *); #else diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c index 8a680e52c1ed..d198001838f3 100644 --- a/fs/bcachefs/dirent.c +++ b/fs/bcachefs/dirent.c @@ -212,12 +212,19 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); struct qstr d_name = bch2_dirent_get_name(d); - prt_printf(out, "%.*s -> ", d_name.len, d_name.name); + prt_printf(out, "%.*s", d_name.len, d_name.name); + + if (d.v->d_casefold) { + struct qstr d_name = bch2_dirent_get_lookup_name(d); + prt_printf(out, " (casefold %.*s)", d_name.len, d_name.name); + } + + prt_str(out, " ->"); if (d.v->d_type != DT_SUBVOL) - prt_printf(out, "%llu", le64_to_cpu(d.v->d_inum)); + prt_printf(out, " %llu", le64_to_cpu(d.v->d_inum)); else - prt_printf(out, "%u -> %u", + prt_printf(out, " %u -> %u", le32_to_cpu(d.v->d_parent_subvol), le32_to_cpu(d.v->d_child_subvol)); @@ -288,6 +295,7 @@ static void dirent_init_casefolded_name(struct bkey_i_dirent *dirent, } static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans, + const struct bch_hash_info *hash_info, subvol_inum dir, u8 type, const struct qstr *name, @@ -295,10 +303,19 @@ static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans, u64 dst) { struct bkey_i_dirent *dirent; + struct qstr _cf_name; if (name->len > BCH_NAME_MAX) return ERR_PTR(-ENAMETOOLONG); + if 
(hash_info->cf_encoding && !cf_name) { + int ret = bch2_casefold(trans, hash_info, name, &_cf_name); + if (ret) + return ERR_PTR(ret); + + cf_name = &_cf_name; + } + dirent = dirent_alloc_key(trans, dir, type, name->len, cf_name ? cf_name->len : 0, dst); if (IS_ERR(dirent)) return dirent; @@ -324,7 +341,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans, struct bkey_i_dirent *dirent; int ret; - dirent = dirent_create_key(trans, dir_inum, type, name, NULL, dst_inum); + dirent = dirent_create_key(trans, hash_info, dir_inum, type, name, NULL, dst_inum); ret = PTR_ERR_OR_ZERO(dirent); if (ret) return ret; @@ -333,8 +350,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans, dirent->k.p.snapshot = snapshot; ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info, - dir_inum, snapshot, &dirent->k_i, - flags|BTREE_UPDATE_internal_snapshot_node); + dir_inum, snapshot, &dirent->k_i, flags); *dir_offset = dirent->k.p.offset; return ret; @@ -344,28 +360,16 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir, const struct bch_hash_info *hash_info, u8 type, const struct qstr *name, u64 dst_inum, u64 *dir_offset, - u64 *i_size, enum btree_iter_update_trigger_flags flags) { struct bkey_i_dirent *dirent; int ret; - if (hash_info->cf_encoding) { - struct qstr cf_name; - ret = bch2_casefold(trans, hash_info, name, &cf_name); - if (ret) - return ret; - dirent = dirent_create_key(trans, dir, type, name, &cf_name, dst_inum); - } else { - dirent = dirent_create_key(trans, dir, type, name, NULL, dst_inum); - } - + dirent = dirent_create_key(trans, hash_info, dir, type, name, NULL, dst_inum); ret = PTR_ERR_OR_ZERO(dirent); if (ret) return ret; - *i_size += bkey_bytes(&dirent->k); - ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info, dir, &dirent->k_i, flags); *dir_offset = dirent->k.p.offset; @@ -466,7 +470,7 @@ int bch2_dirent_rename(struct btree_trans *trans, *src_offset = dst_iter.pos.offset; /* Create new dst key: */ - new_dst = dirent_create_key(trans, dst_dir, 0, dst_name, + new_dst = dirent_create_key(trans, dst_hash, dst_dir, 0, dst_name, dst_hash->cf_encoding ? &dst_name_lookup : NULL, 0); ret = PTR_ERR_OR_ZERO(new_dst); if (ret) @@ -477,7 +481,7 @@ int bch2_dirent_rename(struct btree_trans *trans, /* Create new src key: */ if (mode == BCH_RENAME_EXCHANGE) { - new_src = dirent_create_key(trans, src_dir, 0, src_name, + new_src = dirent_create_key(trans, src_hash, src_dir, 0, src_name, src_hash->cf_encoding ? 
&src_name_lookup : NULL, 0); ret = PTR_ERR_OR_ZERO(new_src); if (ret) diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h index 9838a7ba7ed1..d3e7ae669575 100644 --- a/fs/bcachefs/dirent.h +++ b/fs/bcachefs/dirent.h @@ -65,7 +65,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *, u32, u64, u32, enum btree_iter_update_trigger_flags); int bch2_dirent_create(struct btree_trans *, subvol_inum, const struct bch_hash_info *, u8, - const struct qstr *, u64, u64 *, u64 *, + const struct qstr *, u64, u64 *, enum btree_iter_update_trigger_flags); static inline unsigned vfs_d_type(unsigned type) diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c index 1f0422bfae35..b3840ff7c407 100644 --- a/fs/bcachefs/disk_accounting.c +++ b/fs/bcachefs/disk_accounting.c @@ -68,23 +68,31 @@ static const char * const disk_accounting_type_strs[] = { NULL }; -static inline void accounting_key_init(struct bkey_i *k, struct disk_accounting_pos *pos, - s64 *d, unsigned nr) +static inline void __accounting_key_init(struct bkey_i *k, struct bpos pos, + s64 *d, unsigned nr) { struct bkey_i_accounting *acc = bkey_accounting_init(k); - acc->k.p = disk_accounting_pos_to_bpos(pos); + acc->k.p = pos; set_bkey_val_u64s(&acc->k, sizeof(struct bch_accounting) / sizeof(u64) + nr); memcpy_u64s_small(acc->v.d, d, nr); } +static inline void accounting_key_init(struct bkey_i *k, struct disk_accounting_pos *pos, + s64 *d, unsigned nr) +{ + return __accounting_key_init(k, disk_accounting_pos_to_bpos(pos), d, nr); +} + static int bch2_accounting_update_sb_one(struct bch_fs *, struct bpos); int bch2_disk_accounting_mod(struct btree_trans *trans, struct disk_accounting_pos *k, s64 *d, unsigned nr, bool gc) { + BUG_ON(nr > BCH_ACCOUNTING_MAX_COUNTERS); + /* Normalize: */ switch (k->type) { case BCH_DISK_ACCOUNTING_replicas: @@ -92,21 +100,49 @@ int bch2_disk_accounting_mod(struct btree_trans *trans, break; } - BUG_ON(nr > BCH_ACCOUNTING_MAX_COUNTERS); + struct bpos pos = disk_accounting_pos_to_bpos(k); + + if (likely(!gc)) { + struct bkey_i_accounting *a; +#if 0 + for (a = btree_trans_subbuf_base(trans, &trans->accounting); + a != btree_trans_subbuf_top(trans, &trans->accounting); + a = (void *) bkey_next(&a->k_i)) + if (bpos_eq(a->k.p, pos)) { + BUG_ON(nr != bch2_accounting_counters(&a->k)); + acc_u64s(a->v.d, d, nr); + + if (bch2_accounting_key_is_zero(accounting_i_to_s_c(a))) { + unsigned offset = (u64 *) a - + (u64 *) btree_trans_subbuf_base(trans, &trans->accounting); + + trans->accounting.u64s -= a->k.u64s; + memmove_u64s_down(a, + bkey_next(&a->k_i), + trans->accounting.u64s - offset); + } + return 0; + } +#endif + unsigned u64s = sizeof(*a) / sizeof(u64) + nr; + a = bch2_trans_subbuf_alloc(trans, &trans->accounting, u64s); + int ret = PTR_ERR_OR_ZERO(a); + if (ret) + return ret; - struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i; + __accounting_key_init(&a->k_i, pos, d, nr); + return 0; + } else { + struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i; - accounting_key_init(&k_i.k, k, d, nr); + __accounting_key_init(&k_i.k, pos, d, nr); - if (unlikely(gc)) { int ret = bch2_accounting_mem_add(trans, bkey_i_to_s_c_accounting(&k_i.k), true); if (ret == -BCH_ERR_btree_insert_need_mark_replicas) ret = drop_locks_do(trans, bch2_accounting_update_sb_one(trans->c, disk_accounting_pos_to_bpos(k))) ?: bch2_accounting_mem_add(trans, bkey_i_to_s_c_accounting(&k_i.k), true); return ret; - } else { - return bch2_trans_update_buffered(trans, BTREE_ID_accounting, &k_i.k); } } @@ -287,7 +323,7 
@@ static inline bool accounting_to_replicas(struct bch_replicas_entry_v1 *r, struc static int bch2_accounting_update_sb_one(struct bch_fs *c, struct bpos p) { - struct bch_replicas_padded r; + union bch_replicas_padded r; return accounting_to_replicas(&r.e, p) ? bch2_mark_replicas(c, &r.e) : 0; @@ -299,14 +335,13 @@ static int bch2_accounting_update_sb_one(struct bch_fs *c, struct bpos p) */ int bch2_accounting_update_sb(struct btree_trans *trans) { - for (struct jset_entry *i = trans->journal_entries; - i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); - i = vstruct_next(i)) - if (jset_entry_is_key(i) && i->start->k.type == KEY_TYPE_accounting) { - int ret = bch2_accounting_update_sb_one(trans->c, i->start->k.p); - if (ret) - return ret; - } + for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting); + i != btree_trans_subbuf_top(trans, &trans->accounting); + i = bkey_next(i)) { + int ret = bch2_accounting_update_sb_one(trans->c, i->k.p); + if (ret) + return ret; + } return 0; } @@ -361,7 +396,7 @@ err: int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, enum bch_accounting_mode mode) { - struct bch_replicas_padded r; + union bch_replicas_padded r; if (mode != BCH_ACCOUNTING_read && accounting_to_replicas(&r.e, a.k->p) && @@ -379,7 +414,7 @@ int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, int bch2_accounting_mem_insert_locked(struct bch_fs *c, struct bkey_s_c_accounting a, enum bch_accounting_mode mode) { - struct bch_replicas_padded r; + union bch_replicas_padded r; if (mode != BCH_ACCOUNTING_read && accounting_to_replicas(&r.e, a.k->p) && @@ -438,10 +473,12 @@ int bch2_fs_replicas_usage_read(struct bch_fs *c, darray_char *usage) percpu_down_read(&c->mark_lock); darray_for_each(acc->k, i) { - struct { + union { + u8 bytes[struct_size_t(struct bch_replicas_usage, r.devs, + BCH_BKEY_PTRS_MAX)]; struct bch_replicas_usage r; - u8 pad[BCH_BKEY_PTRS_MAX]; } u; + u.r.r.nr_devs = BCH_BKEY_PTRS_MAX; if (!accounting_to_replicas(&u.r.r, i->pos)) continue; @@ -570,11 +607,11 @@ int bch2_gc_accounting_done(struct bch_fs *c) prt_str(&buf, "accounting mismatch for "); bch2_accounting_key_to_text(&buf, &acc_k); - prt_str(&buf, ": got"); + prt_str(&buf, ":\n got"); for (unsigned j = 0; j < nr; j++) prt_printf(&buf, " %llu", dst_v[j]); - prt_str(&buf, " should be"); + prt_str(&buf, "\nshould be"); for (unsigned j = 0; j < nr; j++) prt_printf(&buf, " %llu", src_v[j]); @@ -631,17 +668,17 @@ static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k) } static int bch2_disk_accounting_validate_late(struct btree_trans *trans, - struct disk_accounting_pos acc, + struct disk_accounting_pos *acc, u64 *v, unsigned nr) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; int ret = 0, invalid_dev = -1; - switch (acc.type) { + switch (acc->type) { case BCH_DISK_ACCOUNTING_replicas: { - struct bch_replicas_padded r; - __accounting_to_replicas(&r.e, &acc); + union bch_replicas_padded r; + __accounting_to_replicas(&r.e, acc); for (unsigned i = 0; i < r.e.nr_devs; i++) if (r.e.devs[i] != BCH_SB_MEMBER_INVALID && @@ -660,7 +697,7 @@ static int bch2_disk_accounting_validate_late(struct btree_trans *trans, trans, accounting_replicas_not_marked, "accounting not marked in superblock replicas\n%s", (printbuf_reset(&buf), - bch2_accounting_key_to_text(&buf, &acc), + bch2_accounting_key_to_text(&buf, acc), buf.buf))) { /* * We're not RW yet and still single threaded, dropping @@ -676,8 +713,8 @@ 
static int bch2_disk_accounting_validate_late(struct btree_trans *trans, } case BCH_DISK_ACCOUNTING_dev_data_type: - if (!bch2_dev_exists(c, acc.dev_data_type.dev)) { - invalid_dev = acc.dev_data_type.dev; + if (!bch2_dev_exists(c, acc->dev_data_type.dev)) { + invalid_dev = acc->dev_data_type.dev; goto invalid_device; } break; @@ -691,13 +728,13 @@ invalid_device: "accounting entry points to invalid device %i\n%s", invalid_dev, (printbuf_reset(&buf), - bch2_accounting_key_to_text(&buf, &acc), + bch2_accounting_key_to_text(&buf, acc), buf.buf))) { for (unsigned i = 0; i < nr; i++) v[i] = -v[i]; ret = commit_do(trans, NULL, NULL, 0, - bch2_disk_accounting_mod(trans, &acc, v, nr, false)) ?: + bch2_disk_accounting_mod(trans, acc, v, nr, false)) ?: -BCH_ERR_remove_disk_accounting_entry; } else { ret = -BCH_ERR_remove_disk_accounting_entry; @@ -748,7 +785,7 @@ int bch2_accounting_read(struct bch_fs *c) if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR) break; - if (!bch2_accounting_is_mem(acc_k)) { + if (!bch2_accounting_is_mem(&acc_k)) { struct disk_accounting_pos next; memset(&next, 0, sizeof(next)); next.type = acc_k.type + 1; @@ -770,7 +807,7 @@ int bch2_accounting_read(struct bch_fs *c) struct disk_accounting_pos acc_k; bpos_to_disk_accounting_pos(&acc_k, i->k->k.p); - if (!bch2_accounting_is_mem(acc_k)) + if (!bch2_accounting_is_mem(&acc_k)) continue; struct bkey_s_c k = bkey_i_to_s_c(i->k); @@ -826,7 +863,7 @@ int bch2_accounting_read(struct bch_fs *c) */ ret = bch2_is_zero(v, sizeof(v[0]) * i->nr_counters) ? -BCH_ERR_remove_disk_accounting_entry - : bch2_disk_accounting_validate_late(trans, acc_k, v, i->nr_counters); + : bch2_disk_accounting_validate_late(trans, &acc_k, v, i->nr_counters); if (ret == -BCH_ERR_remove_disk_accounting_entry) { free_percpu(i->v[0]); @@ -939,7 +976,7 @@ void bch2_verify_accounting_clean(struct bch_fs *c) if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR) break; - if (!bch2_accounting_is_mem(acc_k)) { + if (!bch2_accounting_is_mem(&acc_k)) { struct disk_accounting_pos next; memset(&next, 0, sizeof(next)); next.type = acc_k.type + 1; diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h index d557b99b3c0a..f6098e33ab30 100644 --- a/fs/bcachefs/disk_accounting.h +++ b/fs/bcachefs/disk_accounting.h @@ -139,10 +139,10 @@ int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum int bch2_accounting_mem_insert_locked(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode); void bch2_accounting_mem_gc(struct bch_fs *); -static inline bool bch2_accounting_is_mem(struct disk_accounting_pos acc) +static inline bool bch2_accounting_is_mem(struct disk_accounting_pos *acc) { - return acc.type < BCH_DISK_ACCOUNTING_TYPE_NR && - acc.type != BCH_DISK_ACCOUNTING_inum; + return acc->type < BCH_DISK_ACCOUNTING_TYPE_NR && + acc->type != BCH_DISK_ACCOUNTING_inum; } /* @@ -163,7 +163,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, if (gc && !acc->gc_running) return 0; - if (!bch2_accounting_is_mem(acc_k)) + if (!bch2_accounting_is_mem(&acc_k)) return 0; if (mode == BCH_ACCOUNTING_normal) { @@ -259,8 +259,8 @@ static inline int bch2_accounting_trans_commit_hook(struct btree_trans *trans, struct bkey_i_accounting *a, unsigned commit_flags) { - a->k.bversion = journal_pos_to_bversion(&trans->journal_res, - (u64 *) a - (u64 *) trans->journal_entries); + u64 *base = (u64 *) btree_trans_subbuf_base(trans, &trans->accounting); + a->k.bversion = journal_pos_to_bversion(&trans->journal_res, (u64 *) a - 
base); EBUG_ON(bversion_zero(a->k.bversion)); diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c index 2ca3cbf12b71..c20ecf5e5381 100644 --- a/fs/bcachefs/disk_groups.c +++ b/fs/bcachefs/disk_groups.c @@ -86,35 +86,6 @@ err: return ret; } -void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c) -{ - out->atomic++; - rcu_read_lock(); - - struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups); - if (!g) - goto out; - - for (unsigned i = 0; i < g->nr; i++) { - if (i) - prt_printf(out, " "); - - if (g->entries[i].deleted) { - prt_printf(out, "[deleted]"); - continue; - } - - prt_printf(out, "[parent %d devs", g->entries[i].parent); - for_each_member_device_rcu(c, ca, &g->entries[i].devs) - prt_printf(out, " %s", ca->name); - prt_printf(out, "]"); - } - -out: - rcu_read_unlock(); - out->atomic--; -} - static void bch2_sb_disk_groups_to_text(struct printbuf *out, struct bch_sb *sb, struct bch_sb_field *f) @@ -241,20 +212,13 @@ bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target) case TARGET_DEV: return dev == t.dev; case TARGET_GROUP: { - struct bch_disk_groups_cpu *g; - const struct bch_devs_mask *m; - bool ret; - - rcu_read_lock(); - g = rcu_dereference(c->disk_groups); - m = g && t.group < g->nr && !g->entries[t.group].deleted + struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups); + const struct bch_devs_mask *m = + g && t.group < g->nr && !g->entries[t.group].deleted ? &g->entries[t.group].devs : NULL; - ret = m ? test_bit(dev, m->d) : false; - rcu_read_unlock(); - - return ret; + return m ? test_bit(dev, m->d) : false; } default: BUG(); @@ -377,54 +341,81 @@ int bch2_disk_path_find_or_create(struct bch_sb_handle *sb, const char *name) return v; } -void bch2_disk_path_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) +static void __bch2_disk_path_to_text(struct printbuf *out, struct bch_disk_groups_cpu *g, + unsigned v) { - struct bch_disk_groups_cpu *groups; - struct bch_disk_group_cpu *g; - unsigned nr = 0; u16 path[32]; - - out->atomic++; - rcu_read_lock(); - groups = rcu_dereference(c->disk_groups); - if (!groups) - goto invalid; + unsigned nr = 0; while (1) { if (nr == ARRAY_SIZE(path)) goto invalid; - if (v >= groups->nr) + if (v >= (g ? g->nr : 0)) goto invalid; - g = groups->entries + v; + struct bch_disk_group_cpu *e = g->entries + v; - if (g->deleted) + if (e->deleted) goto invalid; path[nr++] = v; - if (!g->parent) + if (!e->parent) break; - v = g->parent - 1; + v = e->parent - 1; } while (nr) { - v = path[--nr]; - g = groups->entries + v; + struct bch_disk_group_cpu *e = g->entries + path[--nr]; - prt_printf(out, "%.*s", (int) sizeof(g->label), g->label); + prt_printf(out, "%.*s", (int) sizeof(e->label), e->label); if (nr) prt_printf(out, "."); } -out: - rcu_read_unlock(); - out->atomic--; return; invalid: prt_printf(out, "invalid label %u", v); - goto out; +} + +void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c) +{ + bch2_printbuf_make_room(out, 4096); + + out->atomic++; + rcu_read_lock(); + struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups); + + for (unsigned i = 0; i < (g ? 
g->nr : 0); i++) { + prt_printf(out, "%2u: ", i); + + if (g->entries[i].deleted) { + prt_printf(out, "[deleted]"); + goto next; + } + + __bch2_disk_path_to_text(out, g, i); + + prt_printf(out, " devs"); + + for_each_member_device_rcu(c, ca, &g->entries[i].devs) + prt_printf(out, " %s", ca->name); +next: + prt_newline(out); + } + + rcu_read_unlock(); + out->atomic--; +} + +void bch2_disk_path_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) +{ + out->atomic++; + rcu_read_lock(); + __bch2_disk_path_to_text(out, rcu_dereference(c->disk_groups), v), + rcu_read_unlock(); + --out->atomic; } void bch2_disk_path_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v) @@ -554,14 +545,12 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) ? rcu_dereference(c->devs[t.dev]) : NULL; - if (ca && percpu_ref_tryget(&ca->io_ref[READ])) { + if (ca && ca->disk_sb.bdev) prt_printf(out, "/dev/%s", ca->name); - percpu_ref_put(&ca->io_ref[READ]); - } else if (ca) { + else if (ca) prt_printf(out, "offline device %u", t.dev); - } else { + else prt_printf(out, "invalid device %u", t.dev); - } rcu_read_unlock(); out->atomic--; diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index fff58b78327c..c581426e3894 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -16,6 +16,7 @@ #include "disk_accounting.h" #include "disk_groups.h" #include "ec.h" +#include "enumerated_ref.h" #include "error.h" #include "io_read.h" #include "io_write.h" @@ -507,20 +508,14 @@ static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s, static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx) { - switch (k.k->type) { - case KEY_TYPE_extent: { - struct bkey_s_c_extent e = bkey_s_c_to_extent(k); - const union bch_extent_entry *entry; - - extent_for_each_entry(e, entry) - if (extent_entry_type(entry) == - BCH_EXTENT_ENTRY_stripe_ptr && - entry->stripe_ptr.idx == idx) - return true; + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; - break; - } - } + bkey_extent_entry_for_each(ptrs, entry) + if (extent_entry_type(entry) == + BCH_EXTENT_ENTRY_stripe_ptr && + entry->stripe_ptr.idx == idx) + return true; return false; } @@ -706,6 +701,9 @@ static void ec_block_endio(struct bio *bio) struct bch_dev *ca = ec_bio->ca; struct closure *cl = bio->bi_private; int rw = ec_bio->rw; + unsigned ref = rw == READ + ? BCH_DEV_READ_REF_ec_block + : BCH_DEV_WRITE_REF_ec_block; bch2_account_io_completion(ca, bio_data_dir(bio), ec_bio->submit_time, !bio->bi_status); @@ -727,7 +725,7 @@ static void ec_block_endio(struct bio *bio) } bio_put(&ec_bio->bio); - percpu_ref_put(&ca->io_ref[rw]); + enumerated_ref_put(&ca->io_ref[rw], ref); closure_put(cl); } @@ -741,8 +739,11 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, ? BCH_DATA_user : BCH_DATA_parity; int rw = op_is_write(opf); + unsigned ref = rw == READ + ? 
BCH_DEV_READ_REF_ec_block + : BCH_DEV_WRITE_REF_ec_block; - struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw); + struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw, ref); if (!ca) { clear_bit(idx, buf->valid); return; @@ -788,14 +789,14 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b); closure_get(cl); - percpu_ref_get(&ca->io_ref[rw]); + enumerated_ref_get(&ca->io_ref[rw], ref); submit_bio(&ec_bio->bio); offset += b; } - percpu_ref_put(&ca->io_ref[rw]); + enumerated_ref_put(&ca->io_ref[rw], ref); } static int get_stripe_key_trans(struct btree_trans *trans, u64 idx, @@ -1017,14 +1018,14 @@ static void ec_stripe_delete_work(struct work_struct *work) BCH_TRANS_COMMIT_no_enospc, ({ ec_stripe_delete(trans, lru_k.k->p.offset); }))); - bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_stripe_delete); } void bch2_do_stripe_deletes(struct bch_fs *c) { - if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) && + if (enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_stripe_delete) && !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work)) - bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_stripe_delete); } /* stripe creation: */ @@ -1252,7 +1253,8 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c, unsigned block, struct open_bucket *ob) { - struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE); + struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE, + BCH_DEV_WRITE_REF_ec_bucket_zero); if (!ca) { s->err = -BCH_ERR_erofs_no_writes; return; @@ -1268,7 +1270,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c, ob->sectors_free, GFP_KERNEL, 0); - percpu_ref_put(&ca->io_ref[WRITE]); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_ec_bucket_zero); if (ret) s->err = ret; @@ -1418,15 +1420,15 @@ static void ec_stripe_create_work(struct work_struct *work) while ((s = get_pending_stripe(c))) ec_stripe_create(s); - bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_stripe_create); } void bch2_ec_do_stripe_creates(struct bch_fs *c) { - bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create); + enumerated_ref_get(&c->writes, BCH_WRITE_REF_stripe_create); if (!queue_work(system_long_wq, &c->ec_stripe_create_work)) - bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_stripe_create); } static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h) @@ -1716,23 +1718,32 @@ err: } static int new_stripe_alloc_buckets(struct btree_trans *trans, + struct alloc_request *req, struct ec_stripe_head *h, struct ec_stripe_new *s, - enum bch_watermark watermark, struct closure *cl) + struct closure *cl) { struct bch_fs *c = trans->c; - struct bch_devs_mask devs = h->devs; struct open_bucket *ob; - struct open_buckets buckets; struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v; unsigned i, j, nr_have_parity = 0, nr_have_data = 0; - bool have_cache = true; int ret = 0; + req->scratch_data_type = req->data_type; + req->scratch_ptrs = req->ptrs; + req->scratch_nr_replicas = req->nr_replicas; + req->scratch_nr_effective = req->nr_effective; + req->scratch_have_cache = req->have_cache; + req->scratch_devs_may_alloc = req->devs_may_alloc; + + req->devs_may_alloc = h->devs; + req->have_cache = true; + BUG_ON(v->nr_blocks != s->nr_data + s->nr_parity); BUG_ON(v->nr_redundant != 
s->nr_parity); /* * We bypass the sector allocator which normally does this: */ - bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX); + bitmap_and(req->devs_may_alloc.d, req->devs_may_alloc.d, + c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX); for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) { /* @@ -1742,7 +1753,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, * block when updating the stripe */ if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID) - __clear_bit(v->ptrs[i].dev, devs.d); + __clear_bit(v->ptrs[i].dev, req->devs_may_alloc.d); if (i < s->nr_data) nr_have_data++; @@ -1753,60 +1764,58 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, BUG_ON(nr_have_data > s->nr_data); BUG_ON(nr_have_parity > s->nr_parity); - buckets.nr = 0; + req->ptrs.nr = 0; if (nr_have_parity < s->nr_parity) { - ret = bch2_bucket_alloc_set_trans(trans, &buckets, - &h->parity_stripe, - &devs, - s->nr_parity, - &nr_have_parity, - &have_cache, 0, - BCH_DATA_parity, - watermark, - cl); - - open_bucket_for_each(c, &buckets, ob, i) { + req->nr_replicas = s->nr_parity; + req->nr_effective = nr_have_parity; + req->data_type = BCH_DATA_parity; + + ret = bch2_bucket_alloc_set_trans(trans, req, &h->parity_stripe, cl); + + open_bucket_for_each(c, &req->ptrs, ob, i) { j = find_next_zero_bit(s->blocks_gotten, s->nr_data + s->nr_parity, s->nr_data); BUG_ON(j >= s->nr_data + s->nr_parity); - s->blocks[j] = buckets.v[i]; + s->blocks[j] = req->ptrs.v[i]; v->ptrs[j] = bch2_ob_ptr(c, ob); __set_bit(j, s->blocks_gotten); } if (ret) - return ret; + goto err; } - buckets.nr = 0; + req->ptrs.nr = 0; if (nr_have_data < s->nr_data) { - ret = bch2_bucket_alloc_set_trans(trans, &buckets, - &h->block_stripe, - &devs, - s->nr_data, - &nr_have_data, - &have_cache, 0, - BCH_DATA_user, - watermark, - cl); - - open_bucket_for_each(c, &buckets, ob, i) { + req->nr_replicas = s->nr_data; + req->nr_effective = nr_have_data; + req->data_type = BCH_DATA_user; + + ret = bch2_bucket_alloc_set_trans(trans, req, &h->block_stripe, cl); + + open_bucket_for_each(c, &req->ptrs, ob, i) { j = find_next_zero_bit(s->blocks_gotten, s->nr_data, 0); BUG_ON(j >= s->nr_data); - s->blocks[j] = buckets.v[i]; + s->blocks[j] = req->ptrs.v[i]; v->ptrs[j] = bch2_ob_ptr(c, ob); __set_bit(j, s->blocks_gotten); } if (ret) - return ret; + goto err; } - - return 0; +err: + req->data_type = req->scratch_data_type; + req->ptrs = req->scratch_ptrs; + req->nr_replicas = req->scratch_nr_replicas; + req->nr_effective = req->scratch_nr_effective; + req->have_cache = req->scratch_have_cache; + req->devs_may_alloc = req->scratch_devs_may_alloc; + return ret; } static int __get_existing_stripe(struct btree_trans *trans, @@ -1987,17 +1996,15 @@ err: } struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, - unsigned target, + struct alloc_request *req, unsigned algo, - unsigned redundancy, - enum bch_watermark watermark, struct closure *cl) { struct bch_fs *c = trans->c; - struct ec_stripe_head *h; - bool waiting = false; + unsigned redundancy = req->nr_replicas - 1; unsigned disk_label = 0; - struct target t = target_decode(target); + struct target t = target_decode(req->target); + bool waiting = false; int ret; if (t.type == TARGET_GROUP) { @@ -2008,7 +2015,9 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, disk_label = t.group + 1; /* 0 == no label */ } - h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark); + struct ec_stripe_head *h = + 
__bch2_ec_stripe_head_get(trans, disk_label, algo, + redundancy, req->watermark); if (IS_ERR_OR_NULL(h)) return h; @@ -2032,8 +2041,12 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, goto alloc_existing; /* First, try to allocate a full stripe: */ - ret = new_stripe_alloc_buckets(trans, h, s, BCH_WATERMARK_stripe, NULL) ?: + enum bch_watermark saved_watermark = BCH_WATERMARK_stripe; + swap(req->watermark, saved_watermark); + ret = new_stripe_alloc_buckets(trans, req, h, s, NULL) ?: __bch2_ec_stripe_head_reserve(trans, h, s); + swap(req->watermark, saved_watermark); + if (!ret) goto allocate_buf; if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || @@ -2051,8 +2064,8 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked) goto err; - if (watermark == BCH_WATERMARK_copygc) { - ret = new_stripe_alloc_buckets(trans, h, s, watermark, NULL) ?: + if (req->watermark == BCH_WATERMARK_copygc) { + ret = new_stripe_alloc_buckets(trans, req, h, s, NULL) ?: __bch2_ec_stripe_head_reserve(trans, h, s); if (ret) goto err; @@ -2071,7 +2084,7 @@ alloc_existing: * Retry allocating buckets, with the watermark for this * particular write: */ - ret = new_stripe_alloc_buckets(trans, h, s, watermark, cl); + ret = new_stripe_alloc_buckets(trans, req, h, s, cl); if (ret) goto err; @@ -2093,23 +2106,17 @@ err: /* device removal */ -static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a) +int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k, + unsigned dev_idx, + unsigned flags) { - struct bch_alloc_v4 a_convert; - const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert); - - if (!a->stripe) + if (k.k->type != KEY_TYPE_stripe) return 0; - if (a->stripe_sectors) { - bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data"); - return -BCH_ERR_invalidate_stripe_to_dev; - } - - struct btree_iter iter; struct bkey_i_stripe *s = - bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe), - BTREE_ITER_slots, stripe); + bch2_bkey_make_mut_typed(trans, iter, &k, 0, stripe); int ret = PTR_ERR_OR_ZERO(s); if (ret) return ret; @@ -2126,36 +2133,79 @@ static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_ acc.replicas.data_type = BCH_DATA_user; ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false); if (ret) - goto err; + return ret; struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i)); - bkey_for_each_ptr(ptrs, ptr) - if (ptr->dev == k_a.k->p.inode) + + /* XXX: how much redundancy do we still have? 
check degraded flags */ + + unsigned nr_good = 0; + + rcu_read_lock(); + bkey_for_each_ptr(ptrs, ptr) { + if (ptr->dev == dev_idx) ptr->dev = BCH_SB_MEMBER_INVALID; + struct bch_dev *ca = bch2_dev_rcu(trans->c, ptr->dev); + nr_good += ca && ca->mi.state != BCH_MEMBER_STATE_failed; + } + rcu_read_unlock(); + + if (nr_good < s->v.nr_blocks && !(flags & BCH_FORCE_IF_DATA_DEGRADED)) + return -BCH_ERR_remove_would_lose_data; + + unsigned nr_data = s->v.nr_blocks - s->v.nr_redundant; + + if (nr_good < nr_data && !(flags & BCH_FORCE_IF_DATA_LOST)) + return -BCH_ERR_remove_would_lose_data; + sectors = -sectors; memset(&acc, 0, sizeof(acc)); acc.type = BCH_DISK_ACCOUNTING_replicas; bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i)); acc.replicas.data_type = BCH_DATA_user; - ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false); + return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false); +} + +static int bch2_invalidate_stripe_to_dev_from_alloc(struct btree_trans *trans, struct bkey_s_c k_a, + unsigned flags) +{ + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert); + + if (!a->stripe) + return 0; + + if (a->stripe_sectors) { + bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data"); + return -BCH_ERR_invalidate_stripe_to_dev; + } + + struct btree_iter iter; + struct bkey_s_c_stripe s = + bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe), + BTREE_ITER_slots, stripe); + int ret = bkey_err(s); if (ret) - goto err; -err: + return ret; + + ret = bch2_invalidate_stripe_to_dev(trans, &iter, s.s_c, k_a.k->p.inode, flags); bch2_trans_iter_exit(trans, &iter); return ret; } -int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx) +int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx, unsigned flags) { - return bch2_trans_run(c, + int ret = bch2_trans_run(c, for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX), BTREE_ITER_intent, k, NULL, NULL, 0, ({ - bch2_invalidate_stripe_to_dev(trans, k); + bch2_invalidate_stripe_to_dev_from_alloc(trans, k, flags); }))); + bch_err_fn(c, ret); + return ret; } /* startup/shutdown */ diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h index 51893e1ee874..548048adf0d5 100644 --- a/fs/bcachefs/ec.h +++ b/fs/bcachefs/ec.h @@ -255,9 +255,10 @@ void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *, int); int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *); void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *); + +struct alloc_request; struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *, - unsigned, unsigned, unsigned, - enum bch_watermark, struct closure *); + struct alloc_request *, unsigned, struct closure *); void bch2_do_stripe_deletes(struct bch_fs *); void bch2_ec_do_stripe_creates(struct bch_fs *); @@ -287,7 +288,9 @@ static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s, } } -int bch2_dev_remove_stripes(struct bch_fs *, unsigned); +int bch2_invalidate_stripe_to_dev(struct btree_trans *, struct btree_iter *, + struct bkey_s_c, unsigned, unsigned); +int bch2_dev_remove_stripes(struct bch_fs *, unsigned, unsigned); void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *); void bch2_fs_ec_stop(struct bch_fs *); diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h index 06144bfd9c19..809446c78951 100644 --- a/fs/bcachefs/ec_types.h +++ b/fs/bcachefs/ec_types.h @@ -4,9 +4,10 @@ #include "bcachefs_format.h"
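/*
 * A minimal sketch of the sizing idiom in the hunk below, using only what the
 * hunk itself shows: bch_replicas_padded becomes a union whose bytes[] member
 * is sized with struct_size_t(), so the embedded bch_replicas_entry_v1 has
 * room for up to BCH_BKEY_PTRS_MAX device entries, e.g.:
 *
 *	union bch_replicas_padded r;
 *	bch2_bkey_to_replicas(&r.e, k);
 *
 * where filling r.e through the union cannot overrun r.bytes.
 */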
-struct bch_replicas_padded { +union bch_replicas_padded { + u8 bytes[struct_size_t(struct bch_replicas_entry_v1, + devs, BCH_BKEY_PTRS_MAX)]; struct bch_replicas_entry_v1 e; - u8 pad[BCH_BKEY_PTRS_MAX]; }; struct stripe { @@ -28,7 +29,7 @@ struct gc_stripe { u16 block_sectors[BCH_BKEY_PTRS_MAX]; struct bch_extent_ptr ptrs[BCH_BKEY_PTRS_MAX]; - struct bch_replicas_padded r; + union bch_replicas_padded r; }; #endif /* _BCACHEFS_EC_TYPES_H */ diff --git a/fs/bcachefs/enumerated_ref.c b/fs/bcachefs/enumerated_ref.c new file mode 100644 index 000000000000..56ab430f209f --- /dev/null +++ b/fs/bcachefs/enumerated_ref.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "bcachefs.h" +#include "enumerated_ref.h" +#include "util.h" + +#include <linux/completion.h> + +#ifdef ENUMERATED_REF_DEBUG +void enumerated_ref_get(struct enumerated_ref *ref, unsigned idx) +{ + BUG_ON(idx >= ref->nr); + atomic_long_inc(&ref->refs[idx]); +} + +bool __enumerated_ref_tryget(struct enumerated_ref *ref, unsigned idx) +{ + BUG_ON(idx >= ref->nr); + return atomic_long_inc_not_zero(&ref->refs[idx]); +} + +bool enumerated_ref_tryget(struct enumerated_ref *ref, unsigned idx) +{ + BUG_ON(idx >= ref->nr); + return !ref->dying && + atomic_long_inc_not_zero(&ref->refs[idx]); +} + +void enumerated_ref_put(struct enumerated_ref *ref, unsigned idx) +{ + BUG_ON(idx >= ref->nr); + long v = atomic_long_dec_return(&ref->refs[idx]); + + BUG_ON(v < 0); + if (v) + return; + + for (unsigned i = 0; i < ref->nr; i++) + if (atomic_long_read(&ref->refs[i])) + return; + + if (ref->stop_fn) + ref->stop_fn(ref); + complete(&ref->stop_complete); +} +#endif + +#ifndef ENUMERATED_REF_DEBUG +static void enumerated_ref_kill_cb(struct percpu_ref *percpu_ref) +{ + struct enumerated_ref *ref = + container_of(percpu_ref, struct enumerated_ref, ref); + + if (ref->stop_fn) + ref->stop_fn(ref); + complete(&ref->stop_complete); +} +#endif + +void enumerated_ref_stop_async(struct enumerated_ref *ref) +{ + reinit_completion(&ref->stop_complete); + +#ifndef ENUMERATED_REF_DEBUG + percpu_ref_kill(&ref->ref); +#else + ref->dying = true; + for (unsigned i = 0; i < ref->nr; i++) + enumerated_ref_put(ref, i); +#endif +} + +void enumerated_ref_stop(struct enumerated_ref *ref, + const char * const names[]) +{ + enumerated_ref_stop_async(ref); + while (!wait_for_completion_timeout(&ref->stop_complete, HZ * 10)) { + struct printbuf buf = PRINTBUF; + + prt_str(&buf, "Waited for 10 seconds to shutdown enumerated ref\n"); + prt_str(&buf, "Outstanding refs:\n"); + enumerated_ref_to_text(&buf, ref, names); + printk(KERN_ERR "%s", buf.buf); + printbuf_exit(&buf); + } +} + +void enumerated_ref_start(struct enumerated_ref *ref) +{ +#ifndef ENUMERATED_REF_DEBUG + percpu_ref_reinit(&ref->ref); +#else + ref->dying = false; + for (unsigned i = 0; i < ref->nr; i++) { + BUG_ON(atomic_long_read(&ref->refs[i])); + atomic_long_inc(&ref->refs[i]); + } +#endif +} + +void enumerated_ref_exit(struct enumerated_ref *ref) +{ +#ifndef ENUMERATED_REF_DEBUG + percpu_ref_exit(&ref->ref); +#else + kfree(ref->refs); + ref->refs = NULL; + ref->nr = 0; +#endif +} + +int enumerated_ref_init(struct enumerated_ref *ref, unsigned nr, + void (*stop_fn)(struct enumerated_ref *)) +{ + init_completion(&ref->stop_complete); + ref->stop_fn = stop_fn; + +#ifndef ENUMERATED_REF_DEBUG + return percpu_ref_init(&ref->ref, enumerated_ref_kill_cb, + PERCPU_REF_INIT_DEAD, GFP_KERNEL); +#else + ref->refs = kzalloc(sizeof(ref->refs[0]) * nr, GFP_KERNEL); + if (!ref->refs) + return -ENOMEM; + + ref->nr 
= nr; + return 0; +#endif +} + +void enumerated_ref_to_text(struct printbuf *out, + struct enumerated_ref *ref, + const char * const names[]) +{ +#ifdef ENUMERATED_REF_DEBUG + bch2_printbuf_tabstop_push(out, 32); + + for (unsigned i = 0; i < ref->nr; i++) + prt_printf(out, "%s\t%li\n", names[i], + atomic_long_read(&ref->refs[i])); +#else + prt_str(out, "(not in debug mode)\n"); +#endif +} diff --git a/fs/bcachefs/enumerated_ref.h b/fs/bcachefs/enumerated_ref.h new file mode 100644 index 000000000000..ec01cf59ef80 --- /dev/null +++ b/fs/bcachefs/enumerated_ref.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_ENUMERATED_REF_H +#define _BCACHEFS_ENUMERATED_REF_H + +#include "enumerated_ref_types.h" + +/* + * A refcount where the users are enumerated: in debug mode, we create sepate + * refcounts for each user, to make leaks and refcount errors easy to track + * down: + */ + +#ifdef ENUMERATED_REF_DEBUG +void enumerated_ref_get(struct enumerated_ref *, unsigned); +bool __enumerated_ref_tryget(struct enumerated_ref *, unsigned); +bool enumerated_ref_tryget(struct enumerated_ref *, unsigned); +void enumerated_ref_put(struct enumerated_ref *, unsigned); +#else + +static inline void enumerated_ref_get(struct enumerated_ref *ref, unsigned idx) +{ + percpu_ref_get(&ref->ref); +} + +static inline bool __enumerated_ref_tryget(struct enumerated_ref *ref, unsigned idx) +{ + return percpu_ref_tryget(&ref->ref); +} + +static inline bool enumerated_ref_tryget(struct enumerated_ref *ref, unsigned idx) +{ + return percpu_ref_tryget_live(&ref->ref); +} + +static inline void enumerated_ref_put(struct enumerated_ref *ref, unsigned idx) +{ + percpu_ref_put(&ref->ref); +} +#endif + +static inline bool enumerated_ref_is_zero(struct enumerated_ref *ref) +{ +#ifndef ENUMERATED_REF_DEBUG + return percpu_ref_is_zero(&ref->ref); +#else + for (unsigned i = 0; i < ref->nr; i++) + if (atomic_long_read(&ref->refs[i])) + return false; + return true; +#endif +} + +void enumerated_ref_stop_async(struct enumerated_ref *); +void enumerated_ref_stop(struct enumerated_ref *, const char * const[]); +void enumerated_ref_start(struct enumerated_ref *); + +void enumerated_ref_exit(struct enumerated_ref *); +int enumerated_ref_init(struct enumerated_ref *, unsigned, + void (*stop_fn)(struct enumerated_ref *)); + +struct printbuf; +void enumerated_ref_to_text(struct printbuf *, + struct enumerated_ref *, + const char * const[]); + +#endif /* _BCACHEFS_ENUMERATED_REF_H */ diff --git a/fs/bcachefs/enumerated_ref_types.h b/fs/bcachefs/enumerated_ref_types.h new file mode 100644 index 000000000000..0e6076f466d3 --- /dev/null +++ b/fs/bcachefs/enumerated_ref_types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_ENUMERATED_REF_TYPES_H +#define _BCACHEFS_ENUMERATED_REF_TYPES_H + +#include <linux/percpu-refcount.h> + +struct enumerated_ref { +#ifdef ENUMERATED_REF_DEBUG + unsigned nr; + bool dying; + atomic_long_t *refs; +#else + struct percpu_ref ref; +#endif + void (*stop_fn)(struct enumerated_ref *); + struct completion stop_complete; +}; + +#endif /* _BCACHEFS_ENUMERATED_REF_TYPES_H */ diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index d9ebffa5b3a2..62843e772b2c 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -53,6 +53,7 @@ x(ENOMEM, ENOMEM_dio_write_bioset_init) \ x(ENOMEM, ENOMEM_nocow_flush_bioset_init) \ x(ENOMEM, ENOMEM_promote_table_init) \ + x(ENOMEM, ENOMEM_async_obj_init) \ x(ENOMEM, ENOMEM_compression_bounce_read_init) \ x(ENOMEM, 
ENOMEM_compression_bounce_write_init) \ x(ENOMEM, ENOMEM_compression_workspace_init) \ @@ -174,6 +175,7 @@ x(0, backpointer_to_overwritten_btree_node) \ x(0, journal_reclaim_would_deadlock) \ x(EINVAL, fsck) \ + x(BCH_ERR_fsck, fsck_ask) \ x(BCH_ERR_fsck, fsck_fix) \ x(BCH_ERR_fsck, fsck_delete_bkey) \ x(BCH_ERR_fsck, fsck_ignore) \ @@ -181,7 +183,6 @@ x(BCH_ERR_fsck, fsck_repair_unimplemented) \ x(BCH_ERR_fsck, fsck_repair_impossible) \ x(EINVAL, restart_recovery) \ - x(EINVAL, not_in_recovery) \ x(EINVAL, cannot_rewind_recovery) \ x(0, data_update_done) \ x(BCH_ERR_data_update_done, data_update_done_would_block) \ @@ -201,6 +202,7 @@ x(EINVAL, device_has_been_removed) \ x(EINVAL, device_splitbrain) \ x(EINVAL, device_already_online) \ + x(EINVAL, filesystem_uuid_already_open) \ x(EINVAL, insufficient_devices_to_start) \ x(EINVAL, invalid) \ x(EINVAL, internal_fsck_err) \ @@ -211,6 +213,7 @@ x(EINVAL, inode_unpack_error) \ x(EINVAL, varint_decode_error) \ x(EINVAL, erasure_coding_found_btree_node) \ + x(EINVAL, option_negative) \ x(EOPNOTSUPP, may_not_use_incompat_feature) \ x(EROFS, erofs_trans_commit) \ x(EROFS, erofs_no_writes) \ @@ -219,6 +222,8 @@ x(EROFS, erofs_unfixed_errors) \ x(EROFS, erofs_norecovery) \ x(EROFS, erofs_nochanges) \ + x(EROFS, erofs_no_alloc_info) \ + x(EROFS, erofs_filesystem_full) \ x(EROFS, insufficient_devices) \ x(0, operation_blocked) \ x(BCH_ERR_operation_blocked, btree_cache_cannibalize_lock_blocked) \ diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index 6b8695b1349c..c2cad28635bf 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -11,12 +11,12 @@ #define FSCK_ERR_RATELIMIT_NR 10 -void bch2_log_msg_start(struct bch_fs *c, struct printbuf *out) +void __bch2_log_msg_start(const char *fs_or_dev_name, struct printbuf *out) { printbuf_indent_add_nextline(out, 2); #ifdef BCACHEFS_LOG_PREFIX - prt_printf(out, bch2_log_msg(c, "")); + prt_printf(out, "bcachefs (%s): ", fs_or_dev_name); #endif } @@ -29,12 +29,10 @@ bool __bch2_inconsistent_error(struct bch_fs *c, struct printbuf *out) return false; case BCH_ON_ERROR_fix_safe: case BCH_ON_ERROR_ro: - if (bch2_fs_emergency_read_only(c)) - prt_printf(out, "inconsistency detected - emergency read only at journal seq %llu\n", - journal_cur_seq(&c->journal)); + bch2_fs_emergency_read_only2(c, out); return true; case BCH_ON_ERROR_panic: - bch2_print_string_as_lines_nonblocking(KERN_ERR, out->buf); + bch2_print_str(c, KERN_ERR, out->buf); panic(bch2_fmt(c, "panic after error")); return true; default: @@ -71,7 +69,7 @@ static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *tra if (trans) bch2_trans_updates_to_text(&buf, trans); bool ret = __bch2_inconsistent_error(c, &buf); - bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf); + bch2_print_str_nonblocking(c, KERN_ERR, buf.buf); printbuf_exit(&buf); return ret; @@ -100,11 +98,11 @@ int __bch2_topology_error(struct bch_fs *c, struct printbuf *out) prt_printf(out, "btree topology error: "); set_bit(BCH_FS_topology_error, &c->flags); - if (!test_bit(BCH_FS_recovery_running, &c->flags)) { + if (!test_bit(BCH_FS_in_recovery, &c->flags)) { __bch2_inconsistent_error(c, out); return -BCH_ERR_btree_need_topology_repair; } else { - return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?: + return bch2_run_explicit_recovery_pass(c, out, BCH_RECOVERY_PASS_check_topology, 0) ?: -BCH_ERR_btree_node_read_validate_error; } } @@ -121,7 +119,7 @@ int bch2_fs_topology_error(struct bch_fs *c, const char *fmt, ...) 
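/*
 * A minimal usage sketch of the logging pattern the error.c hunks around here
 * switch to, assuming a struct bch_dev *ca and struct bch_fs *c as in the
 * surrounding code: the message is accumulated in a printbuf with a
 * per-device prefix, then emitted once with bch2_print_str():
 *
 *	struct printbuf buf = PRINTBUF;
 *	__bch2_log_msg_start(ca->name, &buf);
 *	prt_printf(&buf, "writes erroring, setting device ro\n");
 *	bch2_fs_emergency_read_only2(c, &buf);
 *	bch2_print_str(c, KERN_ERR, buf.buf);
 *	printbuf_exit(&buf);
 */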
va_end(args); int ret = __bch2_topology_error(c, &buf); - bch2_print_string_as_lines(KERN_ERR, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); return ret; @@ -151,14 +149,17 @@ void bch2_io_error_work(struct work_struct *work) bool dev = !__bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro, BCH_FORCE_IF_DEGRADED); + struct printbuf buf = PRINTBUF; + __bch2_log_msg_start(ca->name, &buf); - bch_err(ca, - "writes erroring for %u seconds, setting %s ro", + prt_printf(&buf, "writes erroring for %u seconds, setting %s ro", c->opts.write_error_timeout, dev ? "device" : "filesystem"); if (!dev) - bch2_fs_emergency_read_only(c); + bch2_fs_emergency_read_only2(c, &buf); + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); } out: up_write(&c->state_lock); @@ -328,7 +329,7 @@ static int do_fsck_ask_yn(struct bch_fs *c, if (bch2_fs_stdio_redirect(c)) bch2_print(c, "%s", question->buf); else - bch2_print_string_as_lines(KERN_ERR, question->buf); + bch2_print_str(c, KERN_ERR, question->buf); int ask = bch2_fsck_ask_yn(c, trans); @@ -376,15 +377,63 @@ static struct fsck_err_state *count_fsck_err_locked(struct bch_fs *c, return s; } -void __bch2_count_fsck_err(struct bch_fs *c, - enum bch_sb_error_id id, const char *msg, - bool *repeat, bool *print, bool *suppress) +bool __bch2_count_fsck_err(struct bch_fs *c, + enum bch_sb_error_id id, struct printbuf *msg) { bch2_sb_error_count(c, id); mutex_lock(&c->fsck_error_msgs_lock); - count_fsck_err_locked(c, id, msg, repeat, print, suppress); + bool print = true, repeat = false, suppress = false; + + count_fsck_err_locked(c, id, msg->buf, &repeat, &print, &suppress); mutex_unlock(&c->fsck_error_msgs_lock); + + if (suppress) + prt_printf(msg, "Ratelimiting new instances of previous error\n"); + + return print && !repeat; +} + +int bch2_fsck_err_opt(struct bch_fs *c, + enum bch_fsck_flags flags, + enum bch_sb_error_id err) +{ + if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra))) + flags |= fsck_flags_extra[err]; + + if (test_bit(BCH_FS_in_fsck, &c->flags)) { + if (!(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) + return -BCH_ERR_fsck_repair_unimplemented; + + switch (c->opts.fix_errors) { + case FSCK_FIX_exit: + return -BCH_ERR_fsck_errors_not_fixed; + case FSCK_FIX_yes: + if (flags & FSCK_CAN_FIX) + return -BCH_ERR_fsck_fix; + fallthrough; + case FSCK_FIX_no: + if (flags & FSCK_CAN_IGNORE) + return -BCH_ERR_fsck_ignore; + return -BCH_ERR_fsck_errors_not_fixed; + case FSCK_FIX_ask: + if (flags & FSCK_AUTOFIX) + return -BCH_ERR_fsck_fix; + return -BCH_ERR_fsck_ask; + default: + BUG(); + } + } else { + if ((flags & FSCK_AUTOFIX) && + (c->opts.errors == BCH_ON_ERROR_continue || + c->opts.errors == BCH_ON_ERROR_fix_safe)) + return -BCH_ERR_fsck_fix; + + if (c->opts.errors == BCH_ON_ERROR_continue && + (flags & FSCK_CAN_IGNORE)) + return -BCH_ERR_fsck_ignore; + return -BCH_ERR_fsck_errors_not_fixed; + } } int __bch2_fsck_err(struct bch_fs *c, @@ -475,7 +524,7 @@ int __bch2_fsck_err(struct bch_fs *c, } goto print; - } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) { + } else if (!test_bit(BCH_FS_in_fsck, &c->flags)) { if (c->opts.errors != BCH_ON_ERROR_continue || !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { prt_str_indented(out, ", shutting down\n" @@ -534,7 +583,7 @@ int __bch2_fsck_err(struct bch_fs *c, !(flags & FSCK_CAN_IGNORE))) ret = -BCH_ERR_fsck_errors_not_fixed; - if (test_bit(BCH_FS_fsck_running, &c->flags) && + if (test_bit(BCH_FS_in_fsck, &c->flags) && (ret != -BCH_ERR_fsck_fix && ret != -BCH_ERR_fsck_ignore)) { exiting = true; @@ 
-559,7 +608,7 @@ print: if (bch2_fs_stdio_redirect(c)) bch2_print(c, "%s", out->buf); else - bch2_print_string_as_lines(KERN_ERR, out->buf); + bch2_print_str(c, KERN_ERR, out->buf); } if (s) @@ -693,25 +742,9 @@ void bch2_inum_offset_err_msg(struct bch_fs *c, struct printbuf *out, int bch2_inum_snap_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out, struct bpos pos) { - struct bch_fs *c = trans->c; - int ret = 0; - - if (!bch2_snapshot_is_leaf(c, pos.snapshot)) - prt_str(out, "(multiple snapshots) "); - - subvol_inum inum = { - .subvol = bch2_snapshot_tree_oldest_subvol(c, pos.snapshot), - .inum = pos.inode, - }; - - if (inum.subvol) { - ret = bch2_inum_to_path(trans, inum, out); - if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) - return ret; - } - - if (!inum.subvol || ret) - prt_printf(out, "inum %llu:%u", pos.inode, pos.snapshot); + int ret = bch2_inum_snapshot_to_path(trans, pos.inode, pos.snapshot, NULL, out); + if (ret) + return ret; prt_printf(out, " offset %llu: ", pos.offset << 8); return 0; diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h index 4a364fd44abe..5123d4c86770 100644 --- a/fs/bcachefs/error.h +++ b/fs/bcachefs/error.h @@ -18,7 +18,12 @@ struct work_struct; /* Error messages: */ -void bch2_log_msg_start(struct bch_fs *, struct printbuf *); +void __bch2_log_msg_start(const char *, struct printbuf *); + +static inline void bch2_log_msg_start(struct bch_fs *c, struct printbuf *out) +{ + __bch2_log_msg_start(c->name, out); +} /* * Inconsistency errors: The on disk data is inconsistent. If these occur during @@ -76,12 +81,14 @@ struct fsck_err_state { #define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err) -void __bch2_count_fsck_err(struct bch_fs *, - enum bch_sb_error_id, const char *, - bool *, bool *, bool *); +bool __bch2_count_fsck_err(struct bch_fs *, enum bch_sb_error_id, struct printbuf *); #define bch2_count_fsck_err(_c, _err, ...) 
\ __bch2_count_fsck_err(_c, BCH_FSCK_ERR_##_err, __VA_ARGS__) +int bch2_fsck_err_opt(struct bch_fs *, + enum bch_fsck_flags, + enum bch_sb_error_id); + __printf(5, 6) __cold int __bch2_fsck_err(struct bch_fs *, struct btree_trans *, enum bch_fsck_flags, diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c index 6bb42985306e..b899ee75f5b9 100644 --- a/fs/bcachefs/extent_update.c +++ b/fs/bcachefs/extent_update.c @@ -37,16 +37,17 @@ static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k) return lru + ret * 2; } +#define EXTENT_ITERS_MAX 64 + static int count_iters_for_insert(struct btree_trans *trans, struct bkey_s_c k, unsigned offset, struct bpos *end, - unsigned *nr_iters, - unsigned max_iters) + unsigned *nr_iters) { int ret = 0, ret2 = 0; - if (*nr_iters >= max_iters) { + if (*nr_iters >= EXTENT_ITERS_MAX) { *end = bpos_min(*end, k.k->p); ret = 1; } @@ -56,7 +57,7 @@ static int count_iters_for_insert(struct btree_trans *trans, case KEY_TYPE_reflink_v: *nr_iters += bch2_bkey_nr_alloc_ptrs(k); - if (*nr_iters >= max_iters) { + if (*nr_iters >= EXTENT_ITERS_MAX) { *end = bpos_min(*end, k.k->p); ret = 1; } @@ -81,7 +82,7 @@ static int count_iters_for_insert(struct btree_trans *trans, *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k); - if (*nr_iters >= max_iters) { + if (*nr_iters >= EXTENT_ITERS_MAX) { struct bpos pos = bkey_start_pos(k.k); pos.offset += min_t(u64, k.k->size, r_k.k->p.offset - idx); @@ -100,59 +101,31 @@ static int count_iters_for_insert(struct btree_trans *trans, return ret2 ?: ret; } -#define EXTENT_ITERS_MAX (BTREE_ITER_INITIAL / 3) - int bch2_extent_atomic_end(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_i *insert, struct bpos *end) { - struct btree_iter copy; - struct bkey_s_c k; unsigned nr_iters = 0; - int ret; - - ret = bch2_btree_iter_traverse(trans, iter); - if (ret) - return ret; - - *end = insert->k.p; - - /* extent_update_to_keys(): */ - nr_iters += 1; - - ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end, - &nr_iters, EXTENT_ITERS_MAX / 2); - if (ret < 0) - return ret; + struct btree_iter copy; bch2_trans_copy_iter(trans, &copy, iter); - for_each_btree_key_max_continue_norestart(trans, copy, insert->k.p, 0, k, ret) { - unsigned offset = 0; + int ret = bch2_btree_iter_traverse(trans, &copy); + if (ret) + goto err; - if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) - offset = bkey_start_offset(&insert->k) - - bkey_start_offset(k.k); + struct bkey_s_c k; + for_each_btree_key_max_continue_norestart(trans, copy, *end, 0, k, ret) { + unsigned offset = 0; - /* extent_handle_overwrites(): */ - switch (bch2_extent_overlap(&insert->k, k.k)) { - case BCH_EXTENT_OVERLAP_ALL: - case BCH_EXTENT_OVERLAP_FRONT: - nr_iters += 1; - break; - case BCH_EXTENT_OVERLAP_BACK: - case BCH_EXTENT_OVERLAP_MIDDLE: - nr_iters += 2; - break; - } + if (bkey_gt(iter->pos, bkey_start_pos(k.k))) + offset = iter->pos.offset - bkey_start_offset(k.k); - ret = count_iters_for_insert(trans, k, offset, end, - &nr_iters, EXTENT_ITERS_MAX); + ret = count_iters_for_insert(trans, k, offset, end, &nr_iters); if (ret) break; } - +err: bch2_trans_iter_exit(trans, &copy); return ret < 0 ? 
ret : 0; } @@ -161,10 +134,8 @@ int bch2_extent_trim_atomic(struct btree_trans *trans, struct btree_iter *iter, struct bkey_i *k) { - struct bpos end; - int ret; - - ret = bch2_extent_atomic_end(trans, iter, k, &end); + struct bpos end = k->k.p; + int ret = bch2_extent_atomic_end(trans, iter, &end); if (ret) return ret; diff --git a/fs/bcachefs/extent_update.h b/fs/bcachefs/extent_update.h index 6f5cf449361a..34467db53f45 100644 --- a/fs/bcachefs/extent_update.h +++ b/fs/bcachefs/extent_update.h @@ -5,7 +5,7 @@ #include "bcachefs.h" int bch2_extent_atomic_end(struct btree_trans *, struct btree_iter *, - struct bkey_i *, struct bpos *); + struct bpos *); int bch2_extent_trim_atomic(struct btree_trans *, struct btree_iter *, struct bkey_i *); diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index e597fb9c9823..1ac9897f189d 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -45,6 +45,49 @@ static void bch2_extent_crc_pack(union bch_extent_crc *, struct bch_extent_crc_unpacked, enum bch_extent_entry_type); +void bch2_io_failures_to_text(struct printbuf *out, + struct bch_fs *c, + struct bch_io_failures *failed) +{ + static const char * const error_types[] = { + "io", "checksum", "ec reconstruct", NULL + }; + + for (struct bch_dev_io_failures *f = failed->devs; + f < failed->devs + failed->nr; + f++) { + unsigned errflags = + ((!!f->failed_io) << 0) | + ((!!f->failed_csum_nr) << 1) | + ((!!f->failed_ec) << 2); + + if (!errflags) + continue; + + bch2_printbuf_make_room(out, 1024); + rcu_read_lock(); + out->atomic++; + struct bch_dev *ca = bch2_dev_rcu_noerror(c, f->dev); + if (ca) + prt_str(out, ca->name); + else + prt_printf(out, "(invalid device %u)", f->dev); + --out->atomic; + rcu_read_unlock(); + + prt_char(out, ' '); + + if (is_power_of_2(errflags)) { + prt_bitflags(out, error_types, errflags); + prt_str(out, " error"); + } else { + prt_str(out, "errors: "); + prt_bitflags(out, error_types, errflags); + } + prt_newline(out); + } +} + struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f, unsigned dev) { @@ -79,6 +122,22 @@ void bch2_mark_io_failure(struct bch_io_failures *failed, f->failed_csum_nr++; } +void bch2_mark_btree_validate_failure(struct bch_io_failures *failed, + unsigned dev) +{ + struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, dev); + + if (!f) { + BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs)); + + f = &failed->devs[failed->nr++]; + memset(f, 0, sizeof(*f)); + f->dev = dev; + } + + f->failed_btree_validate = true; +} + static inline u64 dev_latency(struct bch_dev *ca) { return ca ? 
atomic64_read(&ca->cur_latency[READ]) : S64_MAX; @@ -105,7 +164,7 @@ static inline bool ptr_better(struct bch_fs *c, if (unlikely(failed_delta)) return failed_delta < 0; - if (unlikely(bch2_force_reconstruct_read)) + if (static_branch_unlikely(&bch2_force_reconstruct_read)) return p1.do_ec_reconstruct > p2.do_ec_reconstruct; if (unlikely(p1.do_ec_reconstruct || p2.do_ec_reconstruct)) @@ -136,12 +195,8 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, if (k.k->type == KEY_TYPE_error) return -BCH_ERR_key_type_error; - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - - if (bch2_bkey_extent_ptrs_flags(ptrs) & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) - return -BCH_ERR_extent_poisoned; - rcu_read_lock(); + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; struct extent_ptr_decoded p; u64 pick_latency; @@ -162,7 +217,15 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, if (dev >= 0 && p.ptr.dev != dev) continue; - struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev); + + if (unlikely(!ca && p.ptr.dev != BCH_SB_MEMBER_INVALID)) { + rcu_read_unlock(); + int ret = bch2_dev_missing_bkey(c, k, p.ptr.dev); + if (ret) + return ret; + rcu_read_lock(); + } if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr))) continue; @@ -175,6 +238,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, if (ca && ca->mi.state != BCH_MEMBER_STATE_failed) { have_io_errors |= f->failed_io; + have_io_errors |= f->failed_btree_validate; have_io_errors |= f->failed_ec; } have_csum_errors |= !!f->failed_csum_nr; @@ -182,6 +246,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, if (p.has_ec && (f->failed_io || f->failed_csum_nr)) p.do_ec_reconstruct = true; else if (f->failed_io || + f->failed_btree_validate || f->failed_csum_nr > c->opts.checksum_err_retry_nr) continue; } @@ -194,7 +259,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, p.do_ec_reconstruct = true; } - if (bch2_force_reconstruct_read && p.has_ec) + if (static_branch_unlikely(&bch2_force_reconstruct_read) && p.has_ec) p.do_ec_reconstruct = true; u64 p_latency = dev_latency(ca); @@ -1071,33 +1136,50 @@ void bch2_extent_ptr_set_cached(struct bch_fs *c, struct bkey_s k, struct bch_extent_ptr *ptr) { - struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); + struct bkey_ptrs ptrs; union bch_extent_entry *entry; struct extent_ptr_decoded p; + bool have_cached_ptr; + unsigned drop_dev = ptr->dev; rcu_read_lock(); - if (!want_cached_ptr(c, opts, ptr)) { - bch2_bkey_drop_ptr_noerror(k, ptr); - goto out; - } +restart_drop_ptrs: + ptrs = bch2_bkey_ptrs(k); + have_cached_ptr = false; - /* - * Stripes can't contain cached data, for - reasons. - * - * Possibly something we can fix in the future? - */ - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) - if (&entry->ptr == ptr) { - if (p.has_ec) - bch2_bkey_drop_ptr_noerror(k, ptr); - else - ptr->cached = true; - goto out; + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + /* + * Check if it's erasure coded - stripes can't contain cached + * data. Possibly something we can fix in the future? 
+ */ + if (&entry->ptr == ptr && p.has_ec) + goto drop; + + if (p.ptr.cached) { + if (have_cached_ptr || !want_cached_ptr(c, opts, &p.ptr)) { + bch2_bkey_drop_ptr_noerror(k, &entry->ptr); + ptr = NULL; + goto restart_drop_ptrs; + } + + have_cached_ptr = true; } + } + + if (!ptr) + bkey_for_each_ptr(ptrs, ptr2) + if (ptr2->dev == drop_dev) + ptr = ptr2; - BUG(); -out: + if (have_cached_ptr || !want_cached_ptr(c, opts, ptr)) + goto drop; + + ptr->cached = true; + rcu_read_unlock(); + return; +drop: rcu_read_unlock(); + bch2_bkey_drop_ptr_noerror(k, ptr); } /* diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h index e78a39e7e18f..b8590e51b76e 100644 --- a/fs/bcachefs/extents.h +++ b/fs/bcachefs/extents.h @@ -380,13 +380,6 @@ out: \ /* Iterate over pointers in KEY_TYPE_extent: */ -#define extent_for_each_entry_from(_e, _entry, _start) \ - __bkey_extent_entry_for_each_from(_start, \ - extent_entry_last(_e), _entry) - -#define extent_for_each_entry(_e, _entry) \ - extent_for_each_entry_from(_e, _entry, (_e).v->start) - #define extent_ptr_next(_e, _ptr) \ __bkey_ptr_next(_ptr, extent_entry_last(_e)) @@ -399,10 +392,13 @@ out: \ /* utility code common to all keys with pointers: */ +void bch2_io_failures_to_text(struct printbuf *, struct bch_fs *, + struct bch_io_failures *); struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *, unsigned); void bch2_mark_io_failure(struct bch_io_failures *, struct extent_ptr_decoded *, bool); +void bch2_mark_btree_validate_failure(struct bch_io_failures *, unsigned); int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c, struct bch_io_failures *, struct extent_ptr_decoded *, int); diff --git a/fs/bcachefs/extents_types.h b/fs/bcachefs/extents_types.h index e51529dca4c2..b23ce4a373c0 100644 --- a/fs/bcachefs/extents_types.h +++ b/fs/bcachefs/extents_types.h @@ -34,6 +34,7 @@ struct bch_io_failures { u8 dev; unsigned failed_csum_nr:6, failed_io:1, + failed_btree_validate:1, failed_ec:1; } devs[BCH_REPLICAS_MAX + 1]; }; diff --git a/fs/bcachefs/fast_list.c b/fs/bcachefs/fast_list.c new file mode 100644 index 000000000000..2faec143eb31 --- /dev/null +++ b/fs/bcachefs/fast_list.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Fast, unordered lists + * + * Supports add, remove, and iterate + * + * Underneath, they're a radix tree and an IDA, with a percpu buffer for slot + * allocation and freeing. + * + * This means that adding, removing, and iterating over items is lockless, + * except when refilling/emptying the percpu slot buffers. + */ + +#include "fast_list.h" + +struct fast_list_pcpu { + u32 nr; + u32 entries[31]; +}; + +static int fast_list_alloc_idx(struct fast_list *l, gfp_t gfp) +{ + int idx = ida_alloc_range(&l->slots_allocated, 1, INT_MAX, gfp); + if (unlikely(idx < 0)) + return 0; + + if (unlikely(!genradix_ptr_alloc_inlined(&l->items, idx, gfp))) { + ida_free(&l->slots_allocated, idx); + return 0; + } + + return idx; +} + +/** + * fast_list_get_idx - get a slot in a fast_list + * @l: list to get slot in + * + * This allocates a slot in the radix tree without storing to it, so that we can + * take the potential memory allocation failure early and do the list add later + * when we can't take an allocation failure. 
+ * + * Returns: positive integer on success, -ENOMEM on failure + */ +int fast_list_get_idx(struct fast_list *l) +{ + unsigned long flags; + int idx; +retry: + local_irq_save(flags); + struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer); + + if (unlikely(!lp->nr)) { + u32 entries[16], nr = 0; + + local_irq_restore(flags); + while (nr < ARRAY_SIZE(entries) && + (idx = fast_list_alloc_idx(l, GFP_KERNEL))) + entries[nr++] = idx; + local_irq_save(flags); + + lp = this_cpu_ptr(l->buffer); + + while (nr && lp->nr < ARRAY_SIZE(lp->entries)) + lp->entries[lp->nr++] = entries[--nr]; + + if (unlikely(nr)) { + local_irq_restore(flags); + while (nr) + ida_free(&l->slots_allocated, entries[--nr]); + goto retry; + } + + if (unlikely(!lp->nr)) { + local_irq_restore(flags); + return -ENOMEM; + } + } + + idx = lp->entries[--lp->nr]; + local_irq_restore(flags); + + return idx; +} + +/** + * fast_list_add - add an item to a fast_list + * @l: list + * @item: item to add + * + * Allocates a slot in the radix tree and stores to it and then returns the + * slot index, which must be passed to fast_list_remove(). + * + * Returns: positive integer on success, -ENOMEM on failure + */ +int fast_list_add(struct fast_list *l, void *item) +{ + int idx = fast_list_get_idx(l); + if (idx < 0) + return idx; + + *genradix_ptr_inlined(&l->items, idx) = item; + return idx; +} + +/** + * fast_list_remove - remove an item from a fast_list + * @l: list + * @idx: item's slot index + * + * Zeroes out the slot in the radix tree and frees the slot for future + * fast_list_add() operations. + */ +void fast_list_remove(struct fast_list *l, unsigned idx) +{ + u32 entries[16], nr = 0; + unsigned long flags; + + if (!idx) + return; + + *genradix_ptr_inlined(&l->items, idx) = NULL; + + local_irq_save(flags); + struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer); + + if (unlikely(lp->nr == ARRAY_SIZE(lp->entries))) + while (nr < ARRAY_SIZE(entries)) + entries[nr++] = lp->entries[--lp->nr]; + + lp->entries[lp->nr++] = idx; + local_irq_restore(flags); + + if (unlikely(nr)) + while (nr) + ida_free(&l->slots_allocated, entries[--nr]); +} + +void fast_list_exit(struct fast_list *l) +{ + /* XXX: warn if list isn't empty */ + free_percpu(l->buffer); + ida_destroy(&l->slots_allocated); + genradix_free(&l->items); +} + +int fast_list_init(struct fast_list *l) +{ + genradix_init(&l->items); + ida_init(&l->slots_allocated); + l->buffer = alloc_percpu(*l->buffer); + if (!l->buffer) + return -ENOMEM; + return 0; +} diff --git a/fs/bcachefs/fast_list.h b/fs/bcachefs/fast_list.h new file mode 100644 index 000000000000..73c9bf591fd6 --- /dev/null +++ b/fs/bcachefs/fast_list.h @@ -0,0 +1,41 @@ +#ifndef _LINUX_FAST_LIST_H +#define _LINUX_FAST_LIST_H + +#include <linux/generic-radix-tree.h> +#include <linux/idr.h> +#include <linux/percpu.h> + +struct fast_list_pcpu; + +struct fast_list { + GENRADIX(void *) items; + struct ida slots_allocated;; + struct fast_list_pcpu __percpu + *buffer; +}; + +static inline void *fast_list_iter_peek(struct genradix_iter *iter, + struct fast_list *list) +{ + void **p; + while ((p = genradix_iter_peek(iter, &list->items)) && !*p) + genradix_iter_advance(iter, &list->items); + + return p ? 
*p : NULL; +} + +#define fast_list_for_each_from(_list, _iter, _i, _start) \ + for (_iter = genradix_iter_init(&(_list)->items, _start); \ + (_i = fast_list_iter_peek(&(_iter), _list)) != NULL; \ + genradix_iter_advance(&(_iter), &(_list)->items)) + +#define fast_list_for_each(_list, _iter, _i) \ + fast_list_for_each_from(_list, _iter, _i, 0) + +int fast_list_get_idx(struct fast_list *l); +int fast_list_add(struct fast_list *l, void *item); +void fast_list_remove(struct fast_list *l, unsigned idx); +void fast_list_exit(struct fast_list *l); +int fast_list_init(struct fast_list *l); + +#endif /* _LINUX_FAST_LIST_H */ diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c index 535bc5fcbcc0..1f5154d9676b 100644 --- a/fs/bcachefs/fs-io-direct.c +++ b/fs/bcachefs/fs-io-direct.c @@ -3,6 +3,7 @@ #include "bcachefs.h" #include "alloc_foreground.h" +#include "enumerated_ref.h" #include "fs.h" #include "fs-io.h" #include "fs-io-direct.h" @@ -401,7 +402,7 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio) ret = dio->op.error ?: ((long) dio->written << 9); bio_put(&dio->op.wbio.bio); - bch2_write_ref_put(c, BCH_WRITE_REF_dio_write); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_dio_write); /* inode->i_dio_count is our ref on inode and thus bch_fs */ inode_dio_end(&inode->v); @@ -606,7 +607,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) prefetch(&inode->ei_inode); prefetch((void *) &inode->ei_inode + 64); - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_dio_write)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_dio_write)) return -EROFS; inode_lock(&inode->v); @@ -675,7 +676,7 @@ err_put_bio: bio_put(bio); inode_dio_end(&inode->v); err_put_write_ref: - bch2_write_ref_put(c, BCH_WRITE_REF_dio_write); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_dio_write); goto out; } diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c index e072900e6a5b..fbae9c1de746 100644 --- a/fs/bcachefs/fs-io-pagecache.c +++ b/fs/bcachefs/fs-io-pagecache.c @@ -605,10 +605,14 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf) struct address_space *mapping = file->f_mapping; struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch2_folio_reservation res; - unsigned len; - loff_t isize; vm_fault_t ret; + loff_t file_offset = round_down(vmf->pgoff << PAGE_SHIFT, block_bytes(c)); + unsigned offset = file_offset - folio_pos(folio); + unsigned len = max(PAGE_SIZE, block_bytes(c)); + + BUG_ON(offset + len > folio_size(folio)); + bch2_folio_reservation_init(c, inode, &res); sb_start_pagefault(inode->v.i_sb); @@ -623,24 +627,24 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf) bch2_pagecache_add_get(inode); folio_lock(folio); - isize = i_size_read(&inode->v); + u64 isize = i_size_read(&inode->v); - if (folio->mapping != mapping || folio_pos(folio) >= isize) { + if (folio->mapping != mapping || file_offset >= isize) { folio_unlock(folio); ret = VM_FAULT_NOPAGE; goto out; } - len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio)); + len = min_t(unsigned, len, isize - file_offset); if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?: - bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) { + bch2_folio_reservation_get(c, inode, folio, &res, offset, len)) { folio_unlock(folio); ret = VM_FAULT_SIGBUS; goto out; } - bch2_set_folio_dirty(c, inode, folio, &res, 0, len); + bch2_set_folio_dirty(c, inode, folio, &res, offset, len); bch2_folio_reservation_put(c, inode, &res); folio_wait_stable(folio); diff --git a/fs/bcachefs/fs-io.c 
b/fs/bcachefs/fs-io.c index 9657144666b8..b1e9ee28fc0f 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -7,6 +7,7 @@ #include "btree_update.h" #include "buckets.h" #include "clock.h" +#include "enumerated_ref.h" #include "error.h" #include "extents.h" #include "extent_update.h" @@ -48,7 +49,8 @@ static void nocow_flush_endio(struct bio *_bio) struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio); closure_put(bio->cl); - percpu_ref_put(&bio->ca->io_ref[WRITE]); + enumerated_ref_put(&bio->ca->io_ref[WRITE], + BCH_DEV_WRITE_REF_nocow_flush); bio_put(&bio->bio); } @@ -71,7 +73,8 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c, for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) { rcu_read_lock(); ca = rcu_dereference(c->devs[dev]); - if (ca && !percpu_ref_tryget(&ca->io_ref[WRITE])) + if (ca && !enumerated_ref_tryget(&ca->io_ref[WRITE], + BCH_DEV_WRITE_REF_nocow_flush)) ca = NULL; rcu_read_unlock(); @@ -151,10 +154,9 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, inode->v.i_ino, (u64) inode->v.i_blocks, sectors, inode->ei_inode.bi_sectors); - bool repeat = false, print = false, suppress = false; - bch2_count_fsck_err(c, vfs_inode_i_blocks_underflow, buf.buf, &repeat, &print, &suppress); + bool print = bch2_count_fsck_err(c, vfs_inode_i_blocks_underflow, &buf); if (print) - bch2_print_str(c, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); if (sectors < 0) @@ -220,7 +222,7 @@ static int bch2_flush_inode(struct bch_fs *c, if (c->opts.journal_flush_disabled) return 0; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_fsync)) return -EROFS; u64 seq; @@ -228,7 +230,7 @@ static int bch2_flush_inode(struct bch_fs *c, bch2_get_inode_journal_seq_trans(trans, inode_inum(inode), &seq)) ?: bch2_journal_flush_seq(&c->journal, seq, TASK_INTERRUPTIBLE) ?: bch2_inode_flush_nocow_writes(c, inode); - bch2_write_ref_put(c, BCH_WRITE_REF_fsync); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_fsync); return ret; } @@ -526,11 +528,9 @@ int bchfs_truncate(struct mnt_idmap *idmap, inode->v.i_ino, (u64) inode->v.i_blocks, inode->ei_inode.bi_sectors); - bool repeat = false, print = false, suppress = false; - bch2_count_fsck_err(c, vfs_inode_i_blocks_not_zero_at_truncate, buf.buf, - &repeat, &print, &suppress); + bool print = bch2_count_fsck_err(c, vfs_inode_i_blocks_not_zero_at_truncate, &buf); if (print) - bch2_print_str(c, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); } @@ -821,7 +821,7 @@ long bch2_fallocate_dispatch(struct file *file, int mode, struct bch_fs *c = inode->v.i_sb->s_fs_info; long ret; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_fallocate)) return -EROFS; inode_lock(&inode->v); @@ -845,7 +845,7 @@ long bch2_fallocate_dispatch(struct file *file, int mode, err: bch2_pagecache_block_put(inode); inode_unlock(&inode->v); - bch2_write_ref_put(c, BCH_WRITE_REF_fallocate); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_fallocate); return bch2_err_class(ret); } diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c index a82dfce9e4ad..05361a793206 100644 --- a/fs/bcachefs/fs-ioctl.c +++ b/fs/bcachefs/fs-ioctl.c @@ -172,7 +172,10 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) if (get_user(flags, arg)) return -EFAULT; - bch_notice(c, "shutdown by ioctl type %u", flags); + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + 
prt_printf(&buf, "shutdown by ioctl type %u", flags); switch (flags) { case FSOP_GOING_FLAGS_DEFAULT: @@ -180,20 +183,23 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) if (ret) break; bch2_journal_flush(&c->journal); - bch2_fs_emergency_read_only(c); + bch2_fs_emergency_read_only2(c, &buf); bdev_thaw(c->vfs_sb->s_bdev); break; case FSOP_GOING_FLAGS_LOGFLUSH: bch2_journal_flush(&c->journal); fallthrough; case FSOP_GOING_FLAGS_NOLOGFLUSH: - bch2_fs_emergency_read_only(c); + bch2_fs_emergency_read_only2(c, &buf); break; default: ret = -EINVAL; - break; + goto noprint; } + bch2_print_str(c, KERN_ERR, buf.buf); +noprint: + printbuf_exit(&buf); return ret; } diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 4b742e62255b..ddfe89d84966 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -191,11 +191,6 @@ int bch2_fs_quota_transfer(struct bch_fs *c, return ret; } -static bool subvol_inum_eq(subvol_inum a, subvol_inum b) -{ - return a.subvol == b.subvol && a.inum == b.inum; -} - static u32 bch2_vfs_inode_hash_fn(const void *data, u32 len, u32 seed) { const subvol_inum *inum = data; @@ -352,9 +347,8 @@ repeat: if (!trans) { __wait_on_freeing_inode(c, inode, inum); } else { - bch2_trans_unlock(trans); - __wait_on_freeing_inode(c, inode, inum); - int ret = bch2_trans_relock(trans); + int ret = drop_locks_do(trans, + (__wait_on_freeing_inode(c, inode, inum), 0)); if (ret) return ERR_PTR(ret); } @@ -1664,33 +1658,9 @@ static int fssetxattr_inode_update_fn(struct btree_trans *trans, return -EINVAL; if (s->casefold != bch2_inode_casefold(c, bi)) { -#ifdef CONFIG_UNICODE - int ret = 0; - /* Not supported on individual files. */ - if (!S_ISDIR(bi->bi_mode)) - return -EOPNOTSUPP; - - /* - * Make sure the dir is empty, as otherwise we'd need to - * rehash everything and update the dirent keys. 
- */ - ret = bch2_empty_dir_trans(trans, inode_inum(inode)); - if (ret < 0) - return ret; - - ret = bch2_request_incompat_feature(c, bcachefs_metadata_version_casefolding); + int ret = bch2_inode_set_casefold(trans, inode_inum(inode), bi, s->casefold); if (ret) return ret; - - bch2_check_set_feature(c, BCH_FEATURE_casefolding); - - bi->bi_casefold = s->casefold + 1; - bi->bi_fields_set |= BIT(Inode_opt_casefold); - -#else - printk(KERN_ERR "Cannot use casefolding on a kernel without CONFIG_UNICODE\n"); - return -EOPNOTSUPP; -#endif } if (s->set_project) { @@ -2352,12 +2322,14 @@ static int bch2_show_devname(struct seq_file *seq, struct dentry *root) struct bch_fs *c = root->d_sb->s_fs_info; bool first = true; - for_each_online_member(c, ca) { + rcu_read_lock(); + for_each_online_member_rcu(c, ca) { if (!first) seq_putc(seq, ':'); first = false; seq_puts(seq, ca->disk_sb.sb_name); } + rcu_read_unlock(); return 0; } @@ -2464,7 +2436,7 @@ static int bch2_fs_get_tree(struct fs_context *fc) struct inode *vinode; struct bch2_opts_parse *opts_parse = fc->fs_private; struct bch_opts opts = opts_parse->opts; - darray_str devs; + darray_const_str devs; darray_fs devs_to_fs = {}; int ret; @@ -2488,7 +2460,7 @@ static int bch2_fs_get_tree(struct fs_context *fc) if (!IS_ERR(sb)) goto got_sb; - c = bch2_fs_open(devs.data, devs.nr, opts); + c = bch2_fs_open(&devs, &opts); ret = PTR_ERR_OR_ZERO(c); if (ret) goto err; @@ -2538,7 +2510,12 @@ got_sb: sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1; sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec); super_set_uuid(sb, c->sb.user_uuid.b, sizeof(c->sb.user_uuid)); - super_set_sysfs_name_uuid(sb); + + if (c->sb.multi_device) + super_set_sysfs_name_uuid(sb); + else + strscpy(sb->s_sysfs_name, c->name, sizeof(sb->s_sysfs_name)); + sb->s_shrink->seeks = 0; c->vfs_sb = sb; strscpy(sb->s_id, c->name, sizeof(sb->s_id)); @@ -2549,15 +2526,16 @@ got_sb: sb->s_bdi->ra_pages = VM_READAHEAD_PAGES; - for_each_online_member(c, ca) { + rcu_read_lock(); + for_each_online_member_rcu(c, ca) { struct block_device *bdev = ca->disk_sb.bdev; /* XXX: create an anonymous device for multi device filesystems */ sb->s_bdev = bdev; sb->s_dev = bdev->bd_dev; - percpu_ref_put(&ca->io_ref[READ]); break; } + rcu_read_unlock(); c->dev = sb->s_dev; diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 71d428f376a5..49f46df8340e 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -109,27 +109,6 @@ static int subvol_lookup(struct btree_trans *trans, u32 subvol, return ret; } -static int lookup_inode(struct btree_trans *trans, u64 inode_nr, u32 snapshot, - struct bch_inode_unpacked *inode) -{ - struct btree_iter iter; - struct bkey_s_c k; - int ret; - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inode_nr, snapshot), 0); - ret = bkey_err(k); - if (ret) - goto err; - - ret = bkey_is_inode(k.k) - ? 
bch2_inode_unpack(k, inode) - : -BCH_ERR_ENOENT_inode; -err: - bch2_trans_iter_exit(trans, &iter); - return ret; -} - static int lookup_dirent_in_snapshot(struct btree_trans *trans, struct bch_hash_info hash_info, subvol_inum dir, struct qstr *name, @@ -231,7 +210,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, struct bch_inode_unpacked root_inode; struct bch_hash_info root_hash_info; - ret = lookup_inode(trans, root_inum.inum, snapshot, &root_inode); + ret = bch2_inode_find_by_inum_snapshot(trans, root_inum.inum, snapshot, &root_inode, 0); bch_err_msg(c, ret, "looking up root inode %llu for subvol %u", root_inum.inum, subvolid); if (ret) @@ -257,7 +236,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, * The bch2_check_dirents pass has already run, dangling dirents * shouldn't exist here: */ - ret = lookup_inode(trans, inum, snapshot, lostfound); + ret = bch2_inode_find_by_inum_snapshot(trans, inum, snapshot, lostfound, 0); bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)", inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot)); return ret; @@ -285,7 +264,7 @@ create_lostfound: u64 cpu = raw_smp_processor_id(); bch2_inode_init_early(c, lostfound); - bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode); + bch2_inode_init_late(c, lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode); lostfound->bi_dir = root_inode.bi_inum; lostfound->bi_snapshot = le32_to_cpu(st.root_snapshot); @@ -306,6 +285,7 @@ create_lostfound: &lostfound_str, lostfound->bi_inum, &lostfound->bi_dir_offset, + BTREE_UPDATE_internal_snapshot_node| STR_HASH_must_create) ?: bch2_inode_write_flags(trans, &lostfound_iter, lostfound, BTREE_UPDATE_internal_snapshot_node); @@ -431,6 +411,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * &name, inode->bi_subvol ?: inode->bi_inum, &inode->bi_dir_offset, + BTREE_UPDATE_internal_snapshot_node| STR_HASH_must_create); if (ret) { bch_err_msg(c, ret, "error creating dirent"); @@ -564,7 +545,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub u64 cpu = raw_smp_processor_id(); bch2_inode_init_early(c, &new_inode); - bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL); + bch2_inode_init_late(c, &new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL); new_inode.bi_subvol = subvolid; @@ -654,7 +635,7 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 struct bch_inode_unpacked new_inode; bch2_inode_init_early(c, &new_inode); - bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, i_mode|0600, 0, NULL); + bch2_inode_init_late(c, &new_inode, bch2_current_time(c), 0, 0, i_mode|0600, 0, NULL); new_inode.bi_size = i_size; new_inode.bi_inum = inum; new_inode.bi_snapshot = snapshot; @@ -785,12 +766,12 @@ static int ref_visible2(struct bch_fs *c, #define for_each_visible_inode(_c, _s, _w, _snapshot, _i) \ for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \ - (_i)->snapshot <= (_snapshot); _i++) \ - if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot)) + (_i)->inode.bi_snapshot <= (_snapshot); _i++) \ + if (key_visible_in_snapshot(_c, _s, _i->inode.bi_snapshot, _snapshot)) struct inode_walker_entry { struct bch_inode_unpacked inode; - u32 snapshot; + bool whiteout; u64 count; u64 i_size; }; @@ -819,13 +800,20 @@ static struct inode_walker inode_walker_init(void) static int add_inode(struct bch_fs *c, struct 
inode_walker *w, struct bkey_s_c inode) { - struct bch_inode_unpacked u; - - return bch2_inode_unpack(inode, &u) ?: - darray_push(&w->inodes, ((struct inode_walker_entry) { - .inode = u, - .snapshot = inode.k->p.snapshot, + int ret = darray_push(&w->inodes, ((struct inode_walker_entry) { + .whiteout = !bkey_is_inode(inode.k), })); + if (ret) + return ret; + + struct inode_walker_entry *n = &darray_last(w->inodes); + if (!n->whiteout) { + return bch2_inode_unpack(inode, &n->inode); + } else { + n->inode.bi_inum = inode.k->p.inode; + n->inode.bi_snapshot = inode.k->p.snapshot; + return 0; + } } static int get_inodes_all_snapshots(struct btree_trans *trans, @@ -845,13 +833,12 @@ static int get_inodes_all_snapshots(struct btree_trans *trans, w->recalculate_sums = false; w->inodes.nr = 0; - for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum), - BTREE_ITER_all_snapshots, k, ret) { - if (k.k->p.offset != inum) + for_each_btree_key_max_norestart(trans, iter, + BTREE_ID_inodes, POS(0, inum), SPOS(0, inum, U32_MAX), + BTREE_ITER_all_snapshots, k, ret) { + ret = add_inode(c, w, k); + if (ret) break; - - if (bkey_is_inode(k.k)) - add_inode(c, w, k); } bch2_trans_iter_exit(trans, &iter); @@ -863,48 +850,112 @@ static int get_inodes_all_snapshots(struct btree_trans *trans, return 0; } +static int get_visible_inodes(struct btree_trans *trans, + struct inode_walker *w, + struct snapshots_seen *s, + u64 inum) +{ + struct bch_fs *c = trans->c; + struct btree_iter iter; + struct bkey_s_c k; + int ret; + + w->inodes.nr = 0; + w->deletes.nr = 0; + + for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot), + BTREE_ITER_all_snapshots, k, ret) { + if (k.k->p.offset != inum) + break; + + if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot)) + continue; + + if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot)) + continue; + + ret = bkey_is_inode(k.k) + ? 
add_inode(c, w, k) + : snapshot_list_add(c, &w->deletes, k.k->p.snapshot); + if (ret) + break; + } + bch2_trans_iter_exit(trans, &iter); + + return ret; +} + static struct inode_walker_entry * -lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c k) +lookup_inode_for_snapshot(struct btree_trans *trans, struct inode_walker *w, struct bkey_s_c k) { - bool is_whiteout = k.k->type == KEY_TYPE_whiteout; + struct bch_fs *c = trans->c; struct inode_walker_entry *i; __darray_for_each(w->inodes, i) - if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->snapshot)) + if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->inode.bi_snapshot)) goto found; return NULL; found: - BUG_ON(k.k->p.snapshot > i->snapshot); + BUG_ON(k.k->p.snapshot > i->inode.bi_snapshot); - if (k.k->p.snapshot != i->snapshot && !is_whiteout) { - struct inode_walker_entry new = *i; - - new.snapshot = k.k->p.snapshot; - new.count = 0; - new.i_size = 0; - - struct printbuf buf = PRINTBUF; - bch2_bkey_val_to_text(&buf, c, k); + struct printbuf buf = PRINTBUF; + int ret = 0; - bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n" + if (fsck_err_on(k.k->p.snapshot != i->inode.bi_snapshot, + trans, snapshot_key_missing_inode_snapshot, + "have key for inode %llu:%u but have inode in ancestor snapshot %u\n" "unexpected because we should always update the inode when we update a key in that inode\n" "%s", - w->last_pos.inode, k.k->p.snapshot, i->snapshot, buf.buf); - printbuf_exit(&buf); + w->last_pos.inode, k.k->p.snapshot, i->inode.bi_snapshot, + (bch2_bkey_val_to_text(&buf, c, k), + buf.buf))) { + struct bch_inode_unpacked new = i->inode; + struct bkey_i whiteout; + + new.bi_snapshot = k.k->p.snapshot; + + if (!i->whiteout) { + ret = __bch2_fsck_write_inode(trans, &new); + } else { + bkey_init(&whiteout.k); + whiteout.k.type = KEY_TYPE_whiteout; + whiteout.k.p = SPOS(0, i->inode.bi_inum, i->inode.bi_snapshot); + ret = bch2_btree_insert_nonextent(trans, BTREE_ID_inodes, + &whiteout, + BTREE_UPDATE_internal_snapshot_node); + } + + if (ret) + goto fsck_err; + + ret = bch2_trans_commit(trans, NULL, NULL, 0); + if (ret) + goto fsck_err; - while (i > w->inodes.data && i[-1].snapshot > k.k->p.snapshot) + struct inode_walker_entry new_entry = *i; + + new_entry.inode.bi_snapshot = k.k->p.snapshot; + new_entry.count = 0; + new_entry.i_size = 0; + + while (i > w->inodes.data && i[-1].inode.bi_snapshot > k.k->p.snapshot) --i; size_t pos = i - w->inodes.data; - int ret = darray_insert_item(&w->inodes, pos, new); + ret = darray_insert_item(&w->inodes, pos, new_entry); if (ret) - return ERR_PTR(ret); + goto fsck_err; - i = w->inodes.data + pos; + ret = -BCH_ERR_transaction_restart_nested; + goto fsck_err; } + printbuf_exit(&buf); return i; +fsck_err: + printbuf_exit(&buf); + return ERR_PTR(ret); } static struct inode_walker_entry *walk_inode(struct btree_trans *trans, @@ -919,42 +970,7 @@ static struct inode_walker_entry *walk_inode(struct btree_trans *trans, w->last_pos = k.k->p; - return lookup_inode_for_snapshot(trans->c, w, k); -} - -static int get_visible_inodes(struct btree_trans *trans, - struct inode_walker *w, - struct snapshots_seen *s, - u64 inum) -{ - struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k; - int ret; - - w->inodes.nr = 0; - w->deletes.nr = 0; - - for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot), - BTREE_ITER_all_snapshots, k, ret) { - if (k.k->p.offset != inum) - break; - - if (!ref_visible(c, s, 
s->pos.snapshot, k.k->p.snapshot)) - continue; - - if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot)) - continue; - - ret = bkey_is_inode(k.k) - ? add_inode(c, w, k) - : snapshot_list_add(c, &w->deletes, k.k->p.snapshot); - if (ret) - break; - } - bch2_trans_iter_exit(trans, &iter); - - return ret; + return lookup_inode_for_snapshot(trans, w, k); } /* @@ -1078,32 +1094,6 @@ fsck_err: return ret; } -static int get_snapshot_root_inode(struct btree_trans *trans, - struct bch_inode_unpacked *root, - u64 inum) -{ - struct btree_iter iter; - struct bkey_s_c k; - int ret = 0; - - for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, - SPOS(0, inum, U32_MAX), - BTREE_ITER_all_snapshots, k, ret) { - if (k.k->p.offset != inum) - break; - if (bkey_is_inode(k.k)) - goto found_root; - } - if (ret) - goto err; - BUG(); -found_root: - ret = bch2_inode_unpack(k, root); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; -} - static int check_inode(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, @@ -1134,20 +1124,23 @@ static int check_inode(struct btree_trans *trans, goto err; if (snapshot_root->bi_inum != u.bi_inum) { - ret = get_snapshot_root_inode(trans, snapshot_root, u.bi_inum); + ret = bch2_inode_find_snapshot_root(trans, u.bi_inum, snapshot_root); if (ret) goto err; } - if (fsck_err_on(u.bi_hash_seed != snapshot_root->bi_hash_seed || - INODE_STR_HASH(&u) != INODE_STR_HASH(snapshot_root), - trans, inode_snapshot_mismatch, - "inode hash info in different snapshots don't match")) { - u.bi_hash_seed = snapshot_root->bi_hash_seed; - SET_INODE_STR_HASH(&u, INODE_STR_HASH(snapshot_root)); - do_update = true; + if (u.bi_hash_seed != snapshot_root->bi_hash_seed || + INODE_STR_HASH(&u) != INODE_STR_HASH(snapshot_root)) { + ret = bch2_repair_inode_hash_info(trans, snapshot_root); + BUG_ON(ret == -BCH_ERR_fsck_repair_unimplemented); + if (ret) + goto err; } + ret = bch2_check_inode_has_case_insensitive(trans, &u, &s->ids, &do_update); + if (ret) + goto err; + if (u.bi_dir || u.bi_dir_offset) { ret = check_inode_dirent_inode(trans, &u, &do_update); if (ret) @@ -1450,7 +1443,9 @@ static int check_key_has_inode(struct btree_trans *trans, if (k.k->type == KEY_TYPE_whiteout) goto out; - if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) { + bool have_inode = i && !i->whiteout; + + if (!have_inode && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) { ret = reconstruct_inode(trans, iter->btree_id, k.k->p.snapshot, k.k->p.inode) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); if (ret) @@ -1461,14 +1456,14 @@ static int check_key_has_inode(struct btree_trans *trans, goto err; } - if (fsck_err_on(!i, + if (fsck_err_on(!have_inode, trans, key_in_missing_inode, "key in missing inode:\n%s", (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) goto delete; - if (fsck_err_on(i && !btree_matches_i_mode(iter->btree_id, i->inode.bi_mode), + if (fsck_err_on(have_inode && !btree_matches_i_mode(iter->btree_id, i->inode.bi_mode), trans, key_in_wrong_inode_type, "key for wrong inode mode %o:\n%s", i->inode.bi_mode, @@ -1496,21 +1491,21 @@ static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_wal if (i->inode.bi_sectors == i->count) continue; - count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot); + count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->inode.bi_snapshot); if (w->recalculate_sums) i->count = count2; if (i->count != count2) { bch_err_ratelimited(c, "fsck 
counted i_sectors wrong for inode %llu:%u: got %llu should be %llu", - w->last_pos.inode, i->snapshot, i->count, count2); + w->last_pos.inode, i->inode.bi_snapshot, i->count, count2); i->count = count2; } if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty), trans, inode_i_sectors_wrong, "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu", - w->last_pos.inode, i->snapshot, + w->last_pos.inode, i->inode.bi_snapshot, i->inode.bi_sectors, i->count)) { i->inode.bi_sectors = i->count; ret = bch2_fsck_write_inode(trans, &i->inode); @@ -1821,20 +1816,20 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter, for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); inode->inodes.data && i >= inode->inodes.data; --i) { - if (i->snapshot > k.k->p.snapshot || - !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot)) + if (i->inode.bi_snapshot > k.k->p.snapshot || + !key_visible_in_snapshot(c, s, i->inode.bi_snapshot, k.k->p.snapshot)) continue; if (fsck_err_on(k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 && !bkey_extent_is_reservation(k), trans, extent_past_end_of_inode, "extent type past end of inode %llu:%u, i_size %llu\n%s", - i->inode.bi_inum, i->snapshot, i->inode.bi_size, + i->inode.bi_inum, i->inode.bi_snapshot, i->inode.bi_size, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { struct btree_iter iter2; bch2_trans_copy_iter(trans, &iter2, iter); - bch2_btree_iter_set_snapshot(trans, &iter2, i->snapshot); + bch2_btree_iter_set_snapshot(trans, &iter2, i->inode.bi_snapshot); ret = bch2_btree_iter_traverse(trans, &iter2) ?: bch2_btree_delete_at(trans, &iter2, BTREE_UPDATE_internal_snapshot_node); @@ -1856,8 +1851,9 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter, for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); inode->inodes.data && i >= inode->inodes.data; --i) { - if (i->snapshot > k.k->p.snapshot || - !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot)) + if (i->whiteout || + i->inode.bi_snapshot > k.k->p.snapshot || + !key_visible_in_snapshot(c, s, i->inode.bi_snapshot, k.k->p.snapshot)) continue; i->count += k.k->size; @@ -1939,13 +1935,13 @@ static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_ if (i->inode.bi_nlink == i->count) continue; - count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot); + count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->inode.bi_snapshot); if (count2 < 0) return count2; if (i->count != count2) { bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu", - w->last_pos.inode, i->snapshot, i->count, count2); + w->last_pos.inode, i->inode.bi_snapshot, i->count, count2); i->count = count2; if (i->inode.bi_nlink == i->count) continue; @@ -1954,7 +1950,7 @@ static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_ if (fsck_err_on(i->inode.bi_nlink != i->count, trans, inode_dir_wrong_nlink, "directory %llu:%u with wrong i_nlink: got %u, should be %llu", - w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) { + w->last_pos.inode, i->inode.bi_snapshot, i->inode.bi_nlink, i->count)) { i->inode.bi_nlink = i->count; ret = bch2_fsck_write_inode(trans, &i->inode); if (ret) @@ -2066,7 +2062,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter * 0, subvolume); ret = bkey_err(s.s_c); if (ret && !bch2_err_matches(ret, ENOENT)) - return ret; + goto err; if (ret) { if 
(fsck_err(trans, dirent_to_missing_subvol, @@ -2077,24 +2073,35 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter * goto out; } - if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol, - trans, subvol_fs_path_parent_wrong, - "subvol with wrong fs_path_parent, should be be %u\n%s", - parent_subvol, - (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - struct bkey_i_subvolume *n = - bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume); - ret = PTR_ERR_OR_ZERO(n); + if (le32_to_cpu(s.v->fs_path_parent) != parent_subvol) { + printbuf_reset(&buf); + + prt_printf(&buf, "subvol with wrong fs_path_parent, should be be %u\n", + parent_subvol); + + ret = bch2_inum_to_path(trans, (subvol_inum) { s.k->p.offset, + le64_to_cpu(s.v->inode) }, &buf); if (ret) goto err; + prt_newline(&buf); + bch2_bkey_val_to_text(&buf, c, s.s_c); + + if (fsck_err(trans, subvol_fs_path_parent_wrong, "%s", buf.buf)) { + struct bkey_i_subvolume *n = + bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume); + ret = PTR_ERR_OR_ZERO(n); + if (ret) + goto err; - n->v.fs_path_parent = cpu_to_le32(parent_subvol); + n->v.fs_path_parent = cpu_to_le32(parent_subvol); + } } u64 target_inum = le64_to_cpu(s.v->inode); u32 target_snapshot = le32_to_cpu(s.v->snapshot); - ret = lookup_inode(trans, target_inum, target_snapshot, &subvol_root); + ret = bch2_inode_find_by_inum_snapshot(trans, target_inum, target_snapshot, + &subvol_root, 0); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; @@ -2167,7 +2174,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, if (ret) goto err; - if (!i) + if (!i || i->whiteout) goto out; if (dir->first_this_inode) @@ -2188,6 +2195,41 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); + /* check casefold */ + if (fsck_err_on(d.v->d_casefold != !!hash_info->cf_encoding, + trans, dirent_casefold_mismatch, + "dirent casefold does not match dir casefold\n%s", + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), + buf.buf))) { + struct qstr name = bch2_dirent_get_name(d); + u32 subvol = d.v->d_type == DT_SUBVOL + ? le32_to_cpu(d.v->d_parent_subvol) + : 0; + u64 target = d.v->d_type == DT_SUBVOL + ? 
le32_to_cpu(d.v->d_child_subvol) + : le64_to_cpu(d.v->d_inum); + u64 dir_offset; + + ret = bch2_hash_delete_at(trans, + bch2_dirent_hash_desc, hash_info, iter, + BTREE_UPDATE_internal_snapshot_node) ?: + bch2_dirent_create_snapshot(trans, subvol, + d.k->p.inode, d.k->p.snapshot, + hash_info, + d.v->d_type, + &name, + target, + &dir_offset, + BTREE_ITER_with_updates| + BTREE_UPDATE_internal_snapshot_node| + STR_HASH_must_create) ?: + bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); + + /* might need another check_dirents pass */ + goto out; + } + if (d.v->d_type == DT_SUBVOL) { ret = check_dirent_to_subvol(trans, iter, d); if (ret) @@ -2307,7 +2349,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter, if (ret) return ret; - if (!i) + if (!i || i->whiteout) return 0; if (inode->first_this_inode) @@ -2376,7 +2418,8 @@ static int check_root_trans(struct btree_trans *trans) goto err; } - ret = lookup_inode(trans, BCACHEFS_ROOT_INO, snapshot, &root_inode); + ret = bch2_inode_find_by_inum_snapshot(trans, BCACHEFS_ROOT_INO, snapshot, + &root_inode, 0); if (ret && !bch2_err_matches(ret, ENOENT)) return ret; @@ -2408,8 +2451,6 @@ int bch2_check_root(struct bch_fs *c) return ret; } -typedef DARRAY(u32) darray_u32; - static bool darray_u32_has(darray_u32 *d, u32 v) { darray_for_each(*d, i) @@ -2446,7 +2487,14 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, u32 parent = le32_to_cpu(s.v->fs_path_parent); if (darray_u32_has(&subvol_path, parent)) { - if (fsck_err(trans, subvol_loop, "subvolume loop")) + printbuf_reset(&buf); + prt_printf(&buf, "subvolume loop:\n"); + + darray_for_each_reverse(subvol_path, i) + prt_printf(&buf, "%u ", *i); + prt_printf(&buf, "%u", parent); + + if (fsck_err(trans, subvol_loop, "%s", buf.buf)) ret = reattach_subvol(trans, s); break; } @@ -2462,7 +2510,8 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, if (fsck_err_on(k.k->type != KEY_TYPE_subvolume, trans, subvol_unreachable, "unreachable subvolume %s", - (bch2_bkey_val_to_text(&buf, c, s.s_c), + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { ret = reattach_subvol(trans, s); break; @@ -2618,14 +2667,13 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) redo_bi_depth = true; if (path_is_dup(&path, inode.bi_inum, snapshot)) { - /* XXX print path */ - bch_err(c, "directory structure loop"); - - darray_for_each(path, i) - pr_err("%llu:%u", i->inum, i->snapshot); - pr_err("%llu:%u", inode.bi_inum, snapshot); + printbuf_reset(&buf); + prt_printf(&buf, "directory structure loop:\n"); + darray_for_each_reverse(path, i) + prt_printf(&buf, "%llu:%u ", i->inum, i->snapshot); + prt_printf(&buf, "%llu:%u", inode.bi_inum, snapshot); - if (fsck_err(trans, dir_loop, "directory structure loop")) { + if (fsck_err(trans, dir_loop, "%s", buf.buf)) { ret = remove_backpointer(trans, &inode); bch_err_msg(c, ret, "removing dirent"); if (ret) @@ -3024,7 +3072,7 @@ long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) { struct bch_ioctl_fsck_offline arg; struct fsck_thread *thr = NULL; - darray_str(devs) = {}; + darray_const_str devs = {}; long ret = 0; if (copy_from_user(&arg, user_arg, sizeof(arg))) @@ -3082,7 +3130,7 @@ long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); - thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); + thr->c = bch2_fs_open(&devs, 
&thr->opts); if (!IS_ERR(thr->c) && thr->c->opts.errors == BCH_ON_ERROR_panic) @@ -3119,19 +3167,18 @@ static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio) c->opts.fix_errors = FSCK_FIX_ask; c->opts.fsck = true; - set_bit(BCH_FS_fsck_running, &c->flags); + set_bit(BCH_FS_in_fsck, &c->flags); - c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; - int ret = bch2_run_online_recovery_passes(c); + int ret = bch2_run_online_recovery_passes(c, ~0ULL); - clear_bit(BCH_FS_fsck_running, &c->flags); + clear_bit(BCH_FS_in_fsck, &c->flags); bch_err_fn(c, ret); c->stdio = NULL; c->stdio_filter = NULL; c->opts.fix_errors = old_fix_errors; - up(&c->online_fsck_mutex); + up(&c->recovery.run_lock); bch2_ro_ref_put(c); return ret; } @@ -3155,7 +3202,7 @@ long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg) if (!bch2_ro_ref_tryget(c)) return -EROFS; - if (down_trylock(&c->online_fsck_mutex)) { + if (down_trylock(&c->recovery.run_lock)) { bch2_ro_ref_put(c); return -EAGAIN; } @@ -3187,7 +3234,7 @@ err: bch_err_fn(c, ret); if (thr) bch2_fsck_thread_exit(&thr->thr); - up(&c->online_fsck_mutex); + up(&c->recovery.run_lock); bch2_ro_ref_put(c); } return ret; diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index b51d98cf8a80..5cf70108ae2f 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -14,6 +14,7 @@ #include "extent_update.h" #include "fs.h" #include "inode.h" +#include "namei.h" #include "opts.h" #include "str_hash.h" #include "snapshot.h" @@ -240,6 +241,7 @@ static int bch2_inode_unpack_v3(struct bkey_s_c k, u64 v[2]; unpacked->bi_inum = inode.k->p.offset; + unpacked->bi_snapshot = inode.k->p.snapshot; unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq); unpacked->bi_hash_seed = inode.v->bi_hash_seed; unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags); @@ -284,13 +286,12 @@ static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k, { memset(unpacked, 0, sizeof(*unpacked)); - unpacked->bi_snapshot = k.k->p.snapshot; - switch (k.k->type) { case KEY_TYPE_inode: { struct bkey_s_c_inode inode = bkey_s_c_to_inode(k); unpacked->bi_inum = inode.k->p.offset; + unpacked->bi_snapshot = inode.k->p.snapshot; unpacked->bi_journal_seq= 0; unpacked->bi_hash_seed = inode.v->bi_hash_seed; unpacked->bi_flags = le32_to_cpu(inode.v->bi_flags); @@ -309,6 +310,7 @@ static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k, struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k); unpacked->bi_inum = inode.k->p.offset; + unpacked->bi_snapshot = inode.k->p.snapshot; unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq); unpacked->bi_hash_seed = inode.v->bi_hash_seed; unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags); @@ -326,8 +328,6 @@ static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k, int bch2_inode_unpack(struct bkey_s_c k, struct bch_inode_unpacked *unpacked) { - unpacked->bi_snapshot = k.k->p.snapshot; - return likely(k.k->type == KEY_TYPE_inode_v3) ? bch2_inode_unpack_v3(k, unpacked) : bch2_inode_unpack_slowpath(k, unpacked); @@ -367,6 +367,82 @@ err: return ret; } +int bch2_inode_find_by_inum_snapshot(struct btree_trans *trans, + u64 inode_nr, u32 snapshot, + struct bch_inode_unpacked *inode, + unsigned flags) +{ + struct btree_iter iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + SPOS(0, inode_nr, snapshot), flags); + int ret = bkey_err(k); + if (ret) + goto err; + + ret = bkey_is_inode(k.k) + ? 
bch2_inode_unpack(k, inode) + : -BCH_ERR_ENOENT_inode; +err: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans, + subvol_inum inum, + struct bch_inode_unpacked *inode) +{ + struct btree_iter iter; + int ret; + + ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0); + if (!ret) + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +int bch2_inode_find_by_inum_trans(struct btree_trans *trans, + subvol_inum inum, + struct bch_inode_unpacked *inode) +{ + struct btree_iter iter; + int ret; + + ret = bch2_inode_peek(trans, &iter, inode, inum, 0); + if (!ret) + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum, + struct bch_inode_unpacked *inode) +{ + return bch2_trans_do(c, bch2_inode_find_by_inum_trans(trans, inum, inode)); +} + +int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum, + struct bch_inode_unpacked *root) +{ + struct btree_iter iter; + struct bkey_s_c k; + int ret = 0; + + for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, + SPOS(0, inum, U32_MAX), + BTREE_ITER_all_snapshots, k, ret) { + if (k.k->p.offset != inum) + break; + if (bkey_is_inode(k.k)) { + ret = bch2_inode_unpack(k, root); + goto out; + } + } + /* We're only called when we know we have an inode for @inum */ + BUG_ON(!ret); +out: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + int bch2_inode_write_flags(struct btree_trans *trans, struct btree_iter *iter, struct bch_inode_unpacked *inode, @@ -832,7 +908,8 @@ void bch2_inode_init_early(struct bch_fs *c, get_random_bytes(&inode_u->bi_hash_seed, sizeof(inode_u->bi_hash_seed)); } -void bch2_inode_init_late(struct bch_inode_unpacked *inode_u, u64 now, +void bch2_inode_init_late(struct bch_fs *c, + struct bch_inode_unpacked *inode_u, u64 now, uid_t uid, gid_t gid, umode_t mode, dev_t rdev, struct bch_inode_unpacked *parent) { @@ -856,6 +933,12 @@ void bch2_inode_init_late(struct bch_inode_unpacked *inode_u, u64 now, BCH_INODE_OPTS() #undef x } + + if (!S_ISDIR(mode)) + inode_u->bi_casefold = 0; + + if (bch2_inode_casefold(c, inode_u)) + inode_u->bi_flags |= BCH_INODE_has_case_insensitive; } void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u, @@ -863,7 +946,7 @@ void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u, struct bch_inode_unpacked *parent) { bch2_inode_init_early(c, inode_u); - bch2_inode_init_late(inode_u, bch2_current_time(c), + bch2_inode_init_late(c, inode_u, bch2_current_time(c), uid, gid, mode, rdev, parent); } @@ -1099,38 +1182,6 @@ err2: return ret; } -int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans, - subvol_inum inum, - struct bch_inode_unpacked *inode) -{ - struct btree_iter iter; - int ret; - - ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0); - if (!ret) - bch2_trans_iter_exit(trans, &iter); - return ret; -} - -int bch2_inode_find_by_inum_trans(struct btree_trans *trans, - subvol_inum inum, - struct bch_inode_unpacked *inode) -{ - struct btree_iter iter; - int ret; - - ret = bch2_inode_peek(trans, &iter, inode, inum, 0); - if (!ret) - bch2_trans_iter_exit(trans, &iter); - return ret; -} - -int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum, - struct bch_inode_unpacked *inode) -{ - return bch2_trans_do(c, bch2_inode_find_by_inum_trans(trans, inum, inode)); -} - int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi) { if (bi->bi_flags & BCH_INODE_unlinked) @@ -1204,6 +1255,41 @@ int 
bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_i return 0; } +int bch2_inode_set_casefold(struct btree_trans *trans, subvol_inum inum, + struct bch_inode_unpacked *bi, unsigned v) +{ + struct bch_fs *c = trans->c; + +#ifdef CONFIG_UNICODE + int ret = 0; + /* Not supported on individual files. */ + if (!S_ISDIR(bi->bi_mode)) + return -EOPNOTSUPP; + + /* + * Make sure the dir is empty, as otherwise we'd need to + * rehash everything and update the dirent keys. + */ + ret = bch2_empty_dir_trans(trans, inum); + if (ret < 0) + return ret; + + ret = bch2_request_incompat_feature(c, bcachefs_metadata_version_casefolding); + if (ret) + return ret; + + bch2_check_set_feature(c, BCH_FEATURE_casefolding); + + bi->bi_casefold = v + 1; + bi->bi_fields_set |= BIT(Inode_opt_casefold); + + return bch2_maybe_propagate_has_case_insensitive(trans, inum, bi); +#else + bch_err(c, "Cannot use casefolding on a kernel without CONFIG_UNICODE"); + return -EOPNOTSUPP; +#endif +} + static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot) { struct bch_fs *c = trans->c; diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h index c74af15b14f2..77ad2d549541 100644 --- a/fs/bcachefs/inode.h +++ b/fs/bcachefs/inode.h @@ -134,10 +134,21 @@ static inline int bch2_inode_peek(struct btree_trans *trans, subvol_inum inum, unsigned flags) { return __bch2_inode_peek(trans, iter, inode, inum, flags, true); - int ret = bch2_inode_peek_nowarn(trans, iter, inode, inum, flags); - return ret; } +int bch2_inode_find_by_inum_snapshot(struct btree_trans *, u64, u32, + struct bch_inode_unpacked *, unsigned); +int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *, + subvol_inum, + struct bch_inode_unpacked *); +int bch2_inode_find_by_inum_trans(struct btree_trans *, subvol_inum, + struct bch_inode_unpacked *); +int bch2_inode_find_by_inum(struct bch_fs *, subvol_inum, + struct bch_inode_unpacked *); + +int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum, + struct bch_inode_unpacked *root); + int bch2_inode_write_flags(struct btree_trans *, struct btree_iter *, struct bch_inode_unpacked *, enum btree_iter_update_trigger_flags); @@ -153,7 +164,7 @@ int bch2_fsck_write_inode(struct btree_trans *, struct bch_inode_unpacked *); void bch2_inode_init_early(struct bch_fs *, struct bch_inode_unpacked *); -void bch2_inode_init_late(struct bch_inode_unpacked *, u64, +void bch2_inode_init_late(struct bch_fs *, struct bch_inode_unpacked *, u64, uid_t, gid_t, umode_t, dev_t, struct bch_inode_unpacked *); void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *, @@ -165,14 +176,6 @@ int bch2_inode_create(struct btree_trans *, struct btree_iter *, int bch2_inode_rm(struct bch_fs *, subvol_inum); -int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *, - subvol_inum, - struct bch_inode_unpacked *); -int bch2_inode_find_by_inum_trans(struct btree_trans *, subvol_inum, - struct bch_inode_unpacked *); -int bch2_inode_find_by_inum(struct bch_fs *, subvol_inum, - struct bch_inode_unpacked *); - #define inode_opt_get(_c, _inode, _name) \ ((_inode)->bi_##_name ? (_inode)->bi_##_name - 1 : (_c)->opts._name) @@ -245,7 +248,7 @@ static inline unsigned bkey_inode_mode(struct bkey_s_c k) static inline bool bch2_inode_casefold(struct bch_fs *c, const struct bch_inode_unpacked *bi) { - /* inode apts are stored with a +1 bias: 0 means "unset, use fs opt" */ + /* inode opts are stored with a +1 bias: 0 means "unset, use fs opt" */ return bi->bi_casefold ? 
bi->bi_casefold - 1 : c->opts.casefold; @@ -292,7 +295,9 @@ static inline bool bch2_inode_should_have_single_bp(struct bch_inode_unpacked *i struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *); void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *, struct bch_inode_unpacked *); -int bch2_inum_opts_get(struct btree_trans*, subvol_inum, struct bch_io_opts *); +int bch2_inum_opts_get(struct btree_trans *, subvol_inum, struct bch_io_opts *); +int bch2_inode_set_casefold(struct btree_trans *, subvol_inum, + struct bch_inode_unpacked *, unsigned); #include "rebalance.h" @@ -304,6 +309,14 @@ bch2_inode_rebalance_opts_get(struct bch_fs *c, struct bch_inode_unpacked *inode return io_opts_to_rebalance_opts(c, &io_opts); } +#define BCACHEFS_ROOT_SUBVOL_INUM \ + ((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO }) + +static inline bool subvol_inum_eq(subvol_inum a, subvol_inum b) +{ + return a.subvol == b.subvol && a.inum == b.inum; +} + int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32); int bch2_delete_dead_inodes(struct bch_fs *); diff --git a/fs/bcachefs/inode_format.h b/fs/bcachefs/inode_format.h index 87e193e8ed25..1f00938b1bdc 100644 --- a/fs/bcachefs/inode_format.h +++ b/fs/bcachefs/inode_format.h @@ -129,6 +129,10 @@ enum inode_opt_id { Inode_opt_nr, }; +/* + * BCH_INODE_has_case_insensitive is set if any descendent is case insensitive - + * for overlayfs + */ #define BCH_INODE_FLAGS() \ x(sync, 0) \ x(immutable, 1) \ @@ -139,7 +143,8 @@ enum inode_opt_id { x(i_sectors_dirty, 6) \ x(unlinked, 7) \ x(backptr_untrusted, 8) \ - x(has_child_snapshot, 9) + x(has_child_snapshot, 9) \ + x(has_case_insensitive, 10) /* bits 20+ reserved for packed fields below: */ diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index def4a26a3b45..cc708d46557e 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -9,6 +9,7 @@ #include "bcachefs.h" #include "alloc_background.h" #include "alloc_foreground.h" +#include "async_objs.h" #include "btree_update.h" #include "buckets.h" #include "checksum.h" @@ -17,6 +18,7 @@ #include "data_update.h" #include "disk_groups.h" #include "ec.h" +#include "enumerated_ref.h" #include "error.h" #include "io_read.h" #include "io_misc.h" @@ -25,6 +27,7 @@ #include "subvolume.h" #include "trace.h" +#include <linux/moduleparam.h> #include <linux/random.h> #include <linux/sched/mm.h> @@ -34,6 +37,12 @@ module_param_named(read_corrupt_ratio, bch2_read_corrupt_ratio, uint, 0644); MODULE_PARM_DESC(read_corrupt_ratio, ""); #endif +static bool bch2_poison_extents_on_checksum_error; +module_param_named(poison_extents_on_checksum_error, + bch2_poison_extents_on_checksum_error, bool, 0644); +MODULE_PARM_DESC(poison_extents_on_checksum_error, + "Extents with checksum errors are marked as poisoned - unsafe without read fua support"); + #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT static bool bch2_target_congested(struct bch_fs *c, u16 target) @@ -80,18 +89,6 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target) /* Cache promotion on read */ -struct promote_op { - struct rcu_head rcu; - u64 start_time; - - struct rhash_head hash; - struct bpos pos; - - struct work_struct work; - struct data_update write; - struct bio_vec bi_inline_vecs[]; /* must be last */ -}; - static const struct rhashtable_params bch_promote_params = { .head_offset = offsetof(struct promote_op, hash), .key_offset = offsetof(struct promote_op, pos), @@ -169,9 +166,11 @@ static noinline void promote_free(struct bch_read_bio *rbio) bch_promote_params); 
BUG_ON(ret); + async_object_list_del(c, promote, op->list_idx); + bch2_data_update_exit(&op->write); - bch2_write_ref_put(c, BCH_WRITE_REF_promote); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_promote); kfree_rcu(op, rcu); } @@ -236,7 +235,7 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans, return NULL; } - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_promote)) return ERR_PTR(-BCH_ERR_nopromote_no_writes); struct promote_op *op = kzalloc(sizeof(*op), GFP_KERNEL); @@ -254,6 +253,10 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans, goto err; } + ret = async_object_list_add(c, promote, op, &op->list_idx); + if (ret < 0) + goto err_remove_hash; + ret = bch2_data_update_init(trans, NULL, NULL, &op->write, writepoint_hashed((unsigned long) current), &orig->opts, @@ -265,7 +268,7 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans, * -BCH_ERR_ENOSPC_disk_reservation: */ if (ret) - goto err_remove_hash; + goto err_remove_list; rbio_init_fragment(&op->write.rbio.bio, orig); op->write.rbio.bounce = true; @@ -273,6 +276,8 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans, op->write.op.end_io = promote_done; return &op->write.rbio; +err_remove_list: + async_object_list_del(c, promote, op->list_idx); err_remove_hash: BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash, bch_promote_params)); @@ -281,7 +286,7 @@ err: /* We may have added to the rhashtable and thus need rcu freeing: */ kfree_rcu(op, rcu); err_put: - bch2_write_ref_put(c, BCH_WRITE_REF_promote); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_promote); return ERR_PTR(ret); } @@ -296,6 +301,13 @@ static struct bch_read_bio *promote_alloc(struct btree_trans *trans, bool *read_full, struct bch_io_failures *failed) { + /* + * We're in the retry path, but we don't know what to repair yet, and we + * don't want to do a promote here: + */ + if (failed && !failed->nr) + return NULL; + struct bch_fs *c = trans->c; /* * if failed != NULL we're not actually doing a promote, we're @@ -338,6 +350,18 @@ nopromote: return NULL; } +void bch2_promote_op_to_text(struct printbuf *out, struct promote_op *op) +{ + if (!op->write.read_done) { + prt_printf(out, "parent read: %px\n", op->write.rbio.parent); + printbuf_indent_add(out, 2); + bch2_read_bio_to_text(out, op->write.rbio.parent); + printbuf_indent_sub(out, 2); + } + + bch2_data_update_to_text(out, &op->write); +} + /* Read */ static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *out, @@ -394,7 +418,7 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio) if (rbio->have_ioref) { struct bch_dev *ca = bch2_dev_have_ref(rbio->c, rbio->pick.ptr.dev); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read); } if (rbio->split) { @@ -406,6 +430,8 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio) else promote_free(rbio); } else { + async_object_list_del(rbio->c, rbio, rbio->list_idx); + if (rbio->bounce) bch2_bio_free_pages_pool(rbio->c, &rbio->bio); @@ -430,6 +456,74 @@ static void bch2_rbio_done(struct bch_read_bio *rbio) bio_endio(&rbio->bio); } +static void get_rbio_extent(struct btree_trans *trans, + struct bch_read_bio *rbio, + struct bkey_buf *sk) +{ + struct btree_iter iter; + struct bkey_s_c k; + int ret = lockrestart_do(trans, + bkey_err(k = bch2_bkey_get_iter(trans, &iter, + rbio->data_btree, rbio->data_pos, 0))); 
+ if (ret) + return; + + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + bkey_for_each_ptr(ptrs, ptr) + if (bch2_extent_ptr_eq(*ptr, rbio->pick.ptr)) { + bch2_bkey_buf_reassemble(sk, trans->c, k); + break; + } + + bch2_trans_iter_exit(trans, &iter); +} + +static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_read_bio *rbio, + enum btree_id btree, struct bkey_s_c read_k) +{ + if (!bch2_poison_extents_on_checksum_error) + return 0; + + struct bch_fs *c = trans->c; + + struct data_update *u = rbio_data_update(rbio); + if (u) + read_k = bkey_i_to_s_c(u->k.k); + + u64 flags = bch2_bkey_extent_flags(read_k); + if (flags & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) + return 0; + + struct btree_iter iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, btree, bkey_start_pos(read_k.k), + BTREE_ITER_intent); + int ret = bkey_err(k); + if (ret) + return ret; + + if (!bkey_and_val_eq(k, read_k)) + goto out; + + struct bkey_i *new = bch2_trans_kmalloc(trans, + bkey_bytes(k.k) + sizeof(struct bch_extent_flags)); + ret = PTR_ERR_OR_ZERO(new) ?: + (bkey_reassemble(new, k), 0) ?: + bch2_bkey_extent_flags_set(c, new, flags|BIT_ULL(BCH_EXTENT_FLAG_poisoned)) ?: + bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node) ?: + bch2_trans_commit(trans, NULL, NULL, 0); + + /* + * Propagate key change back to data update path, in particular so it + * knows the extent has been poisoned and it's safe to change the + * checksum + */ + if (u && !ret) + bch2_bkey_buf_copy(&u->k, c, new); +out: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + static noinline int bch2_read_retry_nodecode(struct btree_trans *trans, struct bch_read_bio *rbio, struct bvec_iter bvec_iter, @@ -463,7 +557,8 @@ retry: err: bch2_trans_iter_exit(trans, &iter); - if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || + bch2_err_matches(ret, BCH_ERR_data_read_retry)) goto retry; if (ret) { @@ -487,15 +582,21 @@ static void bch2_rbio_retry(struct work_struct *work) .inum = rbio->read_pos.inode, }; struct bch_io_failures failed = { .nr = 0 }; - int orig_error = rbio->ret; struct btree_trans *trans = bch2_trans_get(c); + struct bkey_buf sk; + bch2_bkey_buf_init(&sk); + bkey_init(&sk.k->k); + trace_io_read_retry(&rbio->bio); this_cpu_add(c->counters[BCH_COUNTER_io_read_retry], bvec_iter_sectors(rbio->bvec_iter)); - if (bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid)) + get_rbio_extent(trans, rbio, &sk); + + if (!bkey_deleted(&sk.k->k) && + bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid)) bch2_mark_io_failure(&failed, &rbio->pick, rbio->ret == -BCH_ERR_data_read_retry_csum_err); @@ -516,15 +617,16 @@ static void bch2_rbio_retry(struct work_struct *work) int ret = rbio->data_update ? 
bch2_read_retry_nodecode(trans, rbio, iter, &failed, flags) - : __bch2_read(trans, rbio, iter, inum, &failed, flags); + : __bch2_read(trans, rbio, iter, inum, &failed, &sk, flags); if (ret) { rbio->ret = ret; rbio->bio.bi_status = BLK_STS_IOERR; - } else if (orig_error != -BCH_ERR_data_read_retry_csum_err_maybe_userspace && - orig_error != -BCH_ERR_data_read_ptr_stale_race && - !failed.nr) { + } + + if (failed.nr || ret) { struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, &buf, @@ -532,13 +634,27 @@ static void bch2_rbio_retry(struct work_struct *work) read_pos.offset << 9)); if (rbio->data_update) prt_str(&buf, "(internal move) "); - prt_str(&buf, "successful retry"); - bch_err_ratelimited(c, "%s", buf.buf); + prt_str(&buf, "data read error, "); + if (!ret) + prt_str(&buf, "successful retry"); + else + prt_str(&buf, bch2_err_str(ret)); + prt_newline(&buf); + + if (!bkey_deleted(&sk.k->k)) { + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(sk.k)); + prt_newline(&buf); + } + + bch2_io_failures_to_text(&buf, c, &failed); + + bch2_print_str_ratelimited(c, KERN_ERR, buf.buf); printbuf_exit(&buf); } bch2_rbio_done(rbio); + bch2_bkey_buf_exit(&sk, c); bch2_trans_put(trans); } @@ -568,27 +684,6 @@ static void bch2_rbio_error(struct bch_read_bio *rbio, } } -static void bch2_read_io_err(struct work_struct *work) -{ - struct bch_read_bio *rbio = - container_of(work, struct bch_read_bio, work); - struct bio *bio = &rbio->bio; - struct bch_fs *c = rbio->c; - struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; - struct printbuf buf = PRINTBUF; - - bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); - prt_printf(&buf, "data read error: %s", bch2_blk_status_to_str(bio->bi_status)); - - if (ca) - bch_err_ratelimited(ca, "%s", buf.buf); - else - bch_err_ratelimited(c, "%s", buf.buf); - - printbuf_exit(&buf); - bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_io_err, bio->bi_status); -} - static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, struct bch_read_bio *rbio) { @@ -652,31 +747,6 @@ static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) __bch2_rbio_narrow_crcs(trans, rbio)); } -static void bch2_read_csum_err(struct work_struct *work) -{ - struct bch_read_bio *rbio = - container_of(work, struct bch_read_bio, work); - struct bch_fs *c = rbio->c; - struct bio *src = &rbio->bio; - struct bch_extent_crc_unpacked crc = rbio->pick.crc; - struct nonce nonce = extent_nonce(rbio->version, crc); - struct bch_csum csum = bch2_checksum_bio(c, crc.csum_type, nonce, src); - struct printbuf buf = PRINTBUF; - - bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); - prt_str(&buf, "data "); - bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum); - - struct bch_dev *ca = rbio->have_ioref ? 
bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; - if (ca) - bch_err_ratelimited(ca, "%s", buf.buf); - else - bch_err_ratelimited(c, "%s", buf.buf); - - bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR); - printbuf_exit(&buf); -} - static void bch2_read_decompress_err(struct work_struct *work) { struct bch_read_bio *rbio = @@ -837,7 +907,7 @@ out: memalloc_nofs_restore(nofs_flags); return; csum_err: - bch2_rbio_punt(rbio, bch2_read_csum_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); + bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR); goto out; decompression_err: bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); @@ -863,7 +933,7 @@ static void bch2_read_endio(struct bio *bio) rbio->bio.bi_end_io = rbio->end_io; if (unlikely(bio->bi_status)) { - bch2_rbio_punt(rbio, bch2_read_io_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); + bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_io_err, bio->bi_status); return; } @@ -963,6 +1033,10 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, bvec_iter_sectors(iter)); goto out_read_done; } + + if ((bch2_bkey_extent_flags(k) & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) && + !orig->data_update) + return -BCH_ERR_extent_poisoned; retry_pick: ret = bch2_bkey_pick_read_device(c, k, failed, &pick, dev); @@ -971,6 +1045,16 @@ retry_pick: goto hole; if (unlikely(ret < 0)) { + if (ret == -BCH_ERR_data_read_csum_err) { + int ret2 = maybe_poison_extent(trans, orig, data_btree, k); + if (ret2) { + ret = ret2; + goto err; + } + + trace_and_count(c, io_read_fail_and_poison, &orig->bio); + } + struct printbuf buf = PRINTBUF; bch2_read_err_msg_trans(trans, &buf, orig, read_pos); prt_printf(&buf, "%s\n ", bch2_err_str(ret)); @@ -994,7 +1078,8 @@ retry_pick: goto err; } - struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ); + struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, + BCH_DEV_READ_REF_io_read); /* * Stale dirty pointers are treated as IO errors, but @failed isn't @@ -1008,7 +1093,7 @@ retry_pick: unlikely(dev_ptr_stale(ca, &pick.ptr))) { read_from_stale_dirty_pointer(trans, ca, k, pick.ptr); bch2_mark_io_failure(failed, &pick, false); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read); goto retry_pick; } @@ -1041,7 +1126,8 @@ retry_pick: */ if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) { if (ca) - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_io_read); rbio->ret = -BCH_ERR_data_read_buffer_too_small; goto out_read_done; } @@ -1138,6 +1224,8 @@ retry_pick: rbio->bio.bi_iter.bi_sector = pick.ptr.offset; rbio->bio.bi_end_io = bch2_read_endio; + async_object_list_add(c, rbio, rbio, &rbio->list_idx); + if (rbio->bounce) trace_and_count(c, io_read_bounce, &rbio->bio); @@ -1171,14 +1259,6 @@ retry_pick: if (likely(!rbio->pick.do_ec_reconstruct)) { if (unlikely(!rbio->have_ioref)) { - struct printbuf buf = PRINTBUF; - bch2_read_err_msg_trans(trans, &buf, rbio, read_pos); - prt_printf(&buf, "no device to read from:\n "); - bch2_bkey_val_to_text(&buf, c, k); - - bch_err_ratelimited(c, "%s", buf.buf); - printbuf_exit(&buf); - bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_device_offline, BLK_STS_IOERR); @@ -1265,12 +1345,15 @@ out_read_done: int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, struct bvec_iter bvec_iter, subvol_inum inum, - struct bch_io_failures *failed, unsigned flags) + struct bch_io_failures 
*failed, + struct bkey_buf *prev_read, + unsigned flags) { struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_buf sk; struct bkey_s_c k; + enum btree_id data_btree; int ret; EBUG_ON(rbio->data_update); @@ -1281,7 +1364,7 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, BTREE_ITER_slots); while (1) { - enum btree_id data_btree = BTREE_ID_extents; + data_btree = BTREE_ID_extents; bch2_trans_begin(trans); @@ -1313,6 +1396,12 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, k = bkey_i_to_s_c(sk.k); + if (unlikely(flags & BCH_READ_in_retry)) { + if (!bkey_and_val_eq(k, bkey_i_to_s_c(prev_read->k))) + failed->nr = 0; + bch2_bkey_buf_copy(prev_read, c, sk.k); + } + /* * With indirect extents, the amount of data to read is the min * of the original extent and the indirect extent: @@ -1347,8 +1436,6 @@ err: break; } - bch2_trans_iter_exit(trans, &iter); - if (unlikely(ret)) { if (ret != -BCH_ERR_extent_poisoned) { struct printbuf buf = PRINTBUF; @@ -1367,20 +1454,64 @@ err: bch2_rbio_done(rbio); } + bch2_trans_iter_exit(trans, &iter); bch2_bkey_buf_exit(&sk, c); return ret; } +static const char * const bch2_read_bio_flags[] = { +#define x(n) #n, + BCH_READ_FLAGS() +#undef x + NULL +}; + +void bch2_read_bio_to_text(struct printbuf *out, struct bch_read_bio *rbio) +{ + u64 now = local_clock(); + prt_printf(out, "start_time:\t%llu\n", rbio->start_time ? now - rbio->start_time : 0); + prt_printf(out, "submit_time:\t%llu\n", rbio->submit_time ? now - rbio->submit_time : 0); + + if (!rbio->split) + prt_printf(out, "end_io:\t%ps\n", rbio->end_io); + else + prt_printf(out, "parent:\t%px\n", rbio->parent); + + prt_printf(out, "bi_end_io:\t%ps\n", rbio->bio.bi_end_io); + + prt_printf(out, "promote:\t%u\n", rbio->promote); + prt_printf(out, "bounce:\t%u\n", rbio->bounce); + prt_printf(out, "split:\t%u\n", rbio->split); + prt_printf(out, "have_ioref:\t%u\n", rbio->have_ioref); + prt_printf(out, "narrow_crcs:\t%u\n", rbio->narrow_crcs); + prt_printf(out, "context:\t%u\n", rbio->context); + prt_printf(out, "ret:\t%s\n", bch2_err_str(rbio->ret)); + + prt_printf(out, "flags:\t"); + bch2_prt_bitflags(out, bch2_read_bio_flags, rbio->flags); + prt_newline(out); + + bch2_bio_to_text(out, &rbio->bio); +} + void bch2_fs_io_read_exit(struct bch_fs *c) { if (c->promote_table.tbl) rhashtable_destroy(&c->promote_table); bioset_exit(&c->bio_read_split); bioset_exit(&c->bio_read); + mempool_exit(&c->bio_bounce_pages); } int bch2_fs_io_read_init(struct bch_fs *c) { + if (mempool_init_page_pool(&c->bio_bounce_pages, + max_t(unsigned, + c->opts.btree_node_size, + c->opts.encoded_extent_max) / + PAGE_SIZE, 0)) + return -BCH_ERR_ENOMEM_bio_bounce_pages_init; + if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio), BIOSET_NEED_BVECS)) return -BCH_ERR_ENOMEM_bio_read_init; diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h index c78025d863e0..c08b9c047b3e 100644 --- a/fs/bcachefs/io_read.h +++ b/fs/bcachefs/io_read.h @@ -4,6 +4,7 @@ #include "bkey_buf.h" #include "btree_iter.h" +#include "extents_types.h" #include "reflink.h" struct bch_read_bio { @@ -48,6 +49,9 @@ struct bch_read_bio { u16 _state; }; s16 ret; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + unsigned list_idx; +#endif struct extent_ptr_decoded pick; @@ -144,7 +148,8 @@ static inline void bch2_read_extent(struct btree_trans *trans, } int __bch2_read(struct btree_trans *, struct bch_read_bio *, struct bvec_iter, - subvol_inum, struct bch_io_failures *, unsigned flags); + subvol_inum, + 
struct bch_io_failures *, struct bkey_buf *, unsigned flags); static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, subvol_inum inum) @@ -154,7 +159,7 @@ static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, rbio->subvol = inum.subvol; bch2_trans_run(c, - __bch2_read(trans, rbio, rbio->bio.bi_iter, inum, NULL, + __bch2_read(trans, rbio, rbio->bio.bi_iter, inum, NULL, NULL, BCH_READ_retry_if_stale| BCH_READ_may_promote| BCH_READ_user_mapped)); @@ -172,6 +177,9 @@ static inline struct bch_read_bio *rbio_init_fragment(struct bio *bio, rbio->split = true; rbio->parent = orig; rbio->opts = orig->opts; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + rbio->list_idx = 0; +#endif return rbio; } @@ -189,9 +197,16 @@ static inline struct bch_read_bio *rbio_init(struct bio *bio, rbio->ret = 0; rbio->opts = opts; rbio->bio.bi_end_io = end_io; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + rbio->list_idx = 0; +#endif return rbio; } +struct promote_op; +void bch2_promote_op_to_text(struct printbuf *, struct promote_op *); +void bch2_read_bio_to_text(struct printbuf *, struct bch_read_bio *); + void bch2_fs_io_read_exit(struct bch_fs *); int bch2_fs_io_read_init(struct bch_fs *); diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index c1237da079ed..52a60982a66b 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -6,6 +6,7 @@ #include "bcachefs.h" #include "alloc_foreground.h" +#include "async_objs.h" #include "bkey_buf.h" #include "bset.h" #include "btree_update.h" @@ -15,6 +16,7 @@ #include "compress.h" #include "debug.h" #include "ec.h" +#include "enumerated_ref.h" #include "error.h" #include "extent_update.h" #include "inode.h" @@ -263,11 +265,9 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0", extent_iter->pos.inode, bi_sectors, i_sectors_delta); - bool repeat = false, print = false, suppress = false; - bch2_count_fsck_err(c, inode_i_sectors_underflow, buf.buf, - &repeat, &print, &suppress); + bool print = bch2_count_fsck_err(c, inode_i_sectors_underflow, &buf); if (print) - bch2_print_str(c, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); if (i_sectors_delta < 0) @@ -280,6 +280,12 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, inode_update_flags = 0; } + /* + * extents, dirents and xattrs updates require that an inode update also + * happens - to ensure that if a key exists in one of those btrees with + * a given snapshot ID an inode is also present - so we may have to skip + * the nojournal optimization: + */ if (inode->k.p.snapshot != iter.snapshot) { inode->k.p.snapshot = iter.snapshot; inode_update_flags = 0; @@ -397,8 +403,7 @@ static int bch2_write_index_default(struct bch_write_op *op) bkey_start_pos(&sk.k->k), BTREE_ITER_slots|BTREE_ITER_intent); - ret = bch2_bkey_set_needs_rebalance(c, &op->opts, sk.k) ?: - bch2_extent_update(trans, inum, &iter, sk.k, + ret = bch2_extent_update(trans, inum, &iter, sk.k, &op->res, op->new_i_size, &op->i_sectors_delta, op->flags & BCH_WRITE_check_enospc); @@ -462,9 +467,17 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, { struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k)); struct bch_write_bio *n; + unsigned ref_rw = type == BCH_DATA_btree ? READ : WRITE; + unsigned ref_idx = type == BCH_DATA_btree + ? 
BCH_DEV_READ_REF_btree_node_write + : BCH_DEV_WRITE_REF_io_write; BUG_ON(c->opts.nochanges); + const struct bch_extent_ptr *last = NULL; + bkey_for_each_ptr(ptrs, ptr) + last = ptr; + bkey_for_each_ptr(ptrs, ptr) { /* * XXX: btree writes should be using io_ref[WRITE], but we @@ -473,9 +486,9 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, */ struct bch_dev *ca = nocow ? bch2_dev_have_ref(c, ptr->dev) - : bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE); + : bch2_dev_get_ioref(c, ptr->dev, ref_rw, ref_idx); - if (to_entry(ptr + 1) < ptrs.end) { + if (ptr != last) { n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set)); n->bio.bi_end_io = wbio->bio.bi_end_io; @@ -533,11 +546,12 @@ static void bch2_write_done(struct closure *cl) bch2_disk_reservation_put(c, &op->res); if (!(op->flags & BCH_WRITE_move)) - bch2_write_ref_put(c, BCH_WRITE_REF_write); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_write); bch2_keylist_free(&op->insert_keys, op->inline_keys); EBUG_ON(cl->parent); closure_debug_destroy(cl); + async_object_list_del(c, write_op, op->list_idx); if (op->end_io) op->end_io(op); } @@ -748,7 +762,8 @@ static void bch2_write_endio(struct bio *bio) } if (wbio->have_ioref) - percpu_ref_put(&ca->io_ref[WRITE]); + enumerated_ref_put(&ca->io_ref[WRITE], + BCH_DEV_WRITE_REF_io_write); if (wbio->bounce) bch2_bio_free_pages_pool(c, bio); @@ -784,6 +799,9 @@ static void init_append_extent(struct bch_write_op *op, bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size, op->flags & BCH_WRITE_cached); + if (!(op->flags & BCH_WRITE_move)) + bch2_bkey_set_needs_rebalance(op->c, &op->opts, &e->k_i); + bch2_keylist_push(&op->insert_keys); } @@ -1345,7 +1363,8 @@ retry: /* Get iorefs before dropping btree locks: */ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); bkey_for_each_ptr(ptrs, ptr) { - struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); + struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE, + BCH_DEV_WRITE_REF_io_write); if (unlikely(!ca)) goto err_get_ioref; @@ -1447,7 +1466,8 @@ err: return; err_get_ioref: darray_for_each(buckets, i) - percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE]); + enumerated_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE], + BCH_DEV_WRITE_REF_io_write); /* Fall back to COW path: */ goto out; @@ -1661,6 +1681,8 @@ CLOSURE_CALLBACK(bch2_write) BUG_ON(!op->write_point.v); BUG_ON(bkey_eq(op->pos, POS_MAX)); + async_object_list_add(c, write_op, op, &op->list_idx); + if (op->flags & BCH_WRITE_only_specified_devs) op->flags |= BCH_WRITE_alloc_nowait; @@ -1681,7 +1703,7 @@ CLOSURE_CALLBACK(bch2_write) } if (!(op->flags & BCH_WRITE_move) && - !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) { + !enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_write)) { op->error = -BCH_ERR_erofs_no_writes; goto err; } @@ -1705,6 +1727,7 @@ err: bch2_disk_reservation_put(c, &op->res); closure_debug_destroy(&op->cl); + async_object_list_del(c, write_op, op->list_idx); if (op->end_io) op->end_io(op); } @@ -1738,13 +1761,13 @@ void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op) prt_printf(out, "nr_replicas_required:\t%u\n", op->nr_replicas_required); prt_printf(out, "ref:\t%u\n", closure_nr_remaining(&op->cl)); + prt_printf(out, "ret\t%s\n", bch2_err_str(op->error)); printbuf_indent_sub(out, 2); } void bch2_fs_io_write_exit(struct bch_fs *c) { - mempool_exit(&c->bio_bounce_pages); bioset_exit(&c->replica_set); bioset_exit(&c->bio_write); } @@ 
-1755,12 +1778,5 @@ int bch2_fs_io_write_init(struct bch_fs *c) bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0)) return -BCH_ERR_ENOMEM_bio_write_init; - if (mempool_init_page_pool(&c->bio_bounce_pages, - max_t(unsigned, - c->opts.btree_node_size, - c->opts.encoded_extent_max) / - PAGE_SIZE, 0)) - return -BCH_ERR_ENOMEM_bio_bounce_pages_init; - return 0; } diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h index b8ab19a1e1da..2c0a8f35ee1f 100644 --- a/fs/bcachefs/io_write.h +++ b/fs/bcachefs/io_write.h @@ -17,34 +17,6 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *, __printf(3, 4) void bch2_write_op_error(struct bch_write_op *op, u64, const char *, ...); -#define BCH_WRITE_FLAGS() \ - x(alloc_nowait) \ - x(cached) \ - x(data_encoded) \ - x(pages_stable) \ - x(pages_owned) \ - x(only_specified_devs) \ - x(wrote_data_inline) \ - x(check_enospc) \ - x(sync) \ - x(move) \ - x(in_worker) \ - x(submitted) \ - x(io_error) \ - x(convert_unwritten) - -enum __bch_write_flags { -#define x(f) __BCH_WRITE_##f, - BCH_WRITE_FLAGS() -#undef x -}; - -enum bch_write_flags { -#define x(f) BCH_WRITE_##f = BIT(__BCH_WRITE_##f), - BCH_WRITE_FLAGS() -#undef x -}; - static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op) { return op->watermark == BCH_WATERMARK_copygc diff --git a/fs/bcachefs/io_write_types.h b/fs/bcachefs/io_write_types.h index 3ef6df9145ef..5da4eb8bb6f6 100644 --- a/fs/bcachefs/io_write_types.h +++ b/fs/bcachefs/io_write_types.h @@ -13,6 +13,34 @@ #include <linux/llist.h> #include <linux/workqueue.h> +#define BCH_WRITE_FLAGS() \ + x(alloc_nowait) \ + x(cached) \ + x(data_encoded) \ + x(pages_stable) \ + x(pages_owned) \ + x(only_specified_devs) \ + x(wrote_data_inline) \ + x(check_enospc) \ + x(sync) \ + x(move) \ + x(in_worker) \ + x(submitted) \ + x(io_error) \ + x(convert_unwritten) + +enum __bch_write_flags { +#define x(f) __BCH_WRITE_##f, + BCH_WRITE_FLAGS() +#undef x +}; + +enum bch_write_flags { +#define x(f) BCH_WRITE_##f = BIT(__BCH_WRITE_##f), + BCH_WRITE_FLAGS() +#undef x +}; + struct bch_write_bio { struct_group(wbio, struct bch_fs *c; @@ -43,6 +71,10 @@ struct bch_write_op { void (*end_io)(struct bch_write_op *); u64 start_time; +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + unsigned list_idx; +#endif + unsigned written; /* sectors */ u16 flags; s16 error; /* dio write path expects it to hold -ERESTARTSYS... */ diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index bb45d3634194..09b70fd140a1 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -12,6 +12,7 @@ #include "btree_update.h" #include "btree_write_buffer.h" #include "buckets.h" +#include "enumerated_ref.h" #include "error.h" #include "journal.h" #include "journal_io.h" @@ -173,7 +174,7 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags) spin_unlock(&j->lock); prt_printf(&buf, bch2_fmt(c, "Journal stuck! 
Hava a pre-reservation but journal full (error %s)"), bch2_err_str(error)); - bch2_print_string_as_lines(KERN_ERR, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); printbuf_reset(&buf); bch2_journal_pins_to_text(&buf, j); @@ -331,16 +332,6 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t __bch2_journal_buf_put(j, le64_to_cpu(buf->data->seq)); } -void bch2_journal_halt(struct journal *j) -{ - spin_lock(&j->lock); - __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true); - if (!j->err_seq) - j->err_seq = journal_cur_seq(j); - journal_wake(j); - spin_unlock(&j->lock); -} - void bch2_journal_halt_locked(struct journal *j) { lockdep_assert_held(&j->lock); @@ -351,6 +342,13 @@ void bch2_journal_halt_locked(struct journal *j) journal_wake(j); } +void bch2_journal_halt(struct journal *j) +{ + spin_lock(&j->lock); + bch2_journal_halt_locked(j); + spin_unlock(&j->lock); +} + static bool journal_entry_want_write(struct journal *j) { bool ret = !journal_entry_is_open(j) || @@ -417,7 +415,7 @@ static int journal_entry_open(struct journal *j) if (atomic64_read(&j->seq) - j->seq_write_started == JOURNAL_STATE_BUF_NR) return -BCH_ERR_journal_max_open; - if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) { + if (unlikely(journal_cur_seq(j) >= JOURNAL_SEQ_MAX)) { bch_err(c, "cannot start: journal seq overflow"); if (bch2_fs_emergency_read_only_locked(c)) bch_err(c, "fatal error - emergency read only"); @@ -461,6 +459,14 @@ static int journal_entry_open(struct journal *j) atomic64_inc(&j->seq); journal_pin_list_init(fifo_push_ref(&j->pin), 1); + if (unlikely(bch2_journal_seq_is_blacklisted(c, journal_cur_seq(j), false))) { + bch_err(c, "attempting to open blacklisted journal seq %llu", + journal_cur_seq(j)); + if (bch2_fs_emergency_read_only_locked(c)) + bch_err(c, "fatal error - emergency read only"); + return -BCH_ERR_journal_shutdown; + } + BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq)); BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf); @@ -702,8 +708,10 @@ static unsigned max_dev_latency(struct bch_fs *c) { u64 nsecs = 0; - for_each_rw_member(c, ca) + rcu_read_lock(); + for_each_rw_member_rcu(c, ca) nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration); + rcu_read_unlock(); return nsecs_to_jiffies(nsecs); } @@ -746,7 +754,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res, struct printbuf buf = PRINTBUF; bch2_journal_debug_to_text(&buf, j); - bch2_print_string_as_lines(KERN_ERR, buf.buf); + bch2_print_str(c, KERN_ERR, buf.buf); prt_printf(&buf, bch2_fmt(c, "Journal stuck? 
Waited for 10 seconds, err %s"), bch2_err_str(ret)); printbuf_exit(&buf); @@ -990,11 +998,11 @@ int bch2_journal_meta(struct journal *j) { struct bch_fs *c = container_of(j, struct bch_fs, journal); - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_journal)) return -BCH_ERR_erofs_no_writes; int ret = __bch2_journal_meta(j); - bch2_write_ref_put(c, BCH_WRITE_REF_journal); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_journal); return ret; } @@ -1298,6 +1306,16 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs) { + struct bch_fs *c = ca->fs; + + if (!(ca->mi.data_allowed & BIT(BCH_DATA_journal))) + return 0; + + if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) { + bch_err(c, "cannot allocate journal, filesystem is an unresized image file"); + return -BCH_ERR_erofs_filesystem_full; + } + unsigned nr; int ret; @@ -1318,7 +1336,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs) min(1 << 13, (1 << 24) / ca->mi.bucket_size)); - ret = bch2_set_nr_journal_buckets_loop(ca->fs, ca, nr, new_fs); + ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, new_fs); err: bch_err_fn(ca, ret); return ret; @@ -1326,13 +1344,14 @@ err: int bch2_fs_journal_alloc(struct bch_fs *c) { - for_each_online_member(c, ca) { + for_each_online_member(c, ca, BCH_DEV_READ_REF_fs_journal_alloc) { if (ca->journal.nr) continue; int ret = bch2_dev_journal_alloc(ca, true); if (ret) { - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_fs_journal_alloc); return ret; } } @@ -1404,6 +1423,13 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq) bool had_entries = false; u64 last_seq = cur_seq, nr, seq; + /* + * + * XXX pick most recent non blacklisted sequence number + */ + + cur_seq = max(cur_seq, bch2_journal_last_blacklisted_seq(c)); + if (cur_seq >= JOURNAL_SEQ_MAX) { bch_err(c, "cannot start: journal seq overflow"); return -EINVAL; @@ -1429,13 +1455,11 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq) */ nr += nr / 4; - if (nr + 1 > j->pin.size) { - free_fifo(&j->pin); - init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL); - if (!j->pin.data) { - bch_err(c, "error reallocating journal fifo (%llu open entries)", nr); - return -BCH_ERR_ENOMEM_journal_pin_fifo; - } + nr = max(nr, JOURNAL_PIN); + init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL); + if (!j->pin.data) { + bch_err(c, "error reallocating journal fifo (%llu open entries)", nr); + return -BCH_ERR_ENOMEM_journal_pin_fifo; } j->replay_journal_seq = last_seq; @@ -1590,7 +1614,7 @@ void bch2_fs_journal_exit(struct journal *j) free_fifo(&j->pin); } -int bch2_fs_journal_init(struct journal *j) +void bch2_fs_journal_init_early(struct journal *j) { static struct lock_class_key res_key; @@ -1609,10 +1633,10 @@ int bch2_fs_journal_init(struct journal *j) atomic64_set(&j->reservations.counter, ((union journal_res_state) { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v); +} - if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) - return -BCH_ERR_ENOMEM_journal_pin_fifo; - +int bch2_fs_journal_init(struct journal *j) +{ j->free_buf_size = j->buf_size_want = JOURNAL_ENTRY_SIZE_MIN; j->free_buf = kvmalloc(j->free_buf_size, GFP_KERNEL); if (!j->free_buf) @@ -1621,8 +1645,6 @@ int bch2_fs_journal_init(struct journal *j) for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) j->buf[i].idx = i; - j->pin.front = j->pin.back = 1; - j->wq = 
alloc_workqueue("bcachefs_journal", WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512); if (!j->wq) diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h index 641e20c05a14..8ff00a0ec778 100644 --- a/fs/bcachefs/journal.h +++ b/fs/bcachefs/journal.h @@ -426,8 +426,8 @@ int bch2_journal_flush(struct journal *); bool bch2_journal_noflush_seq(struct journal *, u64, u64); int bch2_journal_meta(struct journal *); -void bch2_journal_halt(struct journal *); void bch2_journal_halt_locked(struct journal *); +void bch2_journal_halt(struct journal *); static inline int bch2_journal_error(struct journal *j) { @@ -458,6 +458,7 @@ void bch2_journal_set_replay_done(struct journal *); void bch2_dev_journal_exit(struct bch_dev *); int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *); void bch2_fs_journal_exit(struct journal *); +void bch2_fs_journal_init_early(struct journal *); int bch2_fs_journal_init(struct journal *); #endif /* _BCACHEFS_JOURNAL_H */ diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index ded18a94ed02..63bb207208b2 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -1219,7 +1219,7 @@ static CLOSURE_CALLBACK(bch2_journal_read_device) out: bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret); kvfree(buf.data); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_journal_read); closure_return(cl); return; err: @@ -1254,7 +1254,8 @@ int bch2_journal_read(struct bch_fs *c, if ((ca->mi.state == BCH_MEMBER_STATE_rw || ca->mi.state == BCH_MEMBER_STATE_ro) && - percpu_ref_tryget(&ca->io_ref[READ])) + enumerated_ref_tryget(&ca->io_ref[READ], + BCH_DEV_READ_REF_journal_read)) closure_call(&ca->journal.read, bch2_journal_read_device, system_unbound_wq, @@ -1405,7 +1406,7 @@ int bch2_journal_read(struct bch_fs *c, } genradix_for_each(&c->journal_entries, radix_iter, _i) { - struct bch_replicas_padded replicas = { + union bch_replicas_padded replicas = { .e.data_type = BCH_DATA_journal, .e.nr_devs = 0, .e.nr_required = 1, @@ -1466,6 +1467,7 @@ static void journal_advance_devs_to_next_bucket(struct journal *j, { struct bch_fs *c = container_of(j, struct bch_fs, journal); + rcu_read_lock(); darray_for_each(*devs, i) { struct bch_dev *ca = rcu_dereference(c->devs[*i]); if (!ca) @@ -1487,6 +1489,7 @@ static void journal_advance_devs_to_next_bucket(struct journal *j, ja->bucket_seq[ja->cur_idx] = le64_to_cpu(seq); } } + rcu_read_unlock(); } static void __journal_write_alloc(struct journal *j, @@ -1499,7 +1502,8 @@ static void __journal_write_alloc(struct journal *j, struct bch_fs *c = container_of(j, struct bch_fs, journal); darray_for_each(*devs, i) { - struct bch_dev *ca = rcu_dereference(c->devs[*i]); + struct bch_dev *ca = bch2_dev_get_ioref(c, *i, WRITE, + BCH_DEV_WRITE_REF_journal_write); if (!ca) continue; @@ -1513,8 +1517,10 @@ static void __journal_write_alloc(struct journal *j, ca->mi.state != BCH_MEMBER_STATE_rw || !ja->nr || bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) || - sectors > ja->sectors_free) + sectors > ja->sectors_free) { + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_journal_write); continue; + } bch2_dev_stripe_increment(ca, &j->wp.stripe); @@ -1537,15 +1543,8 @@ static void __journal_write_alloc(struct journal *j, } } -/** - * journal_write_alloc - decide where to write next journal entry - * - * @j: journal object - * @w: journal buf (entry to be written) - * - * Returns: 0 on success, or -BCH_ERR_insufficient_devices on failure - */ 
-static int journal_write_alloc(struct journal *j, struct journal_buf *w) +static int journal_write_alloc(struct journal *j, struct journal_buf *w, + unsigned *replicas) { struct bch_fs *c = container_of(j, struct bch_fs, journal); struct bch_devs_mask devs; @@ -1553,29 +1552,18 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w) unsigned sectors = vstruct_sectors(w->data, c->block_bits); unsigned target = c->opts.metadata_target ?: c->opts.foreground_target; - unsigned replicas = 0, replicas_want = - READ_ONCE(c->opts.metadata_replicas); + unsigned replicas_want = READ_ONCE(c->opts.metadata_replicas); unsigned replicas_need = min_t(unsigned, replicas_want, READ_ONCE(c->opts.metadata_replicas_required)); bool advance_done = false; - rcu_read_lock(); - - /* We might run more than once if we have to stop and do discards: */ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&w->key)); - bkey_for_each_ptr(ptrs, p) { - struct bch_dev *ca = bch2_dev_rcu_noerror(c, p->dev); - if (ca) - replicas += ca->mi.durability; - } - retry_target: devs = target_rw_devs(c, BCH_DATA_journal, target); devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); retry_alloc: - __journal_write_alloc(j, w, &devs_sorted, sectors, &replicas, replicas_want); + __journal_write_alloc(j, w, &devs_sorted, sectors, replicas, replicas_want); - if (likely(replicas >= replicas_want)) + if (likely(*replicas >= replicas_want)) goto done; if (!advance_done) { @@ -1584,18 +1572,16 @@ retry_alloc: goto retry_alloc; } - if (replicas < replicas_want && target) { + if (*replicas < replicas_want && target) { /* Retry from all devices: */ target = 0; advance_done = false; goto retry_target; } done: - rcu_read_unlock(); - BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX); - return replicas >= replicas_need ? 0 : -BCH_ERR_insufficient_journal_devices; + return *replicas >= replicas_need ? 
0 : -BCH_ERR_insufficient_journal_devices; } static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) @@ -1633,7 +1619,7 @@ static CLOSURE_CALLBACK(journal_write_done) closure_type(w, struct journal_buf, io); struct journal *j = container_of(w, struct journal, buf[w->idx]); struct bch_fs *c = container_of(j, struct bch_fs, journal); - struct bch_replicas_padded replicas; + union bch_replicas_padded replicas; u64 seq = le64_to_cpu(w->data->seq); int err = 0; @@ -1642,8 +1628,6 @@ static CLOSURE_CALLBACK(journal_write_done) : j->noflush_write_time, j->write_start_time); if (!w->devs_written.nr) { - if (!bch2_journal_error(j)) - bch_err(c, "unable to write journal to sufficient devices"); err = -BCH_ERR_journal_write_err; } else { bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, @@ -1651,8 +1635,20 @@ static CLOSURE_CALLBACK(journal_write_done) err = bch2_mark_replicas(c, &replicas.e); } - if (err) - bch2_fatal_error(c); + if (err && !bch2_journal_error(j)) { + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + if (err == -BCH_ERR_journal_write_err) + prt_printf(&buf, "unable to write journal to sufficient devices"); + else + prt_printf(&buf, "journal write error marking replicas: %s", bch2_err_str(err)); + + bch2_fs_emergency_read_only2(c, &buf); + + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + } closure_debug_destroy(cl); @@ -1770,7 +1766,7 @@ static void journal_write_endio(struct bio *bio) } closure_put(&w->io); - percpu_ref_put(&ca->io_ref[WRITE]); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_journal_write); } static CLOSURE_CALLBACK(journal_write_submit) @@ -1781,12 +1777,7 @@ static CLOSURE_CALLBACK(journal_write_submit) unsigned sectors = vstruct_sectors(w->data, c->block_bits); extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) { - struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); - if (!ca) { - /* XXX: fix this */ - bch_err(c, "missing device %u for journal write", ptr->dev); - continue; - } + struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal], sectors); @@ -1844,8 +1835,9 @@ static CLOSURE_CALLBACK(journal_write_preflush) } if (w->separate_flush) { - for_each_rw_member(c, ca) { - percpu_ref_get(&ca->io_ref[WRITE]); + for_each_rw_member(c, ca, BCH_DEV_WRITE_REF_journal_write) { + enumerated_ref_get(&ca->io_ref[WRITE], + BCH_DEV_WRITE_REF_journal_write); struct journal_device *ja = &ca->journal; struct bio *bio = &ja->bio[w->idx]->bio; @@ -1872,9 +1864,8 @@ static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) struct jset_entry *start, *end; struct jset *jset = w->data; struct journal_keys_to_wb wb = { NULL }; - unsigned sectors, bytes, u64s; + unsigned u64s; unsigned long btree_roots_have = 0; - bool validate_before_checksum = false; u64 seq = le64_to_cpu(jset->seq); int ret; @@ -1957,8 +1948,7 @@ static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) le32_add_cpu(&jset->u64s, u64s); - sectors = vstruct_sectors(jset, c->block_bits); - bytes = vstruct_bytes(jset); + unsigned sectors = vstruct_sectors(jset, c->block_bits); if (sectors > w->sectors) { bch2_fs_fatal_error(c, ": journal write overran available space, %zu > %u (extra %u reserved %u/%u)", @@ -1967,6 +1957,17 @@ static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) return -EINVAL; } + return 0; +} + +static int bch2_journal_write_checksum(struct journal *j, struct journal_buf *w) +{ + struct bch_fs *c = 
container_of(j, struct bch_fs, journal); + struct jset *jset = w->data; + u64 seq = le64_to_cpu(jset->seq); + bool validate_before_checksum = false; + int ret = 0; + jset->magic = cpu_to_le64(jset_magic(c)); jset->version = cpu_to_le32(c->sb.version); @@ -1989,7 +1990,7 @@ static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset->encrypted_start, vstruct_end(jset) - (void *) jset->encrypted_start); - if (bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret))) + if (bch2_fs_fatal_err_on(ret, c, "encrypting journal entry: %s", bch2_err_str(ret))) return ret; jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), @@ -1999,6 +2000,8 @@ static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) (ret = jset_validate(c, NULL, jset, 0, WRITE))) return ret; + unsigned sectors = vstruct_sectors(jset, c->block_bits); + unsigned bytes = vstruct_bytes(jset); memset((void *) jset + bytes, 0, (sectors << 9) - bytes); return 0; } @@ -2054,13 +2057,10 @@ CLOSURE_CALLBACK(bch2_journal_write) closure_type(w, struct journal_buf, io); struct journal *j = container_of(w, struct journal, buf[w->idx]); struct bch_fs *c = container_of(j, struct bch_fs, journal); - struct bch_replicas_padded replicas; - unsigned nr_rw_members = 0; + union bch_replicas_padded replicas; + unsigned nr_rw_members = dev_mask_nr(&c->rw_devs[BCH_DATA_journal]); int ret; - for_each_rw_member(c, ca) - nr_rw_members++; - BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb)); BUG_ON(!w->write_started); BUG_ON(w->write_allocated); @@ -2074,7 +2074,8 @@ CLOSURE_CALLBACK(bch2_journal_write) ret = bch2_journal_write_pick_flush(j, w); spin_unlock(&j->lock); - if (ret) + + if (unlikely(ret)) goto err; mutex_lock(&j->buf_lock); @@ -2082,43 +2083,34 @@ CLOSURE_CALLBACK(bch2_journal_write) ret = bch2_journal_write_prep(j, w); mutex_unlock(&j->buf_lock); - if (ret) - goto err; - j->entry_bytes_written += vstruct_bytes(w->data); + if (unlikely(ret)) + goto err; + unsigned replicas_allocated = 0; while (1) { - spin_lock(&j->lock); - ret = journal_write_alloc(j, w); + ret = journal_write_alloc(j, w, &replicas_allocated); if (!ret || !j->can_discard) break; - spin_unlock(&j->lock); bch2_journal_do_discards(j); } - if (ret && !bch2_journal_error(j)) { - struct printbuf buf = PRINTBUF; - buf.atomic++; + if (unlikely(ret)) + goto err_allocate_write; - __bch2_journal_debug_to_text(&buf, j); - spin_unlock(&j->lock); - prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu for %zu sectors: %s"), - le64_to_cpu(w->data->seq), - vstruct_sectors(w->data, c->block_bits), - bch2_err_str(ret)); - bch2_print_string_as_lines(KERN_ERR, buf.buf); - printbuf_exit(&buf); - } - if (ret) + ret = bch2_journal_write_checksum(j, w); + if (unlikely(ret)) goto err; + spin_lock(&j->lock); /* * write is allocated, no longer need to account for it in * bch2_journal_space_available(): */ w->sectors = 0; w->write_allocated = true; + j->entry_bytes_written += vstruct_bytes(w->data); /* * journal entry has been compacted and allocated, recalculate space @@ -2130,9 +2122,6 @@ CLOSURE_CALLBACK(bch2_journal_write) w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key)); - if (c->opts.nochanges) - goto no_io; - /* * Mark journal replicas before we submit the write to guarantee * recovery will find the journal entries after a crash. 
@@ -2143,15 +2132,33 @@ CLOSURE_CALLBACK(bch2_journal_write) if (ret) goto err; + if (c->opts.nochanges) + goto no_io; + if (!JSET_NO_FLUSH(w->data)) continue_at(cl, journal_write_preflush, j->wq); else continue_at(cl, journal_write_submit, j->wq); return; -no_io: - continue_at(cl, journal_write_done, j->wq); - return; +err_allocate_write: + if (!bch2_journal_error(j)) { + struct printbuf buf = PRINTBUF; + + bch2_journal_debug_to_text(&buf, j); + prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu for %zu sectors: %s"), + le64_to_cpu(w->data->seq), + vstruct_sectors(w->data, c->block_bits), + bch2_err_str(ret)); + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + } err: bch2_fatal_error(c); +no_io: + extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) { + struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev); + enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_journal_write); + } + continue_at(cl, journal_write_done, j->wq); } diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c index cc00b0fc40d8..70f36f6bc482 100644 --- a/fs/bcachefs/journal_reclaim.c +++ b/fs/bcachefs/journal_reclaim.c @@ -215,18 +215,20 @@ void bch2_journal_space_available(struct journal *j) j->can_discard = can_discard; if (nr_online < metadata_replicas_required(c)) { - struct printbuf buf = PRINTBUF; - buf.atomic++; - prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n" - "rw journal devs:", nr_online, metadata_replicas_required(c)); - - rcu_read_lock(); - for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) - prt_printf(&buf, " %s", ca->name); - rcu_read_unlock(); - - bch_err(c, "%s", buf.buf); - printbuf_exit(&buf); + if (!(c->sb.features & BIT_ULL(BCH_FEATURE_small_image))) { + struct printbuf buf = PRINTBUF; + buf.atomic++; + prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n" + "rw journal devs:", nr_online, metadata_replicas_required(c)); + + rcu_read_lock(); + for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) + prt_printf(&buf, " %s", ca->name); + rcu_read_unlock(); + + bch_err(c, "%s", buf.buf); + printbuf_exit(&buf); + } ret = -BCH_ERR_insufficient_journal_devices; goto out; } @@ -293,12 +295,12 @@ void bch2_journal_do_discards(struct journal *j) mutex_lock(&j->discard_lock); - for_each_rw_member(c, ca) { + for_each_rw_member(c, ca, BCH_DEV_WRITE_REF_journal_do_discards) { struct journal_device *ja = &ca->journal; while (should_discard_bucket(j, ja)) { if (!c->opts.nochanges && - ca->mi.discard && + bch2_discard_opt_enabled(c, ca) && bdev_max_discard_sectors(ca->disk_sb.bdev)) blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, @@ -625,7 +627,8 @@ static u64 journal_seq_to_flush(struct journal *j) spin_lock(&j->lock); - for_each_rw_member(c, ca) { + rcu_read_lock(); + for_each_rw_member_rcu(c, ca) { struct journal_device *ja = &ca->journal; unsigned nr_buckets, bucket_to_flush; @@ -635,12 +638,11 @@ static u64 journal_seq_to_flush(struct journal *j) /* Try to keep the journal at most half full: */ nr_buckets = ja->nr / 2; - nr_buckets = min(nr_buckets, ja->nr); - bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr; seq_to_flush = max(seq_to_flush, ja->bucket_seq[bucket_to_flush]); } + rcu_read_unlock(); /* Also flush if the pin fifo is more than half full */ seq_to_flush = max_t(s64, seq_to_flush, @@ -699,6 +701,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked) if (ret) break; + /* XXX shove journal 
discards off to another thread */ bch2_journal_do_discards(j); seq_to_flush = journal_seq_to_flush(j); @@ -961,7 +964,7 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx) seq = 0; spin_lock(&j->lock); while (!ret) { - struct bch_replicas_padded replicas; + union bch_replicas_padded replicas; seq = max(seq, journal_last_seq(j)); if (seq >= j->pin.back) diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c index e463d2d95359..c5a7d800a0f5 100644 --- a/fs/bcachefs/journal_seq_blacklist.c +++ b/fs/bcachefs/journal_seq_blacklist.c @@ -130,6 +130,16 @@ bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq, return true; } +u64 bch2_journal_last_blacklisted_seq(struct bch_fs *c) +{ + struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table; + + if (!t || !t->nr) + return 0; + + return t->entries[eytzinger0_last(t->nr)].end - 1; +} + int bch2_blacklist_table_initialize(struct bch_fs *c) { struct bch_sb_field_journal_seq_blacklist *bl = diff --git a/fs/bcachefs/journal_seq_blacklist.h b/fs/bcachefs/journal_seq_blacklist.h index d47636f96fdc..f06942ccfcdd 100644 --- a/fs/bcachefs/journal_seq_blacklist.h +++ b/fs/bcachefs/journal_seq_blacklist.h @@ -12,6 +12,7 @@ blacklist_nr_entries(struct bch_sb_field_journal_seq_blacklist *bl) } bool bch2_journal_seq_is_blacklisted(struct bch_fs *, u64, bool); +u64 bch2_journal_last_blacklisted_seq(struct bch_fs *); int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64, u64); int bch2_blacklist_table_initialize(struct bch_fs *); diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h index 8e0eba776b9d..51104bbb99da 100644 --- a/fs/bcachefs/journal_types.h +++ b/fs/bcachefs/journal_types.h @@ -151,8 +151,6 @@ enum journal_flags { #undef x }; -typedef DARRAY(u64) darray_u64; - struct journal_bio { struct bch_dev *ca; unsigned buf_idx; diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c index 90dcf80bd64a..bb7a92270c09 100644 --- a/fs/bcachefs/migrate.c +++ b/fs/bcachefs/migrate.c @@ -4,10 +4,13 @@ */ #include "bcachefs.h" +#include "backpointers.h" #include "bkey_buf.h" #include "btree_update.h" #include "btree_update_interior.h" +#include "btree_write_buffer.h" #include "buckets.h" +#include "ec.h" #include "errcode.h" #include "extents.h" #include "io_write.h" @@ -20,7 +23,7 @@ #include "super-io.h" static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k, - unsigned dev_idx, int flags, bool metadata) + unsigned dev_idx, unsigned flags, bool metadata) { unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas; unsigned lost = metadata ? 
BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST; @@ -37,11 +40,28 @@ static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k, return 0; } +static int drop_btree_ptrs(struct btree_trans *trans, struct btree_iter *iter, + struct btree *b, unsigned dev_idx, unsigned flags) +{ + struct bch_fs *c = trans->c; + struct bkey_buf k; + + bch2_bkey_buf_init(&k); + bch2_bkey_buf_copy(&k, c, &b->key); + + int ret = drop_dev_ptrs(c, bkey_i_to_s(k.k), dev_idx, flags, true) ?: + bch2_btree_node_update_key(trans, iter, b, k.k, 0, false); + + bch_err_fn(c, ret); + bch2_bkey_buf_exit(&k, c); + return ret; +} + static int bch2_dev_usrdata_drop_key(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, unsigned dev_idx, - int flags) + unsigned flags) { struct bch_fs *c = trans->c; struct bkey_i *n; @@ -77,9 +97,27 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans, return 0; } +static int bch2_dev_btree_drop_key(struct btree_trans *trans, + struct bkey_s_c_backpointer bp, + unsigned dev_idx, + struct bkey_buf *last_flushed, + unsigned flags) +{ + struct btree_iter iter; + struct btree *b = bch2_backpointer_get_node(trans, bp, &iter, last_flushed); + int ret = PTR_ERR_OR_ZERO(b); + if (ret) + return ret == -BCH_ERR_backpointer_to_overwritten_btree_node ? 0 : ret; + + ret = drop_btree_ptrs(trans, &iter, b, dev_idx, flags); + + bch2_trans_iter_exit(trans, &iter); + return ret; +} + static int bch2_dev_usrdata_drop(struct bch_fs *c, struct progress_indicator_state *progress, - unsigned dev_idx, int flags) + unsigned dev_idx, unsigned flags) { struct btree_trans *trans = bch2_trans_get(c); enum btree_id id; @@ -106,7 +144,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, static int bch2_dev_metadata_drop(struct bch_fs *c, struct progress_indicator_state *progress, - unsigned dev_idx, int flags) + unsigned dev_idx, unsigned flags) { struct btree_trans *trans; struct btree_iter iter; @@ -137,20 +175,12 @@ retry: if (!bch2_bkey_has_device_c(bkey_i_to_s_c(&b->key), dev_idx)) goto next; - bch2_bkey_buf_copy(&k, c, &b->key); - - ret = drop_dev_ptrs(c, bkey_i_to_s(k.k), - dev_idx, flags, true); - if (ret) - break; - - ret = bch2_btree_node_update_key(trans, &iter, b, k.k, 0, false); + ret = drop_btree_ptrs(trans, &iter, b, dev_idx, flags); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { ret = 0; continue; } - bch_err_msg(c, ret, "updating btree node key"); if (ret) break; next: @@ -176,7 +206,66 @@ err: return ret; } -int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags) +static int data_drop_bp(struct btree_trans *trans, unsigned dev_idx, + struct bkey_s_c_backpointer bp, struct bkey_buf *last_flushed, + unsigned flags) +{ + struct btree_iter iter; + struct bkey_s_c k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, + last_flushed); + int ret = bkey_err(k); + if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) + return 0; + if (ret) + return ret; + + if (!k.k || !bch2_bkey_has_device_c(k, dev_idx)) + goto out; + + /* + * XXX: pass flags arg to invalidate_stripe_to_dev and handle it + * properly + */ + + if (bkey_is_btree_ptr(k.k)) + ret = bch2_dev_btree_drop_key(trans, bp, dev_idx, last_flushed, flags); + else if (k.k->type == KEY_TYPE_stripe) + ret = bch2_invalidate_stripe_to_dev(trans, &iter, k, dev_idx, flags); + else + ret = bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags); +out: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +int bch2_dev_data_drop_by_backpointers(struct bch_fs *c, unsigned dev_idx, 
unsigned flags) +{ + struct btree_trans *trans = bch2_trans_get(c); + + struct bkey_buf last_flushed; + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + + int ret = bch2_btree_write_buffer_flush_sync(trans) ?: + for_each_btree_key_max_commit(trans, iter, BTREE_ID_backpointers, + POS(dev_idx, 0), + POS(dev_idx, U64_MAX), 0, k, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + if (k.k->type != KEY_TYPE_backpointer) + continue; + + data_drop_bp(trans, dev_idx, bkey_s_c_to_backpointer(k), + &last_flushed, flags); + + })); + + bch2_bkey_buf_exit(&last_flushed, trans->c); + bch2_trans_put(trans); + bch_err_fn(c, ret); + return ret; +} + +int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, unsigned flags) { struct progress_indicator_state progress; bch2_progress_init(&progress, c, diff --git a/fs/bcachefs/migrate.h b/fs/bcachefs/migrate.h index 027efaa0d575..30018140711b 100644 --- a/fs/bcachefs/migrate.h +++ b/fs/bcachefs/migrate.h @@ -2,6 +2,7 @@ #ifndef _BCACHEFS_MIGRATE_H #define _BCACHEFS_MIGRATE_H -int bch2_dev_data_drop(struct bch_fs *, unsigned, int); +int bch2_dev_data_drop_by_backpointers(struct bch_fs *, unsigned, unsigned); +int bch2_dev_data_drop(struct bch_fs *, unsigned, unsigned); #endif /* _BCACHEFS_MIGRATE_H */ diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index dfdbb9259985..79f4722621d5 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -67,7 +67,7 @@ static void trace_io_move_read2(struct bch_fs *c, struct bkey_s_c k) struct moving_io { struct list_head read_list; struct list_head io_list; - struct move_bucket_in_flight *b; + struct move_bucket *b; struct closure cl; bool read_completed; @@ -109,7 +109,6 @@ static void move_write_done(struct bch_write_op *op) struct printbuf buf = PRINTBUF; bch2_write_op_to_text(&buf, op); - prt_printf(&buf, "ret\t%s\n", bch2_err_str(op->error)); trace_io_move_write_fail(c, buf.buf); printbuf_exit(&buf); } @@ -126,26 +125,40 @@ static void move_write_done(struct bch_write_op *op) static void move_write(struct moving_io *io) { + struct bch_fs *c = io->write.op.c; struct moving_context *ctxt = io->write.ctxt; + struct bch_read_bio *rbio = &io->write.rbio; if (ctxt->stats) { - if (io->write.rbio.bio.bi_status) + if (rbio->bio.bi_status) atomic64_add(io->write.rbio.bvec_iter.bi_size >> 9, &ctxt->stats->sectors_error_uncorrected); - else if (io->write.rbio.saw_error) + else if (rbio->saw_error) atomic64_add(io->write.rbio.bvec_iter.bi_size >> 9, &ctxt->stats->sectors_error_corrected); } - if (unlikely(io->write.rbio.ret || - io->write.rbio.bio.bi_status || - io->write.data_opts.scrub)) { + /* + * If the extent has been bitrotted, we're going to have to give it a + * new checksum in order to move it - but the poison bit will ensure + * that userspace still gets the appropriate error. 
+ */ + if (unlikely(rbio->ret == -BCH_ERR_data_read_csum_err && + (bch2_bkey_extent_flags(bkey_i_to_s_c(io->write.k.k)) & BIT_ULL(BCH_EXTENT_FLAG_poisoned)))) { + struct bch_extent_crc_unpacked crc = rbio->pick.crc; + struct nonce nonce = extent_nonce(rbio->version, crc); + + rbio->pick.crc.csum = bch2_checksum_bio(c, rbio->pick.crc.csum_type, + nonce, &rbio->bio); + rbio->ret = 0; + } + + if (unlikely(rbio->ret || io->write.data_opts.scrub)) { move_free(io); return; } if (trace_io_move_write_enabled()) { - struct bch_fs *c = io->write.op.c; struct printbuf buf = PRINTBUF; bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k)); @@ -275,7 +288,7 @@ void bch2_move_stats_init(struct bch_move_stats *stats, const char *name) } int bch2_move_extent(struct moving_context *ctxt, - struct move_bucket_in_flight *bucket_in_flight, + struct move_bucket *bucket_in_flight, struct btree_iter *iter, struct bkey_s_c k, struct bch_io_opts io_opts, @@ -398,7 +411,7 @@ err: return ret; } -static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, +struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, struct per_snapshot_io_opts *io_opts, struct bpos extent_pos, /* extent_iter, extent_k may be in reflink btree */ struct btree_iter *extent_iter, @@ -409,6 +422,9 @@ static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, struct bch_io_opts *opts_ret = &io_opts->fs_io_opts; int ret = 0; + if (extent_iter->min_depth) + return opts_ret; + if (extent_k.k->type == KEY_TYPE_reflink_v) goto out; @@ -559,11 +575,11 @@ static struct bkey_s_c bch2_lookup_indirect_extent_for_move(struct btree_trans * return k; } -static int bch2_move_data_btree(struct moving_context *ctxt, - struct bpos start, - struct bpos end, - move_pred_fn pred, void *arg, - enum btree_id btree_id) +int bch2_move_data_btree(struct moving_context *ctxt, + struct bpos start, + struct bpos end, + move_pred_fn pred, void *arg, + enum btree_id btree_id, unsigned level) { struct btree_trans *trans = ctxt->trans; struct bch_fs *c = trans->c; @@ -589,11 +605,56 @@ static int bch2_move_data_btree(struct moving_context *ctxt, ctxt->stats->pos = BBPOS(btree_id, start); } +retry_root: bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &iter, btree_id, start, - BTREE_ITER_prefetch| - BTREE_ITER_not_extents| - BTREE_ITER_all_snapshots); + + if (level == bch2_btree_id_root(c, btree_id)->level + 1) { + bch2_trans_node_iter_init(trans, &iter, btree_id, start, 0, level - 1, + BTREE_ITER_prefetch| + BTREE_ITER_not_extents| + BTREE_ITER_all_snapshots); + struct btree *b = bch2_btree_iter_peek_node(trans, &iter); + ret = PTR_ERR_OR_ZERO(b); + if (ret) + goto root_err; + + if (b != btree_node_root(c, b)) { + bch2_trans_iter_exit(trans, &iter); + goto retry_root; + } + + k = bkey_i_to_s_c(&b->key); + + io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, + iter.pos, &iter, k); + ret = PTR_ERR_OR_ZERO(io_opts); + if (ret) + goto root_err; + + memset(&data_opts, 0, sizeof(data_opts)); + if (!pred(c, arg, iter.btree_id, k, io_opts, &data_opts)) + goto out; + + + if (!data_opts.scrub) + ret = bch2_btree_node_rewrite_pos(trans, btree_id, level, + k.k->p, data_opts.target, 0); + else + ret = bch2_btree_node_scrub(trans, btree_id, level, k, data_opts.read_dev); + +root_err: + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { + bch2_trans_iter_exit(trans, &iter); + goto retry_root; + } + + goto out; + } + + bch2_trans_node_iter_init(trans, &iter, btree_id, start, 0, level, + BTREE_ITER_prefetch| + 
BTREE_ITER_not_extents| + BTREE_ITER_all_snapshots); if (ctxt->rate) bch2_ratelimit_reset(ctxt->rate); @@ -613,7 +674,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt, if (ret) break; - if (bkey_ge(bkey_start_pos(k.k), end)) + if (bkey_gt(bkey_start_pos(k.k), end)) break; if (ctxt->stats) @@ -653,7 +714,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt, continue; memset(&data_opts, 0, sizeof(data_opts)); - if (!pred(c, arg, k, io_opts, &data_opts)) + if (!pred(c, arg, extent_iter->btree_id, k, io_opts, &data_opts)) goto next; /* @@ -663,7 +724,14 @@ static int bch2_move_data_btree(struct moving_context *ctxt, bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); - ret2 = bch2_move_extent(ctxt, NULL, extent_iter, k, *io_opts, data_opts); + if (!level) + ret2 = bch2_move_extent(ctxt, NULL, extent_iter, k, *io_opts, data_opts); + else if (!data_opts.scrub) + ret2 = bch2_btree_node_rewrite_pos(trans, btree_id, level, + k.k->p, data_opts.target, 0); + else + ret2 = bch2_btree_node_scrub(trans, btree_id, level, k, data_opts.read_dev); + if (ret2) { if (bch2_err_matches(ret2, BCH_ERR_transaction_restart)) continue; @@ -681,9 +749,10 @@ next: if (ctxt->stats) atomic64_add(k.k->size, &ctxt->stats->sectors_seen); next_nondata: - bch2_btree_iter_advance(trans, &iter); + if (!bch2_btree_iter_advance(trans, &iter)) + break; } - +out: bch2_trans_iter_exit(trans, &reflink_iter); bch2_trans_iter_exit(trans, &iter); bch2_bkey_buf_exit(&sk, c); @@ -713,7 +782,7 @@ int __bch2_move_data(struct moving_context *ctxt, ret = bch2_move_data_btree(ctxt, id == start.btree ? start.pos : POS_MIN, id == end.btree ? end.pos : POS_MAX, - pred, arg, id); + pred, arg, id, 0); if (ret) break; } @@ -740,11 +809,12 @@ int bch2_move_data(struct bch_fs *c, } static int __bch2_move_data_phys(struct moving_context *ctxt, - struct move_bucket_in_flight *bucket_in_flight, + struct move_bucket *bucket_in_flight, unsigned dev, u64 bucket_start, u64 bucket_end, unsigned data_types, + bool copygc, move_pred_fn pred, void *arg) { struct btree_trans *trans = ctxt->trans; @@ -755,6 +825,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, struct bkey_buf sk; struct bkey_s_c k; struct bkey_buf last_flushed; + u64 check_mismatch_done = bucket_start; int ret = 0; struct bch_dev *ca = bch2_dev_tryget(c, dev); @@ -765,8 +836,6 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, struct bpos bp_start = bucket_pos_to_bp_start(ca, POS(dev, bucket_start)); struct bpos bp_end = bucket_pos_to_bp_end(ca, POS(dev, bucket_end)); - bch2_dev_put(ca); - ca = NULL; bch2_bkey_buf_init(&last_flushed); bkey_init(&last_flushed.k->k); @@ -779,10 +848,6 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_start, 0); - bch_err_msg(c, ret, "looking up alloc key"); - if (ret) - goto err; - ret = bch2_btree_write_buffer_tryflush(trans); if (!bch2_err_matches(ret, EROFS)) bch_err_msg(c, ret, "flushing btree write buffer"); @@ -805,6 +870,14 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, if (!k.k || bkey_gt(k.k->p, bp_end)) break; + if (check_mismatch_done < bp_pos_to_bucket(ca, k.k->p).offset) { + while (check_mismatch_done < bp_pos_to_bucket(ca, k.k->p).offset) { + bch2_check_bucket_backpointer_mismatch(trans, ca, check_mismatch_done++, + copygc, &last_flushed); + } + continue; + } + if (k.k->type != KEY_TYPE_backpointer) goto next; @@ -837,7 +910,7 @@ static int __bch2_move_data_phys(struct moving_context 
*ctxt, } struct data_update_opts data_opts = {}; - if (!pred(c, arg, k, &io_opts, &data_opts)) { + if (!pred(c, arg, bp.v->btree_id, k, &io_opts, &data_opts)) { bch2_trans_iter_exit(trans, &iter); goto next; } @@ -858,7 +931,8 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, if (!bp.v->level) ret = bch2_move_extent(ctxt, bucket_in_flight, &iter, k, io_opts, data_opts); else if (!data_opts.scrub) - ret = bch2_btree_node_rewrite_pos(trans, bp.v->btree_id, bp.v->level, k.k->p, 0); + ret = bch2_btree_node_rewrite_pos(trans, bp.v->btree_id, bp.v->level, + k.k->p, data_opts.target, 0); else ret = bch2_btree_node_scrub(trans, bp.v->btree_id, bp.v->level, k, data_opts.read_dev); @@ -879,33 +953,41 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, next: bch2_btree_iter_advance(trans, &bp_iter); } + + while (check_mismatch_done < bucket_end) + bch2_check_bucket_backpointer_mismatch(trans, ca, check_mismatch_done++, + copygc, &last_flushed); err: bch2_trans_iter_exit(trans, &bp_iter); bch2_bkey_buf_exit(&sk, c); bch2_bkey_buf_exit(&last_flushed, c); + bch2_dev_put(ca); return ret; } -static int bch2_move_data_phys(struct bch_fs *c, - unsigned dev, - u64 start, - u64 end, - unsigned data_types, - struct bch_ratelimit *rate, - struct bch_move_stats *stats, - struct write_point_specifier wp, - bool wait_on_copygc, - move_pred_fn pred, void *arg) +int bch2_move_data_phys(struct bch_fs *c, + unsigned dev, + u64 start, + u64 end, + unsigned data_types, + struct bch_ratelimit *rate, + struct bch_move_stats *stats, + struct write_point_specifier wp, + bool wait_on_copygc, + move_pred_fn pred, void *arg) { struct moving_context ctxt; bch2_trans_run(c, bch2_btree_write_buffer_flush_sync(trans)); bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc); - ctxt.stats->phys = true; - ctxt.stats->data_type = (int) DATA_PROGRESS_DATA_TYPE_phys; + if (ctxt.stats) { + ctxt.stats->phys = true; + ctxt.stats->data_type = (int) DATA_PROGRESS_DATA_TYPE_phys; + } - int ret = __bch2_move_data_phys(&ctxt, NULL, dev, start, end, data_types, pred, arg); + int ret = __bch2_move_data_phys(&ctxt, NULL, dev, start, end, + data_types, false, pred, arg); bch2_moving_ctxt_exit(&ctxt); return ret; @@ -917,7 +999,8 @@ struct evacuate_bucket_arg { struct data_update_opts data_opts; }; -static bool evacuate_bucket_pred(struct bch_fs *c, void *_arg, struct bkey_s_c k, +static bool evacuate_bucket_pred(struct bch_fs *c, void *_arg, + enum btree_id btree, struct bkey_s_c k, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { @@ -938,9 +1021,9 @@ static bool evacuate_bucket_pred(struct bch_fs *c, void *_arg, struct bkey_s_c k } int bch2_evacuate_bucket(struct moving_context *ctxt, - struct move_bucket_in_flight *bucket_in_flight, - struct bpos bucket, int gen, - struct data_update_opts data_opts) + struct move_bucket *bucket_in_flight, + struct bpos bucket, int gen, + struct data_update_opts data_opts) { struct evacuate_bucket_arg arg = { bucket, gen, data_opts, }; @@ -949,6 +1032,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, bucket.offset, bucket.offset + 1, ~0, + true, evacuate_bucket_pred, &arg); } @@ -1006,7 +1090,7 @@ retry: if (!pred(c, arg, b, &io_opts, &data_opts)) goto next; - ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret; + ret = bch2_btree_node_rewrite(trans, &iter, b, 0, 0) ?: ret; if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) @@ -1031,7 +1115,7 @@ next: } static bool rereplicate_pred(struct bch_fs *c, void *arg, - struct 
bkey_s_c k, + enum btree_id btree, struct bkey_s_c k, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { @@ -1063,7 +1147,7 @@ static bool rereplicate_pred(struct bch_fs *c, void *arg, } static bool migrate_pred(struct bch_fs *c, void *arg, - struct bkey_s_c k, + enum btree_id btree, struct bkey_s_c k, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { @@ -1090,7 +1174,7 @@ static bool rereplicate_btree_pred(struct bch_fs *c, void *arg, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { - return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts); + return rereplicate_pred(c, arg, b->c.btree_id, bkey_i_to_s_c(&b->key), io_opts, data_opts); } /* @@ -1146,7 +1230,7 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats) } static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg, - struct bkey_s_c k, + enum btree_id btree, struct bkey_s_c k, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { @@ -1179,11 +1263,12 @@ static bool drop_extra_replicas_btree_pred(struct bch_fs *c, void *arg, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { - return drop_extra_replicas_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts); + return drop_extra_replicas_pred(c, arg, b->c.btree_id, bkey_i_to_s_c(&b->key), + io_opts, data_opts); } static bool scrub_pred(struct bch_fs *c, void *_arg, - struct bkey_s_c k, + enum btree_id btree, struct bkey_s_c k, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h index 51e0505a8156..86b80499ac55 100644 --- a/fs/bcachefs/move.h +++ b/fs/bcachefs/move.h @@ -72,7 +72,7 @@ do { \ break; \ } while (1) -typedef bool (*move_pred_fn)(struct bch_fs *, void *, struct bkey_s_c, +typedef bool (*move_pred_fn)(struct bch_fs *, void *, enum btree_id, struct bkey_s_c, struct bch_io_opts *, struct data_update_opts *); extern const char * const bch2_data_ops_strs[]; @@ -116,12 +116,18 @@ int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *); int bch2_move_extent(struct moving_context *, - struct move_bucket_in_flight *, + struct move_bucket *, struct btree_iter *, struct bkey_s_c, struct bch_io_opts, struct data_update_opts); +struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *, + struct per_snapshot_io_opts *, struct bpos, + struct btree_iter *, struct bkey_s_c); + +int bch2_move_data_btree(struct moving_context *, struct bpos, struct bpos, + move_pred_fn, void *, enum btree_id, unsigned); int __bch2_move_data(struct moving_context *, struct bbpos, struct bbpos, @@ -135,8 +141,13 @@ int bch2_move_data(struct bch_fs *, bool, move_pred_fn, void *); +int bch2_move_data_phys(struct bch_fs *, unsigned, u64, u64, unsigned, + struct bch_ratelimit *, struct bch_move_stats *, + struct write_point_specifier, bool, + move_pred_fn, void *); + int bch2_evacuate_bucket(struct moving_context *, - struct move_bucket_in_flight *, + struct move_bucket *, struct bpos, int, struct data_update_opts); int bch2_data_job(struct bch_fs *, diff --git a/fs/bcachefs/move_types.h b/fs/bcachefs/move_types.h index 807f779f6f76..c5c62cd600de 100644 --- a/fs/bcachefs/move_types.h +++ b/fs/bcachefs/move_types.h @@ -36,14 +36,10 @@ struct move_bucket_key { }; struct move_bucket { + struct move_bucket *next; + struct rhash_head hash; struct move_bucket_key k; unsigned sectors; -}; - -struct move_bucket_in_flight { - struct 
move_bucket_in_flight *next; - struct rhash_head hash; - struct move_bucket bucket; atomic_t count; }; diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index 96873372b516..e7a2a13554d7 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -8,6 +8,7 @@ #include "bcachefs.h" #include "alloc_background.h" #include "alloc_foreground.h" +#include "backpointers.h" #include "btree_iter.h" #include "btree_update.h" #include "btree_write_buffer.h" @@ -27,47 +28,32 @@ #include <linux/wait.h> struct buckets_in_flight { - struct rhashtable table; - struct move_bucket_in_flight *first; - struct move_bucket_in_flight *last; - size_t nr; - size_t sectors; + struct rhashtable table; + struct move_bucket *first; + struct move_bucket *last; + size_t nr; + size_t sectors; + + DARRAY(struct move_bucket *) to_evacuate; }; static const struct rhashtable_params bch_move_bucket_params = { - .head_offset = offsetof(struct move_bucket_in_flight, hash), - .key_offset = offsetof(struct move_bucket_in_flight, bucket.k), + .head_offset = offsetof(struct move_bucket, hash), + .key_offset = offsetof(struct move_bucket, k), .key_len = sizeof(struct move_bucket_key), .automatic_shrinking = true, }; -static struct move_bucket_in_flight * -move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b) +static void move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket *b) { - struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL); - int ret; - - if (!new) - return ERR_PTR(-ENOMEM); - - new->bucket = b; - - ret = rhashtable_lookup_insert_fast(&list->table, &new->hash, - bch_move_bucket_params); - if (ret) { - kfree(new); - return ERR_PTR(ret); - } - if (!list->first) - list->first = new; + list->first = b; else - list->last->next = new; + list->last->next = b; - list->last = new; + list->last = b; list->nr++; - list->sectors += b.sectors; - return new; + list->sectors += b->sectors; } static int bch2_bucket_is_movable(struct btree_trans *trans, @@ -89,9 +75,12 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, if (!ca) goto out; + if (bch2_bucket_bitmap_test(&ca->bucket_backpointer_mismatch, b->k.bucket.offset)) + goto out; + if (ca->mi.state != BCH_MEMBER_STATE_rw || !bch2_dev_is_online(ca)) - goto out_put; + goto out; struct bch_alloc_v4 _a; const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a); @@ -100,19 +89,26 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); ret = lru_idx && lru_idx <= time; -out_put: - bch2_dev_put(ca); out: + bch2_dev_put(ca); bch2_trans_iter_exit(trans, &iter); return ret; } +static void move_bucket_free(struct buckets_in_flight *list, + struct move_bucket *b) +{ + int ret = rhashtable_remove_fast(&list->table, &b->hash, + bch_move_bucket_params); + BUG_ON(ret); + kfree(b); +} + static void move_buckets_wait(struct moving_context *ctxt, struct buckets_in_flight *list, bool flush) { - struct move_bucket_in_flight *i; - int ret; + struct move_bucket *i; while ((i = list->first)) { if (flush) @@ -126,12 +122,9 @@ static void move_buckets_wait(struct moving_context *ctxt, list->last = NULL; list->nr--; - list->sectors -= i->bucket.sectors; + list->sectors -= i->sectors; - ret = rhashtable_remove_fast(&list->table, &i->hash, - bch_move_bucket_params); - BUG_ON(ret); - kfree(i); + move_bucket_free(list, i); } bch2_trans_unlock_long(ctxt->trans); @@ -143,11 +136,8 @@ static bool bucket_in_flight(struct buckets_in_flight *list, return 
rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params); } -typedef DARRAY(struct move_bucket) move_buckets; - static int bch2_copygc_get_buckets(struct moving_context *ctxt, - struct buckets_in_flight *buckets_in_flight, - move_buckets *buckets) + struct buckets_in_flight *buckets_in_flight) { struct btree_trans *trans = ctxt->trans; struct bch_fs *c = trans->c; @@ -164,8 +154,6 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt, if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret))) return ret; - bch2_trans_begin(trans); - ret = for_each_btree_key_max(trans, iter, BTREE_ID_lru, lru_pos(BCH_LRU_BUCKET_FRAGMENTATION, 0, 0), lru_pos(BCH_LRU_BUCKET_FRAGMENTATION, U64_MAX, LRU_TIME_MAX), @@ -184,20 +172,34 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt, else if (bucket_in_flight(buckets_in_flight, b.k)) in_flight++; else { - ret2 = darray_push(buckets, b); + struct move_bucket *b_i = kmalloc(sizeof(*b_i), GFP_KERNEL); + ret2 = b_i ? 0 : -ENOMEM; if (ret2) goto err; + + *b_i = b; + + ret2 = darray_push(&buckets_in_flight->to_evacuate, b_i); + if (ret2) { + kfree(b_i); + goto err; + } + + ret2 = rhashtable_lookup_insert_fast(&buckets_in_flight->table, &b_i->hash, + bch_move_bucket_params); + BUG_ON(ret2); + sectors += b.sectors; } - ret2 = buckets->nr >= nr_to_get; + ret2 = buckets_in_flight->to_evacuate.nr >= nr_to_get; err: ret2; })); pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i", buckets_in_flight->nr, buckets_in_flight->sectors, - saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret); + saw, in_flight, not_movable, buckets_in_flight->to_evacuate.nr, sectors, nr_to_get, ret); return ret < 0 ? ret : 0; } @@ -212,40 +214,30 @@ static int bch2_copygc(struct moving_context *ctxt, struct data_update_opts data_opts = { .btree_insert_flags = BCH_WATERMARK_copygc, }; - move_buckets buckets = { 0 }; - struct move_bucket_in_flight *f; u64 sectors_seen = atomic64_read(&ctxt->stats->sectors_seen); u64 sectors_moved = atomic64_read(&ctxt->stats->sectors_moved); int ret = 0; - ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets); + ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight); if (ret) goto err; - darray_for_each(buckets, i) { + darray_for_each(buckets_in_flight->to_evacuate, i) { if (kthread_should_stop() || freezing(current)) break; - f = move_bucket_in_flight_add(buckets_in_flight, *i); - ret = PTR_ERR_OR_ZERO(f); - if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */ - ret = 0; - continue; - } - if (ret == -ENOMEM) { /* flush IO, continue later */ - ret = 0; - break; - } + struct move_bucket *b = *i; + *i = NULL; + + move_bucket_in_flight_add(buckets_in_flight, b); - ret = bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket, - f->bucket.k.gen, data_opts); + ret = bch2_evacuate_bucket(ctxt, b, b->k.bucket, b->k.gen, data_opts); if (ret) goto err; *did_work = true; } err: - /* no entries in LRU btree found, or got to end: */ if (bch2_err_matches(ret, ENOENT)) ret = 0; @@ -255,12 +247,34 @@ err: sectors_seen = atomic64_read(&ctxt->stats->sectors_seen) - sectors_seen; sectors_moved = atomic64_read(&ctxt->stats->sectors_moved) - sectors_moved; - trace_and_count(c, copygc, c, buckets.nr, sectors_seen, sectors_moved); + trace_and_count(c, copygc, c, buckets_in_flight->to_evacuate.nr, sectors_seen, sectors_moved); - darray_exit(&buckets); + darray_for_each(buckets_in_flight->to_evacuate, i) + if (*i) 
+ move_bucket_free(buckets_in_flight, *i); + darray_exit(&buckets_in_flight->to_evacuate); return ret; } +static u64 bch2_copygc_dev_wait_amount(struct bch_dev *ca) +{ + struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca); + struct bch_dev_usage usage; + + for (unsigned i = 0; i < BCH_DATA_NR; i++) + usage.buckets[i] = usage_full.d[i].buckets; + + s64 fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) * + ca->mi.bucket_size) >> 1); + s64 fragmented = 0; + + for (unsigned i = 0; i < BCH_DATA_NR; i++) + if (data_type_movable(i)) + fragmented += usage_full.d[i].fragmented; + + return max(0LL, fragmented_allowed - fragmented); +} + /* * Copygc runs when the amount of fragmented data is above some arbitrary * threshold: @@ -275,27 +289,14 @@ err: * often and continually reduce the amount of fragmented space as the device * fills up. So, we increase the threshold by half the current free space. */ -unsigned long bch2_copygc_wait_amount(struct bch_fs *c) +u64 bch2_copygc_wait_amount(struct bch_fs *c) { - s64 wait = S64_MAX, fragmented_allowed, fragmented; - - for_each_rw_member(c, ca) { - struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca); - struct bch_dev_usage usage; - - for (unsigned i = 0; i < BCH_DATA_NR; i++) - usage.buckets[i] = usage_full.d[i].buckets; - - fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) * - ca->mi.bucket_size) >> 1); - fragmented = 0; + u64 wait = U64_MAX; - for (unsigned i = 0; i < BCH_DATA_NR; i++) - if (data_type_movable(i)) - fragmented += usage_full.d[i].fragmented; - - wait = min(wait, max(0LL, fragmented_allowed - fragmented)); - } + rcu_read_lock(); + for_each_rw_member_rcu(c, ca) + wait = min(wait, bch2_copygc_dev_wait_amount(ca)); + rcu_read_unlock(); return wait; } @@ -318,14 +319,22 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c) c->copygc_wait_at) << 9); prt_newline(out); - prt_printf(out, "Currently calculated wait:\t"); - prt_human_readable_u64(out, bch2_copygc_wait_amount(c)); - prt_newline(out); + bch2_printbuf_make_room(out, 4096); rcu_read_lock(); + out->atomic++; + + prt_printf(out, "Currently calculated wait:\n"); + for_each_rw_member_rcu(c, ca) { + prt_printf(out, " %s:\t", ca->name); + prt_human_readable_u64(out, bch2_copygc_dev_wait_amount(ca)); + prt_newline(out); + } + struct task_struct *t = rcu_dereference(c->copygc_thread); if (t) get_task_struct(t); + --out->atomic; rcu_read_unlock(); if (t) { @@ -340,19 +349,13 @@ static int bch2_copygc_thread(void *arg) struct moving_context ctxt; struct bch_move_stats move_stats; struct io_clock *clock = &c->io_clock[WRITE]; - struct buckets_in_flight *buckets; + struct buckets_in_flight buckets = {}; u64 last, wait; - int ret = 0; - buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL); - if (!buckets) - return -ENOMEM; - ret = rhashtable_init(&buckets->table, &bch_move_bucket_params); + int ret = rhashtable_init(&buckets.table, &bch_move_bucket_params); bch_err_msg(c, ret, "allocating copygc buckets in flight"); - if (ret) { - kfree(buckets); + if (ret) return ret; - } set_freezable(); @@ -360,7 +363,7 @@ static int bch2_copygc_thread(void *arg) * Data move operations can't run until after check_snapshots has * completed, and bch2_snapshot_is_ancestor() is available. 
*/ - kthread_wait_freezable(c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots || + kthread_wait_freezable(c->recovery.pass_done > BCH_RECOVERY_PASS_check_snapshots || kthread_should_stop()); bch2_move_stats_init(&move_stats, "copygc"); @@ -375,13 +378,13 @@ static int bch2_copygc_thread(void *arg) cond_resched(); if (!c->opts.copygc_enabled) { - move_buckets_wait(&ctxt, buckets, true); + move_buckets_wait(&ctxt, &buckets, true); kthread_wait_freezable(c->opts.copygc_enabled || kthread_should_stop()); } if (unlikely(freezing(current))) { - move_buckets_wait(&ctxt, buckets, true); + move_buckets_wait(&ctxt, &buckets, true); __refrigerator(false); continue; } @@ -392,7 +395,7 @@ static int bch2_copygc_thread(void *arg) if (wait > clock->max_slop) { c->copygc_wait_at = last; c->copygc_wait = last + wait; - move_buckets_wait(&ctxt, buckets, true); + move_buckets_wait(&ctxt, &buckets, true); trace_and_count(c, copygc_wait, c, wait, last + wait); bch2_kthread_io_clock_wait(clock, last + wait, MAX_SCHEDULE_TIMEOUT); @@ -402,7 +405,7 @@ static int bch2_copygc_thread(void *arg) c->copygc_wait = 0; c->copygc_running = true; - ret = bch2_copygc(&ctxt, buckets, &did_work); + ret = bch2_copygc(&ctxt, &buckets, &did_work); c->copygc_running = false; wake_up(&c->copygc_running_wq); @@ -413,16 +416,14 @@ static int bch2_copygc_thread(void *arg) if (min_member_capacity == U64_MAX) min_member_capacity = 128 * 2048; - move_buckets_wait(&ctxt, buckets, true); + move_buckets_wait(&ctxt, &buckets, true); bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6), MAX_SCHEDULE_TIMEOUT); } } - move_buckets_wait(&ctxt, buckets, true); - - rhashtable_destroy(&buckets->table); - kfree(buckets); + move_buckets_wait(&ctxt, &buckets, true); + rhashtable_destroy(&buckets.table); bch2_moving_ctxt_exit(&ctxt); bch2_move_stats_exit(&move_stats, c); diff --git a/fs/bcachefs/movinggc.h b/fs/bcachefs/movinggc.h index d1885cf67a45..b9683d22bab0 100644 --- a/fs/bcachefs/movinggc.h +++ b/fs/bcachefs/movinggc.h @@ -2,7 +2,7 @@ #ifndef _BCACHEFS_MOVINGGC_H #define _BCACHEFS_MOVINGGC_H -unsigned long bch2_copygc_wait_amount(struct bch_fs *); +u64 bch2_copygc_wait_amount(struct bch_fs *); void bch2_copygc_wait_to_text(struct printbuf *, struct bch_fs *); static inline void bch2_copygc_wakeup(struct bch_fs *c) diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c index 52c58c6d53d2..a84b69d6caef 100644 --- a/fs/bcachefs/namei.c +++ b/fs/bcachefs/namei.c @@ -11,6 +11,14 @@ #include <linux/posix_acl.h> +static inline subvol_inum parent_inum(subvol_inum inum, struct bch_inode_unpacked *inode) +{ + return (subvol_inum) { + .subvol = inode->bi_parent_subvol ?: inum.subvol, + .inum = inode->bi_dir, + }; +} + static inline int is_subdir_for_nlink(struct bch_inode_unpacked *inode) { return S_ISDIR(inode->bi_mode) && !inode->bi_subvol; @@ -49,7 +57,7 @@ int bch2_create_trans(struct btree_trans *trans, if (!(flags & BCH_CREATE_SNAPSHOT)) { /* Normal create path - allocate a new inode: */ - bch2_inode_init_late(new_inode, now, uid, gid, mode, rdev, dir_u); + bch2_inode_init_late(c, new_inode, now, uid, gid, mode, rdev, dir_u); if (flags & BCH_CREATE_TMPFILE) new_inode->bi_flags |= BCH_INODE_unlinked; @@ -158,7 +166,6 @@ int bch2_create_trans(struct btree_trans *trans, name, dir_target, &dir_offset, - &dir_u->bi_size, STR_HASH_must_create|BTREE_ITER_with_updates) ?: bch2_inode_write(trans, &dir_iter, dir_u); if (ret) @@ -225,7 +232,6 @@ int bch2_link_trans(struct btree_trans *trans, mode_to_type(inode_u->bi_mode), name, 
inum.inum, &dir_offset, - &dir_u->bi_size, STR_HASH_must_create); if (ret) goto err; @@ -406,8 +412,7 @@ int bch2_rename_trans(struct btree_trans *trans, src_hash = bch2_hash_info_init(c, src_dir_u); - if (dst_dir.inum != src_dir.inum || - dst_dir.subvol != src_dir.subvol) { + if (!subvol_inum_eq(dst_dir, src_dir)) { ret = bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir, BTREE_ITER_intent); if (ret) @@ -499,32 +504,41 @@ int bch2_rename_trans(struct btree_trans *trans, } } - if (bch2_reinherit_attrs(src_inode_u, dst_dir_u) && - S_ISDIR(src_inode_u->bi_mode)) { - ret = -EXDEV; - goto err; - } + if (!subvol_inum_eq(dst_dir, src_dir)) { + if (bch2_reinherit_attrs(src_inode_u, dst_dir_u) && + S_ISDIR(src_inode_u->bi_mode)) { + ret = -EXDEV; + goto err; + } - if (mode == BCH_RENAME_EXCHANGE && - bch2_reinherit_attrs(dst_inode_u, src_dir_u) && - S_ISDIR(dst_inode_u->bi_mode)) { - ret = -EXDEV; - goto err; - } + if (mode == BCH_RENAME_EXCHANGE && + bch2_reinherit_attrs(dst_inode_u, src_dir_u) && + S_ISDIR(dst_inode_u->bi_mode)) { + ret = -EXDEV; + goto err; + } - if (is_subdir_for_nlink(src_inode_u)) { - src_dir_u->bi_nlink--; - dst_dir_u->bi_nlink++; - } + ret = bch2_maybe_propagate_has_case_insensitive(trans, src_inum, src_inode_u) ?: + (mode == BCH_RENAME_EXCHANGE + ? bch2_maybe_propagate_has_case_insensitive(trans, dst_inum, dst_inode_u) + : 0); + if (ret) + goto err; - if (S_ISDIR(src_inode_u->bi_mode) && - !src_inode_u->bi_subvol) - src_inode_u->bi_depth = dst_dir_u->bi_depth + 1; + if (is_subdir_for_nlink(src_inode_u)) { + src_dir_u->bi_nlink--; + dst_dir_u->bi_nlink++; + } - if (mode == BCH_RENAME_EXCHANGE && - S_ISDIR(dst_inode_u->bi_mode) && - !dst_inode_u->bi_subvol) - dst_inode_u->bi_depth = src_dir_u->bi_depth + 1; + if (S_ISDIR(src_inode_u->bi_mode) && + !src_inode_u->bi_subvol) + src_inode_u->bi_depth = dst_dir_u->bi_depth + 1; + + if (mode == BCH_RENAME_EXCHANGE && + S_ISDIR(dst_inode_u->bi_mode) && + !dst_inode_u->bi_subvol) + dst_inode_u->bi_depth = src_dir_u->bi_depth + 1; + } if (dst_inum.inum && is_subdir_for_nlink(dst_inode_u)) { dst_dir_u->bi_nlink--; @@ -595,31 +609,39 @@ static inline void reverse_bytes(void *b, size_t n) } } -/* XXX: we don't yet attempt to print paths when we don't know the subvol */ -int bch2_inum_to_path(struct btree_trans *trans, subvol_inum inum, struct printbuf *path) +static int __bch2_inum_to_path(struct btree_trans *trans, + u32 subvol, u64 inum, u32 snapshot, + struct printbuf *path) { unsigned orig_pos = path->pos; int ret = 0; - while (!(inum.subvol == BCACHEFS_ROOT_SUBVOL && - inum.inum == BCACHEFS_ROOT_INO)) { + while (true) { + if (!snapshot) { + ret = bch2_subvolume_get_snapshot(trans, subvol, &snapshot); + if (ret) + goto disconnected; + } + struct bch_inode_unpacked inode; - ret = bch2_inode_find_by_inum_trans(trans, inum, &inode); + ret = bch2_inode_find_by_inum_snapshot(trans, inum, snapshot, &inode, 0); if (ret) goto disconnected; + if (inode.bi_subvol == BCACHEFS_ROOT_SUBVOL && + inode.bi_inum == BCACHEFS_ROOT_INO) + break; + if (!inode.bi_dir && !inode.bi_dir_offset) { ret = -BCH_ERR_ENOENT_inode_no_backpointer; goto disconnected; } - inum.subvol = inode.bi_parent_subvol ?: inum.subvol; - inum.inum = inode.bi_dir; - - u32 snapshot; - ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); - if (ret) - goto disconnected; + inum = inode.bi_dir; + if (inode.bi_parent_subvol) { + subvol = inode.bi_parent_subvol; + snapshot = 0; + } struct btree_iter d_iter; struct bkey_s_c_dirent d = bch2_bkey_get_iter_typed(trans, 
&d_iter, @@ -656,6 +678,20 @@ disconnected: goto out; } +int bch2_inum_to_path(struct btree_trans *trans, + subvol_inum inum, + struct printbuf *path) +{ + return __bch2_inum_to_path(trans, inum.subvol, inum.inum, 0, path); +} + +int bch2_inum_snapshot_to_path(struct btree_trans *trans, u64 inum, u32 snapshot, + snapshot_id_list *snapshot_overwrites, + struct printbuf *path) +{ + return __bch2_inum_to_path(trans, 0, inum, snapshot, path); +} + /* fsck */ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans, @@ -831,3 +867,149 @@ fsck_err: bch_err_fn(c, ret); return ret; } + +/* + * BCH_INODE_has_case_insensitive: + * We have to track whether directories have any descendent directory that is + * casefolded - for overlayfs: + */ + +static int bch2_propagate_has_case_insensitive(struct btree_trans *trans, subvol_inum inum) +{ + struct btree_iter iter = {}; + int ret = 0; + + while (true) { + struct bch_inode_unpacked inode; + ret = bch2_inode_peek(trans, &iter, &inode, inum, + BTREE_ITER_intent|BTREE_ITER_with_updates); + if (ret) + break; + + if (inode.bi_flags & BCH_INODE_has_case_insensitive) + break; + + inode.bi_flags |= BCH_INODE_has_case_insensitive; + ret = bch2_inode_write(trans, &iter, &inode); + if (ret) + break; + + bch2_trans_iter_exit(trans, &iter); + if (subvol_inum_eq(inum, BCACHEFS_ROOT_SUBVOL_INUM)) + break; + + inum = parent_inum(inum, &inode); + } + + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +int bch2_maybe_propagate_has_case_insensitive(struct btree_trans *trans, subvol_inum inum, + struct bch_inode_unpacked *inode) +{ + if (!bch2_inode_casefold(trans->c, inode)) + return 0; + + inode->bi_flags |= BCH_INODE_has_case_insensitive; + + return bch2_propagate_has_case_insensitive(trans, parent_inum(inum, inode)); +} + +int bch2_check_inode_has_case_insensitive(struct btree_trans *trans, + struct bch_inode_unpacked *inode, + snapshot_id_list *snapshot_overwrites, + bool *do_update) +{ + struct printbuf buf = PRINTBUF; + bool repairing_parents = false; + int ret = 0; + + if (!S_ISDIR(inode->bi_mode)) { + /* + * Old versions set bi_casefold for non dirs, but that's + * unnecessary and wasteful + */ + if (inode->bi_casefold) { + inode->bi_casefold = 0; + *do_update = true; + } + return 0; + } + + if (trans->c->sb.version < bcachefs_metadata_version_inode_has_case_insensitive) + return 0; + + if (bch2_inode_casefold(trans->c, inode) && + !(inode->bi_flags & BCH_INODE_has_case_insensitive)) { + prt_printf(&buf, "casefolded dir with has_case_insensitive not set\ninum %llu:%u ", + inode->bi_inum, inode->bi_snapshot); + + ret = bch2_inum_snapshot_to_path(trans, inode->bi_inum, inode->bi_snapshot, + snapshot_overwrites, &buf); + if (ret) + goto err; + + if (fsck_err(trans, inode_has_case_insensitive_not_set, "%s", buf.buf)) { + inode->bi_flags |= BCH_INODE_has_case_insensitive; + *do_update = true; + } + } + + if (!(inode->bi_flags & BCH_INODE_has_case_insensitive)) + goto out; + + struct bch_inode_unpacked dir = *inode; + u32 snapshot = dir.bi_snapshot; + + while (!(dir.bi_inum == BCACHEFS_ROOT_INO && + dir.bi_subvol == BCACHEFS_ROOT_SUBVOL)) { + if (dir.bi_parent_subvol) { + ret = bch2_subvolume_get_snapshot(trans, dir.bi_parent_subvol, &snapshot); + if (ret) + goto err; + + snapshot_overwrites = NULL; + } + + ret = bch2_inode_find_by_inum_snapshot(trans, dir.bi_dir, snapshot, &dir, 0); + if (ret) + goto err; + + if (!(dir.bi_flags & BCH_INODE_has_case_insensitive)) { + prt_printf(&buf, "parent of casefolded dir with has_case_insensitive not set\n"); + + 
ret = bch2_inum_snapshot_to_path(trans, dir.bi_inum, dir.bi_snapshot, + snapshot_overwrites, &buf); + if (ret) + goto err; + + if (fsck_err(trans, inode_parent_has_case_insensitive_not_set, "%s", buf.buf)) { + dir.bi_flags |= BCH_INODE_has_case_insensitive; + ret = __bch2_fsck_write_inode(trans, &dir); + if (ret) + goto err; + } + } + + /* + * We only need to check the first parent, unless we find an + * inconsistency + */ + if (!repairing_parents) + break; + } +out: +err: +fsck_err: + printbuf_exit(&buf); + if (ret) + return ret; + + if (repairing_parents) { + return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; + } + + return 0; +} diff --git a/fs/bcachefs/namei.h b/fs/bcachefs/namei.h index 2e6f6364767f..ae6ebc2d0785 100644 --- a/fs/bcachefs/namei.h +++ b/fs/bcachefs/namei.h @@ -43,6 +43,8 @@ bool bch2_reinherit_attrs(struct bch_inode_unpacked *, struct bch_inode_unpacked *); int bch2_inum_to_path(struct btree_trans *, subvol_inum, struct printbuf *); +int bch2_inum_snapshot_to_path(struct btree_trans *, u64, u32, + snapshot_id_list *, struct printbuf *); int __bch2_check_dirent_target(struct btree_trans *, struct btree_iter *, @@ -69,4 +71,9 @@ static inline int bch2_check_dirent_target(struct btree_trans *trans, return __bch2_check_dirent_target(trans, dirent_iter, d, target, in_fsck); } +int bch2_maybe_propagate_has_case_insensitive(struct btree_trans *, subvol_inum, + struct bch_inode_unpacked *); +int bch2_check_inode_has_case_insensitive(struct btree_trans *, struct bch_inode_unpacked *, + snapshot_id_list *, bool *); + #endif /* _BCACHEFS_NAMEI_H */ diff --git a/fs/bcachefs/nocow_locking.c b/fs/bcachefs/nocow_locking.c index 3c21981a4a1c..962218fa68ec 100644 --- a/fs/bcachefs/nocow_locking.c +++ b/fs/bcachefs/nocow_locking.c @@ -133,12 +133,10 @@ void bch2_fs_nocow_locking_exit(struct bch_fs *c) BUG_ON(atomic_read(&l->l[j])); } -int bch2_fs_nocow_locking_init(struct bch_fs *c) +void bch2_fs_nocow_locking_init_early(struct bch_fs *c) { struct bucket_nocow_lock_table *t = &c->nocow_locks; for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++) spin_lock_init(&l->lock); - - return 0; } diff --git a/fs/bcachefs/nocow_locking.h b/fs/bcachefs/nocow_locking.h index f9d6a426a960..48b8a003c0d2 100644 --- a/fs/bcachefs/nocow_locking.h +++ b/fs/bcachefs/nocow_locking.h @@ -45,6 +45,6 @@ static inline bool bch2_bucket_nocow_trylock(struct bucket_nocow_lock_table *t, void bch2_nocow_locks_to_text(struct printbuf *, struct bucket_nocow_lock_table *); void bch2_fs_nocow_locking_exit(struct bch_fs *); -int bch2_fs_nocow_locking_init(struct bch_fs *); +void bch2_fs_nocow_locking_init_early(struct bch_fs *); #endif /* _BCACHEFS_NOCOW_LOCKING_H */ diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c index af3258814822..b1cf88905b81 100644 --- a/fs/bcachefs/opts.c +++ b/fs/bcachefs/opts.c @@ -7,7 +7,9 @@ #include "compress.h" #include "disk_groups.h" #include "error.h" +#include "movinggc.h" #include "opts.h" +#include "rebalance.h" #include "recovery_passes.h" #include "super-io.h" #include "util.h" @@ -19,6 +21,11 @@ const char * const bch2_error_actions[] = { NULL }; +const char * const bch2_degraded_actions[] = { + BCH_DEGRADED_ACTIONS() + NULL +}; + const char * const bch2_fsck_fix_opts[] = { BCH_FIX_ERRORS_OPTS() NULL @@ -273,20 +280,20 @@ int bch2_opt_lookup(const char *name) return -1; } -struct synonym { +struct opt_synonym { const char *s1, *s2; }; -static const struct synonym bch_opt_synonyms[] = { +static 
const struct opt_synonym bch2_opt_synonyms[] = { { "quota", "usrquota" }, }; static int bch2_mount_opt_lookup(const char *name) { - const struct synonym *i; + const struct opt_synonym *i; - for (i = bch_opt_synonyms; - i < bch_opt_synonyms + ARRAY_SIZE(bch_opt_synonyms); + for (i = bch2_opt_synonyms; + i < bch2_opt_synonyms + ARRAY_SIZE(bch2_opt_synonyms); i++) if (!strcmp(name, i->s1)) name = i->s2; @@ -294,6 +301,30 @@ static int bch2_mount_opt_lookup(const char *name) return bch2_opt_lookup(name); } +struct opt_val_synonym { + const char *opt, *v1, *v2; +}; + +static const struct opt_val_synonym bch2_opt_val_synonyms[] = { + { "degraded", "true", "yes" }, + { "degraded", "false", "no" }, + { "degraded", "1", "yes" }, + { "degraded", "0", "no" }, +}; + +static const char *bch2_opt_val_synonym_lookup(const char *opt, const char *val) +{ + const struct opt_val_synonym *i; + + for (i = bch2_opt_val_synonyms; + i < bch2_opt_val_synonyms + ARRAY_SIZE(bch2_opt_val_synonyms); + i++) + if (!strcmp(opt, i->opt) && !strcmp(val, i->v1)) + return i->v2; + + return val; +} + int bch2_opt_validate(const struct bch_option *opt, u64 v, struct printbuf *err) { if (v < opt->min) { @@ -337,21 +368,22 @@ int bch2_opt_parse(struct bch_fs *c, { ssize_t ret; + if (err) + printbuf_indent_add_nextline(err, 2); + switch (opt->type) { case BCH_OPT_BOOL: - if (val) { - ret = lookup_constant(bool_names, val, -BCH_ERR_option_not_bool); - if (ret != -BCH_ERR_option_not_bool) { - *res = ret; - } else { - if (err) - prt_printf(err, "%s: must be bool", opt->attr.name); - return ret; - } + if (!val) + val = "1"; + + ret = lookup_constant(bool_names, val, -BCH_ERR_option_not_bool); + if (ret != -BCH_ERR_option_not_bool) { + *res = ret; } else { - *res = 1; + if (err) + prt_printf(err, "%s: must be bool", opt->attr.name); + return ret; } - break; case BCH_OPT_UINT: if (!val) { @@ -360,9 +392,15 @@ int bch2_opt_parse(struct bch_fs *c, return -EINVAL; } - ret = opt->flags & OPT_HUMAN_READABLE - ? bch2_strtou64_h(val, res) - : kstrtou64(val, 10, res); + if (*val != '-') { + ret = opt->flags & OPT_HUMAN_READABLE + ? 
bch2_strtou64_h(val, res) + : kstrtou64(val, 10, res); + } else { + prt_printf(err, "%s: must be a non-negative number", opt->attr.name); + return -BCH_ERR_option_negative; + } + if (ret < 0) { if (err) prt_printf(err, "%s: must be a number", @@ -480,7 +518,7 @@ void bch2_opts_to_text(struct printbuf *out, } } -int bch2_opt_check_may_set(struct bch_fs *c, struct bch_dev *ca, int id, u64 v) +int bch2_opt_hook_pre_set(struct bch_fs *c, struct bch_dev *ca, enum bch_opt_id id, u64 v) { int ret = 0; @@ -498,15 +536,17 @@ int bch2_opt_check_may_set(struct bch_fs *c, struct bch_dev *ca, int id, u64 v) if (v) bch2_check_set_feature(c, BCH_FEATURE_ec); break; + default: + break; } return ret; } -int bch2_opts_check_may_set(struct bch_fs *c) +int bch2_opts_hooks_pre_set(struct bch_fs *c) { for (unsigned i = 0; i < bch2_opts_nr; i++) { - int ret = bch2_opt_check_may_set(c, NULL, i, bch2_opt_get_by_id(&c->opts, i)); + int ret = bch2_opt_hook_pre_set(c, NULL, i, bch2_opt_get_by_id(&c->opts, i)); if (ret) return ret; } @@ -514,6 +554,61 @@ int bch2_opts_check_may_set(struct bch_fs *c) return 0; } +void bch2_opt_hook_post_set(struct bch_fs *c, struct bch_dev *ca, u64 inum, + struct bch_opts *new_opts, enum bch_opt_id id) +{ + switch (id) { + case Opt_foreground_target: + if (new_opts->foreground_target && + !new_opts->background_target) + bch2_set_rebalance_needs_scan(c, inum); + break; + case Opt_compression: + if (new_opts->compression && + !new_opts->background_compression) + bch2_set_rebalance_needs_scan(c, inum); + break; + case Opt_background_target: + if (new_opts->background_target) + bch2_set_rebalance_needs_scan(c, inum); + break; + case Opt_background_compression: + if (new_opts->background_compression) + bch2_set_rebalance_needs_scan(c, inum); + break; + case Opt_rebalance_enabled: + bch2_rebalance_wakeup(c); + break; + case Opt_copygc_enabled: + bch2_copygc_wakeup(c); + break; + case Opt_discard: + if (!ca) { + mutex_lock(&c->sb_lock); + for_each_member_device(c, ca) { + struct bch_member *m = + bch2_members_v2_get_mut(ca->disk_sb.sb, ca->dev_idx); + SET_BCH_MEMBER_DISCARD(m, c->opts.discard); + } + + bch2_write_super(c); + mutex_unlock(&c->sb_lock); + } + break; + case Opt_version_upgrade: + /* + * XXX: in the future we'll likely want to do compatible + * upgrades at runtime as well, but right now there's nothing + * that does that: + */ + if (new_opts->version_upgrade == BCH_VERSION_UPGRADE_incompatible) + bch2_sb_upgrade_incompat(c); + break; + default: + break; + } +} + int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts, struct printbuf *parse_later, const char *name, const char *val) @@ -536,6 +631,12 @@ int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts, if (id < 0) return 0; + /* must have a value for synonym lookup - but OPT_FN is weird */ + if (!val && bch2_opt_table[id].type != BCH_OPT_FN) + val = "1"; + + val = bch2_opt_val_synonym_lookup(name, val); + if (!(bch2_opt_table[id].flags & OPT_MOUNT)) goto bad_opt; @@ -667,9 +768,11 @@ int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb) return 0; } -void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx, +bool __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx, const struct bch_option *opt, u64 v) { + bool changed = false; + if (opt->flags & OPT_SB_FIELD_SECTORS) v >>= 9; @@ -679,26 +782,35 @@ void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx, if (opt->flags & OPT_SB_FIELD_ONE_BIAS) v++; - if ((opt->flags & OPT_FS) && opt->set_sb && dev_idx < 0) + if ((opt->flags & OPT_FS) && opt->set_sb 
&& dev_idx < 0) { + changed = v != opt->get_sb(sb); + opt->set_sb(sb, v); + } if ((opt->flags & OPT_DEVICE) && opt->set_member && dev_idx >= 0) { if (WARN(!bch2_member_exists(sb, dev_idx), "tried to set device option %s on nonexistent device %i", opt->attr.name, dev_idx)) - return; + return false; - opt->set_member(bch2_members_v2_get_mut(sb, dev_idx), v); + struct bch_member *m = bch2_members_v2_get_mut(sb, dev_idx); + changed = v != opt->get_member(m); + opt->set_member(m, v); } + + return changed; } -void bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca, +bool bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca, const struct bch_option *opt, u64 v) { mutex_lock(&c->sb_lock); - __bch2_opt_set_sb(c->disk_sb.sb, ca ? ca->dev_idx : -1, opt, v); - bch2_write_super(c); + bool changed = __bch2_opt_set_sb(c->disk_sb.sb, ca ? ca->dev_idx : -1, opt, v); + if (changed) + bch2_write_super(c); mutex_unlock(&c->sb_lock); + return changed; } /* io opts: */ diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h index dfb14810124c..2a02606254b3 100644 --- a/fs/bcachefs/opts.h +++ b/fs/bcachefs/opts.h @@ -11,6 +11,7 @@ struct bch_fs; extern const char * const bch2_error_actions[]; +extern const char * const bch2_degraded_actions[]; extern const char * const bch2_fsck_fix_opts[]; extern const char * const bch2_version_upgrade_opts[]; extern const char * const bch2_sb_features[]; @@ -307,14 +308,9 @@ enum fsck_err_opts { NULL, "Enable project quotas") \ x(degraded, u8, \ OPT_FS|OPT_MOUNT, \ - OPT_BOOL(), \ - BCH2_NO_SB_OPT, false, \ + OPT_STR(bch2_degraded_actions), \ + BCH_SB_DEGRADED_ACTION, BCH_DEGRADED_ask, \ NULL, "Allow mounting in degraded mode") \ - x(very_degraded, u8, \ - OPT_FS|OPT_MOUNT, \ - OPT_BOOL(), \ - BCH2_NO_SB_OPT, false, \ - NULL, "Allow mounting in when data will be missing") \ x(no_splitbrain_check, u8, \ OPT_FS|OPT_MOUNT, \ OPT_BOOL(), \ @@ -454,7 +450,7 @@ enum fsck_err_opts { BCH2_NO_SB_OPT, false, \ NULL, "Reconstruct alloc btree") \ x(version_upgrade, u8, \ - OPT_FS|OPT_MOUNT, \ + OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ OPT_STR(bch2_version_upgrade_opts), \ BCH_SB_VERSION_UPGRADE, BCH_VERSION_UPGRADE_compatible, \ NULL, "Set superblock to latest version,\n" \ @@ -494,6 +490,17 @@ enum fsck_err_opts { BCH2_NO_SB_OPT, true, \ NULL, "Enable rebalance: disable for debugging, or to\n"\ "quiet the system when doing performance testing\n")\ + x(rebalance_on_ac_only, u8, \ + OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ + OPT_BOOL(), \ + BCH_SB_REBALANCE_AC_ONLY, false, \ + NULL, "Enable rebalance while on mains power only\n") \ + x(auto_snapshot_deletion, u8, \ + OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ + OPT_BOOL(), \ + BCH2_NO_SB_OPT, true, \ + NULL, "Enable automatic snapshot deletion: disable for debugging, or to\n"\ + "quiet the system when doing performance testing\n")\ x(no_data_io, u8, \ OPT_MOUNT, \ OPT_BOOL(), \ @@ -522,7 +529,7 @@ enum fsck_err_opts { BCH_MEMBER_DATA_ALLOWED, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\ "types", "Allowed data types for this device: journal, btree, and/or user")\ x(discard, u8, \ - OPT_MOUNT|OPT_DEVICE|OPT_RUNTIME, \ + OPT_MOUNT|OPT_FS|OPT_DEVICE|OPT_RUNTIME, \ OPT_BOOL(), \ BCH_MEMBER_DISCARD, true, \ NULL, "Enable discard/TRIM support") \ @@ -530,7 +537,7 @@ enum fsck_err_opts { OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ OPT_BOOL(), \ BCH2_NO_SB_OPT, true, \ - NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\ + NULL, "BTREE_ITER_prefetch causes btree nodes to be\n"\ " prefetched sequentially") struct bch_opts { @@ -616,10 +623,10 @@ void 
bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64); u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id, int); int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *); -void __bch2_opt_set_sb(struct bch_sb *, int, const struct bch_option *, u64); +bool __bch2_opt_set_sb(struct bch_sb *, int, const struct bch_option *, u64); struct bch_dev; -void bch2_opt_set_sb(struct bch_fs *, struct bch_dev *, const struct bch_option *, u64); +bool bch2_opt_set_sb(struct bch_fs *, struct bch_dev *, const struct bch_option *, u64); int bch2_opt_lookup(const char *); int bch2_opt_validate(const struct bch_option *, u64, struct printbuf *); @@ -636,8 +643,11 @@ void bch2_opts_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, unsigned, unsigned, unsigned); -int bch2_opt_check_may_set(struct bch_fs *, struct bch_dev *, int, u64); -int bch2_opts_check_may_set(struct bch_fs *); +int bch2_opt_hook_pre_set(struct bch_fs *, struct bch_dev *, enum bch_opt_id, u64); +int bch2_opts_hooks_pre_set(struct bch_fs *); +void bch2_opt_hook_post_set(struct bch_fs *, struct bch_dev *, u64, + struct bch_opts *, enum bch_opt_id); + int bch2_parse_one_mount_opt(struct bch_fs *, struct bch_opts *, struct printbuf *, const char *, const char *); int bch2_parse_mount_opts(struct bch_fs *, struct bch_opts *, struct printbuf *, diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index 623273556aa9..de1ec9e0caa0 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -80,11 +80,13 @@ static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c, unsigned ptr_bit = 1; unsigned rewrite_ptrs = 0; + rcu_read_lock(); bkey_for_each_ptr(ptrs, ptr) { if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target)) rewrite_ptrs |= ptr_bit; ptr_bit <<= 1; } + rcu_read_unlock(); return rewrite_ptrs; } @@ -95,6 +97,9 @@ static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, { struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + if (bch2_bkey_extent_ptrs_flags(ptrs) & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) + return 0; + return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) | bch2_bkey_ptrs_need_move(c, opts, ptrs); } @@ -107,6 +112,9 @@ u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k) if (!opts) return 0; + if (bch2_bkey_extent_ptrs_flags(ptrs) & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) + return 0; + const union bch_extent_entry *entry; struct extent_ptr_decoded p; u64 sectors = 0; @@ -126,10 +134,14 @@ u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k) } } incompressible: - if (opts->background_target) + if (opts->background_target) { + rcu_read_lock(); bkey_for_each_ptr_decode(k.k, ptrs, p, entry) - if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target)) + if (!p.ptr.cached && + !bch2_dev_in_target(c, p.ptr.dev, opts->background_target)) sectors += p.crc.compressed_size; + rcu_read_unlock(); + } return sectors; } @@ -447,22 +459,11 @@ out: return ret; } -static bool rebalance_pred(struct bch_fs *c, void *arg, - struct bkey_s_c k, - struct bch_io_opts *io_opts, - struct data_update_opts *data_opts) -{ - data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k); - data_opts->target = io_opts->background_target; - data_opts->write_flags |= BCH_WRITE_only_specified_devs; - return data_opts->rewrite_ptrs != 0; -} - static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie) { struct btree_trans *trans = ctxt->trans; + struct bch_fs *c = trans->c; struct bch_fs_rebalance *r = 
&trans->c->rebalance; - int ret; bch2_move_stats_init(&r->scan_stats, "rebalance_scan"); ctxt->stats = &r->scan_stats; @@ -477,11 +478,34 @@ static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie) r->state = BCH_REBALANCE_scanning; - ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?: - commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_clear_rebalance_needs_scan(trans, inum, cookie)); + struct per_snapshot_io_opts snapshot_io_opts; + per_snapshot_io_opts_init(&snapshot_io_opts, c); + + int ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents, + r->scan_start.pos, r->scan_end.pos, + BTREE_ITER_all_snapshots| + BTREE_ITER_not_extents| + BTREE_ITER_prefetch, k, ({ + ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos); + struct bch_io_opts *io_opts = bch2_move_get_io_opts(trans, + &snapshot_io_opts, iter.pos, &iter, k); + PTR_ERR_OR_ZERO(io_opts); + })) ?: + commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, + bch2_clear_rebalance_needs_scan(trans, inum, cookie)); + + per_snapshot_io_opts_exit(&snapshot_io_opts); bch2_move_stats_exit(&r->scan_stats, trans->c); + + /* + * Ensure that the rebalance_work entries we created are seen by the + * next iteration of do_rebalance(), so we don't end up stuck in + * rebalance_wait(): + */ + atomic64_inc(&r->scan_stats.sectors_seen); + bch2_btree_write_buffer_flush_sync(trans); + return ret; } @@ -506,6 +530,13 @@ static void rebalance_wait(struct bch_fs *c) bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT); } +static bool bch2_rebalance_enabled(struct bch_fs *c) +{ + return c->opts.rebalance_enabled && + !(c->opts.rebalance_on_ac_only && + c->rebalance.on_battery); +} + static int do_rebalance(struct moving_context *ctxt) { struct btree_trans *trans = ctxt->trans; @@ -525,9 +556,9 @@ static int do_rebalance(struct moving_context *ctxt) BTREE_ITER_all_snapshots); while (!bch2_move_ratelimit(ctxt)) { - if (!c->opts.rebalance_enabled) { + if (!bch2_rebalance_enabled(c)) { bch2_moving_ctxt_flush_all(ctxt); - kthread_wait_freezable(c->opts.rebalance_enabled || + kthread_wait_freezable(bch2_rebalance_enabled(c) || kthread_should_stop()); } @@ -585,7 +616,7 @@ static int bch2_rebalance_thread(void *arg) * Data move operations can't run until after check_snapshots has * completed, and bch2_snapshot_is_ancestor() is available. 
*/ - kthread_wait_freezable(c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots || + kthread_wait_freezable(c->recovery.pass_done > BCH_RECOVERY_PASS_check_snapshots || kthread_should_stop()); bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats, @@ -702,7 +733,156 @@ int bch2_rebalance_start(struct bch_fs *c) return 0; } -void bch2_fs_rebalance_init(struct bch_fs *c) +#ifdef CONFIG_POWER_SUPPLY +#include <linux/power_supply.h> + +static int bch2_rebalance_power_notifier(struct notifier_block *nb, + unsigned long event, void *data) { - bch2_pd_controller_init(&c->rebalance.pd); + struct bch_fs *c = container_of(nb, struct bch_fs, rebalance.power_notifier); + + c->rebalance.on_battery = !power_supply_is_system_supplied(); + bch2_rebalance_wakeup(c); + return NOTIFY_OK; +} +#endif + +void bch2_fs_rebalance_exit(struct bch_fs *c) +{ +#ifdef CONFIG_POWER_SUPPLY + power_supply_unreg_notifier(&c->rebalance.power_notifier); +#endif +} + +int bch2_fs_rebalance_init(struct bch_fs *c) +{ + struct bch_fs_rebalance *r = &c->rebalance; + + bch2_pd_controller_init(&r->pd); + +#ifdef CONFIG_POWER_SUPPLY + r->power_notifier.notifier_call = bch2_rebalance_power_notifier; + int ret = power_supply_reg_notifier(&r->power_notifier); + if (ret) + return ret; + + r->on_battery = !power_supply_is_system_supplied(); +#endif + return 0; +} + +static int check_rebalance_work_one(struct btree_trans *trans, + struct btree_iter *extent_iter, + struct btree_iter *rebalance_iter, + struct bkey_buf *last_flushed) +{ + struct bch_fs *c = trans->c; + struct bkey_s_c extent_k, rebalance_k; + struct printbuf buf = PRINTBUF; + + int ret = bkey_err(extent_k = bch2_btree_iter_peek(trans, extent_iter)) ?: + bkey_err(rebalance_k = bch2_btree_iter_peek(trans, rebalance_iter)); + if (ret) + return ret; + + if (!extent_k.k && + extent_iter->btree_id == BTREE_ID_reflink && + (!rebalance_k.k || + rebalance_k.k->p.inode >= BCACHEFS_ROOT_INO)) { + bch2_trans_iter_exit(trans, extent_iter); + bch2_trans_iter_init(trans, extent_iter, + BTREE_ID_extents, POS_MIN, + BTREE_ITER_prefetch| + BTREE_ITER_all_snapshots); + return -BCH_ERR_transaction_restart_nested; + } + + if (!extent_k.k && !rebalance_k.k) + return 1; + + int cmp = bpos_cmp(extent_k.k ? extent_k.k->p : SPOS_MAX, + rebalance_k.k ? 
rebalance_k.k->p : SPOS_MAX); + + struct bkey deleted; + bkey_init(&deleted); + + if (cmp < 0) { + deleted.p = extent_k.k->p; + rebalance_k.k = &deleted; + } else if (cmp > 0) { + deleted.p = rebalance_k.k->p; + extent_k.k = &deleted; + } + + bool should_have_rebalance = + bch2_bkey_sectors_need_rebalance(c, extent_k) != 0; + bool have_rebalance = rebalance_k.k->type == KEY_TYPE_set; + + if (should_have_rebalance != have_rebalance) { + ret = bch2_btree_write_buffer_maybe_flush(trans, extent_k, last_flushed); + if (ret) + return ret; + + bch2_bkey_val_to_text(&buf, c, extent_k); + } + + if (fsck_err_on(!should_have_rebalance && have_rebalance, + trans, rebalance_work_incorrectly_set, + "rebalance work incorrectly set\n%s", buf.buf)) { + ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work, + extent_k.k->p, false); + if (ret) + goto err; + } + + if (fsck_err_on(should_have_rebalance && !have_rebalance, + trans, rebalance_work_incorrectly_unset, + "rebalance work incorrectly unset\n%s", buf.buf)) { + ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work, + extent_k.k->p, true); + if (ret) + goto err; + } + + if (cmp <= 0) + bch2_btree_iter_advance(trans, extent_iter); + if (cmp >= 0) + bch2_btree_iter_advance(trans, rebalance_iter); +err: +fsck_err: + printbuf_exit(&buf); + return ret; +} + +int bch2_check_rebalance_work(struct bch_fs *c) +{ + struct btree_trans *trans = bch2_trans_get(c); + struct btree_iter rebalance_iter, extent_iter; + int ret = 0; + + bch2_trans_iter_init(trans, &extent_iter, + BTREE_ID_reflink, POS_MIN, + BTREE_ITER_prefetch); + bch2_trans_iter_init(trans, &rebalance_iter, + BTREE_ID_rebalance_work, POS_MIN, + BTREE_ITER_prefetch); + + struct bkey_buf last_flushed; + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + + while (!ret) { + bch2_trans_begin(trans); + + ret = check_rebalance_work_one(trans, &extent_iter, &rebalance_iter, &last_flushed); + + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + ret = 0; + } + + bch2_bkey_buf_exit(&last_flushed, c); + bch2_trans_iter_exit(trans, &extent_iter); + bch2_trans_iter_exit(trans, &rebalance_iter); + bch2_trans_put(trans); + return ret < 0 ? 
ret : 0; } diff --git a/fs/bcachefs/rebalance.h b/fs/bcachefs/rebalance.h index e5e8eb4a2dd1..5d9214fe1a22 100644 --- a/fs/bcachefs/rebalance.h +++ b/fs/bcachefs/rebalance.h @@ -52,6 +52,10 @@ void bch2_rebalance_status_to_text(struct printbuf *, struct bch_fs *); void bch2_rebalance_stop(struct bch_fs *); int bch2_rebalance_start(struct bch_fs *); -void bch2_fs_rebalance_init(struct bch_fs *); + +void bch2_fs_rebalance_exit(struct bch_fs *); +int bch2_fs_rebalance_init(struct bch_fs *); + +int bch2_check_rebalance_work(struct bch_fs *); #endif /* _BCACHEFS_REBALANCE_H */ diff --git a/fs/bcachefs/rebalance_types.h b/fs/bcachefs/rebalance_types.h index fe5098c17dfc..33d77286f1d5 100644 --- a/fs/bcachefs/rebalance_types.h +++ b/fs/bcachefs/rebalance_types.h @@ -30,6 +30,11 @@ struct bch_fs_rebalance { struct bbpos scan_start; struct bbpos scan_end; struct bch_move_stats scan_stats; + + bool on_battery; +#ifdef CONFIG_POWER_SUPPLY + struct notifier_block power_notifier; +#endif }; #endif /* _BCACHEFS_REBALANCE_TYPES_H */ diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index d6c4ef819d40..4fca57575565 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -33,8 +33,9 @@ #include <linux/sort.h> #include <linux/stat.h> - -int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) +int bch2_btree_lost_data(struct bch_fs *c, + struct printbuf *msg, + enum btree_id btree) { u64 b = BIT_ULL(btree); int ret = 0; @@ -43,32 +44,32 @@ int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); if (!(c->sb.btrees_lost_data & b)) { - struct printbuf buf = PRINTBUF; - bch2_btree_id_to_text(&buf, btree); - bch_err(c, "flagging btree %s lost data", buf.buf); - printbuf_exit(&buf); + prt_printf(msg, "flagging btree "); + bch2_btree_id_to_text(msg, btree); + prt_printf(msg, " lost data\n"); + ext->btrees_lost_data |= cpu_to_le64(b); } /* Once we have runtime self healing for topology errors we won't need this: */ - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_topology) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret; /* Btree node accounting will be off: */ __set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent); - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_allocations, 0) ?: ret; #ifdef CONFIG_BCACHEFS_DEBUG /* * These are much more minor, and don't need to be corrected right away, * but in debug mode we want the next fsck run to be clean: */ - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_lrus) ?: ret; - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_backpointers_to_extents) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_lrus, 0) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_backpointers_to_extents, 0) ?: ret; #endif switch (btree) { case BTREE_ID_alloc: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); @@ -78,26 +79,30 @@ int 
bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent); goto out; case BTREE_ID_backpointers: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_btree_backpointers) ?: ret; - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_extents_to_backpointers) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_btree_backpointers, 0) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_extents_to_backpointers, 0) ?: ret; goto out; case BTREE_ID_need_discard: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; goto out; case BTREE_ID_freespace: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; goto out; case BTREE_ID_bucket_gens: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; goto out; case BTREE_ID_lru: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; goto out; case BTREE_ID_accounting: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_allocations, 0) ?: ret; + goto out; + case BTREE_ID_snapshots: + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_reconstruct_snapshots, 0) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret; goto out; default: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_scan_for_btree_nodes) ?: ret; + ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret; goto out; } out: @@ -114,11 +119,8 @@ static void kill_btree(struct bch_fs *c, enum btree_id btree) } /* for -o reconstruct_alloc: */ -static void bch2_reconstruct_alloc(struct bch_fs *c) +void bch2_reconstruct_alloc(struct bch_fs *c) { - bch2_journal_log_msg(c, "dropping alloc info"); - bch_info(c, "dropping and reconstructing all alloc info"); - mutex_lock(&c->sb_lock); struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); @@ -160,6 +162,8 @@ static void bch2_reconstruct_alloc(struct bch_fs *c) c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); + c->disk_sb.sb->features[0] &= ~cpu_to_le64(BIT_ULL(BCH_FEATURE_no_alloc_info)); + bch2_write_super(c); mutex_unlock(&c->sb_lock); @@ -282,7 +286,12 @@ static int bch2_journal_replay_key(struct btree_trans *trans, goto out; if (k->k->k.type == KEY_TYPE_accounting) { - ret = bch2_trans_update_buffered(trans, BTREE_ID_accounting, k->k); + struct bkey_i *n = bch2_trans_subbuf_alloc(trans, &trans->accounting, k->k->k.u64s); + ret = PTR_ERR_OR_ZERO(n); + if (ret) + goto out; + + bkey_copy(n, k->k); goto out; } @@ -430,7 +439,7 @@ int bch2_journal_replay(struct bch_fs *c) trans = NULL; if (!c->opts.retain_recovery_info && - 
c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) + c->recovery.pass_done >= BCH_RECOVERY_PASS_journal_replay) bch2_journal_keys_put_initial(c); replay_now_at(j, j->replay_journal_seq_end); @@ -585,9 +594,6 @@ static int read_btree_roots(struct bch_fs *c) buf.buf, bch2_err_str(ret))) { if (btree_id_is_alloc(i)) r->error = 0; - - ret = bch2_btree_lost_data(c, i); - BUG_ON(ret); } } @@ -667,7 +673,7 @@ static bool check_version_upgrade(struct bch_fs *c) bch2_recovery_passes_from_stable(le64_to_cpu(passes))); } - bch_info(c, "%s", buf.buf); + bch_notice(c, "%s", buf.buf); printbuf_exit(&buf); ret = true; @@ -683,7 +689,7 @@ static bool check_version_upgrade(struct bch_fs *c) bch2_version_to_text(&buf, c->sb.version_incompat_allowed); prt_newline(&buf); - bch_info(c, "%s", buf.buf); + bch_notice(c, "%s", buf.buf); printbuf_exit(&buf); ret = true; @@ -790,11 +796,11 @@ int bch2_fs_recovery(struct bch_fs *c) bch2_write_super(c); mutex_unlock(&c->sb_lock); - if (c->opts.fsck) - set_bit(BCH_FS_fsck_running, &c->flags); if (c->sb.clean) set_bit(BCH_FS_clean_recovery, &c->flags); - set_bit(BCH_FS_recovery_running, &c->flags); + if (c->opts.fsck) + set_bit(BCH_FS_in_fsck, &c->flags); + set_bit(BCH_FS_in_recovery, &c->flags); ret = bch2_blacklist_table_initialize(c); if (ret) { @@ -889,8 +895,37 @@ use_clean: if (ret) goto err; - if (c->opts.reconstruct_alloc) + ret = bch2_fs_resize_on_mount(c); + if (ret) { + up_write(&c->state_lock); + goto err; + } + + if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) { + bch_info(c, "filesystem is an unresized image file, mounting ro"); + c->opts.read_only = true; + } + + if (!c->opts.read_only && + (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))) { + bch_info(c, "mounting a filesystem with no alloc info read-write; will recreate"); + bch2_reconstruct_alloc(c); + } else if (c->opts.reconstruct_alloc) { + bch2_journal_log_msg(c, "dropping alloc info"); + bch_info(c, "dropping and reconstructing all alloc info"); + + bch2_reconstruct_alloc(c); + } + + if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)) { + /* We can't go RW to fix errors without alloc info */ + if (c->opts.fix_errors == FSCK_FIX_yes || + c->opts.fix_errors == FSCK_FIX_ask) + c->opts.fix_errors = FSCK_FIX_no; + if (c->opts.errors == BCH_ON_ERROR_fix_safe) + c->opts.errors = BCH_ON_ERROR_continue; + } /* * After an unclean shutdown, skip then next few journal sequence @@ -933,8 +968,10 @@ use_clean: set_bit(BCH_FS_btree_running, &c->flags); ret = bch2_sb_set_upgrade_extra(c); + if (ret) + goto err; - ret = bch2_run_recovery_passes(c); + ret = bch2_run_recovery_passes(c, 0); if (ret) goto err; @@ -945,8 +982,7 @@ use_clean: * multithreaded use: */ set_bit(BCH_FS_may_go_rw, &c->flags); - clear_bit(BCH_FS_fsck_running, &c->flags); - clear_bit(BCH_FS_recovery_running, &c->flags); + clear_bit(BCH_FS_in_fsck, &c->flags); /* in case we don't run journal replay, i.e. 
norecovery mode */ set_bit(BCH_FS_accounting_replay_done, &c->flags); @@ -969,9 +1005,8 @@ use_clean: bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean"); clear_bit(BCH_FS_errors_fixed, &c->flags); - c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; - - ret = bch2_run_recovery_passes(c); + ret = bch2_run_recovery_passes(c, + BCH_RECOVERY_PASS_check_alloc_info); if (ret) goto err; @@ -1015,7 +1050,7 @@ use_clean: if (c->opts.fsck && !test_bit(BCH_FS_error, &c->flags) && - c->recovery_pass_done == BCH_RECOVERY_PASS_NR - 1 && + c->recovery.pass_done == BCH_RECOVERY_PASS_NR - 1 && ext->btrees_lost_data) { ext->btrees_lost_data = 0; write_sb = true; @@ -1076,8 +1111,17 @@ out: return ret; err: fsck_err: - bch2_fs_emergency_read_only(c); - goto out; + { + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + prt_printf(&buf, "error in recovery: %s", bch2_err_str(ret)); + bch2_fs_emergency_read_only2(c, &buf); + + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + } + return ret; } int bch2_fs_initialize(struct bch_fs *c) @@ -1193,7 +1237,7 @@ int bch2_fs_initialize(struct bch_fs *c) if (ret) goto err; - c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1; + c->recovery.pass_done = BCH_RECOVERY_PASS_NR - 1; bch2_copygc_wakeup(c); bch2_rebalance_wakeup(c); @@ -1216,7 +1260,7 @@ int bch2_fs_initialize(struct bch_fs *c) bch2_write_super(c); mutex_unlock(&c->sb_lock); - c->curr_recovery_pass = BCH_RECOVERY_PASS_NR; + c->recovery.curr_pass = BCH_RECOVERY_PASS_NR; return 0; err: bch_err_fn(c, ret); diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h index b0d55754b21b..c023f52fc2d6 100644 --- a/fs/bcachefs/recovery.h +++ b/fs/bcachefs/recovery.h @@ -2,7 +2,8 @@ #ifndef _BCACHEFS_RECOVERY_H #define _BCACHEFS_RECOVERY_H -int bch2_btree_lost_data(struct bch_fs *, enum btree_id); +int bch2_btree_lost_data(struct bch_fs *, struct printbuf *, enum btree_id); +void bch2_reconstruct_alloc(struct bch_fs *); int bch2_journal_replay(struct bch_fs *); diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c index 22f72bb5b853..dabb29b08ad0 100644 --- a/fs/bcachefs/recovery_passes.c +++ b/fs/bcachefs/recovery_passes.c @@ -28,6 +28,145 @@ const char * const bch2_recovery_passes[] = { NULL }; +static const u8 passes_to_stable_map[] = { +#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n, + BCH_RECOVERY_PASSES() +#undef x +}; + +static const u8 passes_from_stable_map[] = { +#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n, + BCH_RECOVERY_PASSES() +#undef x +}; + +static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass) +{ + return passes_to_stable_map[pass]; +} + +u64 bch2_recovery_passes_to_stable(u64 v) +{ + u64 ret = 0; + for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++) + if (v & BIT_ULL(i)) + ret |= BIT_ULL(passes_to_stable_map[i]); + return ret; +} + +static enum bch_recovery_pass bch2_recovery_pass_from_stable(enum bch_recovery_pass_stable pass) +{ + return pass < ARRAY_SIZE(passes_from_stable_map) + ? 
passes_from_stable_map[pass] + : 0; +} + +u64 bch2_recovery_passes_from_stable(u64 v) +{ + u64 ret = 0; + for (unsigned i = 0; i < ARRAY_SIZE(passes_from_stable_map); i++) + if (v & BIT_ULL(i)) + ret |= BIT_ULL(passes_from_stable_map[i]); + return ret; +} + +static int bch2_sb_recovery_passes_validate(struct bch_sb *sb, struct bch_sb_field *f, + enum bch_validate_flags flags, struct printbuf *err) +{ + return 0; +} + +static void bch2_sb_recovery_passes_to_text(struct printbuf *out, + struct bch_sb *sb, + struct bch_sb_field *f) +{ + struct bch_sb_field_recovery_passes *r = + field_to_type(f, recovery_passes); + unsigned nr = recovery_passes_nr_entries(r); + + if (out->nr_tabstops < 1) + printbuf_tabstop_push(out, 32); + if (out->nr_tabstops < 2) + printbuf_tabstop_push(out, 16); + + prt_printf(out, "Pass\tLast run\tLast runtime\n"); + + for (struct recovery_pass_entry *i = r->start; i < r->start + nr; i++) { + if (!i->last_run) + continue; + + unsigned idx = i - r->start; + + prt_printf(out, "%s\t", bch2_recovery_passes[bch2_recovery_pass_from_stable(idx)]); + + bch2_prt_datetime(out, le64_to_cpu(i->last_run)); + prt_tab(out); + + bch2_pr_time_units(out, le32_to_cpu(i->last_runtime) * NSEC_PER_SEC); + prt_newline(out); + } +} + +static void bch2_sb_recovery_pass_complete(struct bch_fs *c, + enum bch_recovery_pass pass, + s64 start_time) +{ + enum bch_recovery_pass_stable stable = bch2_recovery_pass_to_stable(pass); + s64 end_time = ktime_get_real_seconds(); + + mutex_lock(&c->sb_lock); + struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); + __clear_bit_le64(stable, ext->recovery_passes_required); + + struct bch_sb_field_recovery_passes *r = + bch2_sb_field_get(c->disk_sb.sb, recovery_passes); + + if (stable >= recovery_passes_nr_entries(r)) { + unsigned u64s = struct_size(r, start, stable + 1) / sizeof(u64); + + r = bch2_sb_field_resize(&c->disk_sb, recovery_passes, u64s); + if (!r) { + bch_err(c, "error creating recovery_passes sb section"); + goto out; + } + } + + r->start[stable].last_run = cpu_to_le64(end_time); + r->start[stable].last_runtime = cpu_to_le32(max(0, end_time - start_time)); +out: + bch2_write_super(c); + mutex_unlock(&c->sb_lock); +} + +static bool bch2_recovery_pass_want_ratelimit(struct bch_fs *c, enum bch_recovery_pass pass) +{ + enum bch_recovery_pass_stable stable = bch2_recovery_pass_to_stable(pass); + bool ret = false; + + lockdep_assert_held(&c->sb_lock); + + struct bch_sb_field_recovery_passes *r = + bch2_sb_field_get(c->disk_sb.sb, recovery_passes); + + if (stable < recovery_passes_nr_entries(r)) { + struct recovery_pass_entry *i = r->start + stable; + + /* + * Ratelimit if the last runtime was more than 1% of the time + * since we last ran + */ + ret = (u64) le32_to_cpu(i->last_runtime) * 100 > + ktime_get_real_seconds() - le64_to_cpu(i->last_run); + } + + return ret; +} + +const struct bch_sb_field_ops bch_sb_field_ops_recovery_passes = { + .validate = bch2_sb_recovery_passes_validate, + .to_text = bch2_sb_recovery_passes_to_text +}; + /* Fake recovery pass, so that scan_for_btree_nodes isn't 0: */ static int bch2_recovery_pass_empty(struct bch_fs *c) { @@ -47,11 +186,36 @@ static int bch2_set_may_go_rw(struct bch_fs *c) set_bit(BCH_FS_may_go_rw, &c->flags); - if (keys->nr || !c->opts.read_only || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes) + if (keys->nr || + !c->opts.read_only || + !c->sb.clean || + c->opts.recovery_passes || + (c->opts.fsck && !(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)))) { + if (c->sb.features & 
BIT_ULL(BCH_FEATURE_no_alloc_info)) { + bch_info(c, "mounting a filesystem with no alloc info read-write; will recreate"); + bch2_reconstruct_alloc(c); + } + return bch2_fs_read_write_early(c); + } return 0; } +/* + * Make sure root inode is readable while we're still in recovery and can rewind + * for repair: + */ +static int bch2_lookup_root_inode(struct bch_fs *c) +{ + subvol_inum inum = BCACHEFS_ROOT_SUBVOL_INUM; + struct bch_inode_unpacked inode_u; + struct bch_subvolume subvol; + + return bch2_trans_do(c, + bch2_subvolume_get(trans, inum.subvol, true, &subvol) ?: + bch2_inode_find_by_inum_trans(trans, inum, &inode_u)); +} + struct recovery_pass_fn { int (*fn)(struct bch_fs *); unsigned when; @@ -63,252 +227,351 @@ static struct recovery_pass_fn recovery_pass_fns[] = { #undef x }; -static const u8 passes_to_stable_map[] = { -#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n, - BCH_RECOVERY_PASSES() -#undef x -}; +static u64 bch2_recovery_passes_match(unsigned flags) +{ + u64 ret = 0; -static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass) + for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) + if (recovery_pass_fns[i].when & flags) + ret |= BIT_ULL(i); + return ret; +} + +u64 bch2_fsck_recovery_passes(void) { - return passes_to_stable_map[pass]; + return bch2_recovery_passes_match(PASS_FSCK); } -u64 bch2_recovery_passes_to_stable(u64 v) +static void bch2_run_async_recovery_passes(struct bch_fs *c) { - u64 ret = 0; - for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++) - if (v & BIT_ULL(i)) - ret |= BIT_ULL(passes_to_stable_map[i]); - return ret; + if (!down_trylock(&c->recovery.run_lock)) + return; + + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_async_recovery_passes)) + goto unlock; + + if (queue_work(system_long_wq, &c->recovery.work)) + return; + + enumerated_ref_put(&c->writes, BCH_WRITE_REF_async_recovery_passes); +unlock: + up(&c->recovery.run_lock); } -u64 bch2_recovery_passes_from_stable(u64 v) +static bool recovery_pass_needs_set(struct bch_fs *c, + enum bch_recovery_pass pass, + enum bch_run_recovery_pass_flags *flags) { - static const u8 map[] = { -#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n, - BCH_RECOVERY_PASSES() -#undef x - }; + struct bch_fs_recovery *r = &c->recovery; + bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags); + bool persistent = !in_recovery || !(*flags & RUN_RECOVERY_PASS_nopersistent); - u64 ret = 0; - for (unsigned i = 0; i < ARRAY_SIZE(map); i++) - if (v & BIT_ULL(i)) - ret |= BIT_ULL(map[i]); - return ret; + if ((*flags & RUN_RECOVERY_PASS_ratelimit) && + !bch2_recovery_pass_want_ratelimit(c, pass)) + *flags &= ~RUN_RECOVERY_PASS_ratelimit; + + /* + * If RUN_RECOVERY_PASS_nopersistent is set, we don't want to do + * anything if the pass has already run: these mean we need a prior pass + * to run before we continue to repair, we don't expect that pass to fix + * the damage we encountered. + * + * Otherwise, we run run_explicit_recovery_pass when we find damage, so + * it should run again even if it's already run: + */ + + if (persistent + ? 
!(c->sb.recovery_passes_required & BIT_ULL(pass)) + : !((r->passes_to_run|r->passes_complete) & BIT_ULL(pass))) + return true; + + if (!(*flags & RUN_RECOVERY_PASS_ratelimit) && + (r->passes_ratelimiting & BIT_ULL(pass))) + return true; + + return false; } /* * For when we need to rewind recovery passes and run a pass we skipped: */ -static int __bch2_run_explicit_recovery_pass(struct bch_fs *c, - enum bch_recovery_pass pass) +int __bch2_run_explicit_recovery_pass(struct bch_fs *c, + struct printbuf *out, + enum bch_recovery_pass pass, + enum bch_run_recovery_pass_flags flags) { - if (c->curr_recovery_pass == ARRAY_SIZE(recovery_pass_fns)) - return -BCH_ERR_not_in_recovery; + struct bch_fs_recovery *r = &c->recovery; + int ret = 0; - if (c->recovery_passes_complete & BIT_ULL(pass)) - return 0; + lockdep_assert_held(&c->sb_lock); - bool print = !(c->opts.recovery_passes & BIT_ULL(pass)); + bch2_printbuf_make_room(out, 1024); + out->atomic++; - if (pass < BCH_RECOVERY_PASS_set_may_go_rw && - c->curr_recovery_pass >= BCH_RECOVERY_PASS_set_may_go_rw) { - if (print) - bch_info(c, "need recovery pass %s (%u), but already rw", - bch2_recovery_passes[pass], pass); - return -BCH_ERR_cannot_rewind_recovery; - } + unsigned long lockflags; + spin_lock_irqsave(&r->lock, lockflags); - if (print) - bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)", - bch2_recovery_passes[pass], pass, - bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass); + if (!recovery_pass_needs_set(c, pass, &flags)) + goto out; - c->opts.recovery_passes |= BIT_ULL(pass); + bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags); + bool rewind = in_recovery && r->curr_pass > pass; + bool ratelimit = flags & RUN_RECOVERY_PASS_ratelimit; - if (c->curr_recovery_pass > pass) { - c->next_recovery_pass = pass; - c->recovery_passes_complete &= (1ULL << pass) >> 1; - return -BCH_ERR_restart_recovery; - } else { - return 0; + if (!(in_recovery && (flags & RUN_RECOVERY_PASS_nopersistent))) { + struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); + __set_bit_le64(bch2_recovery_pass_to_stable(pass), ext->recovery_passes_required); } -} - -int bch2_run_explicit_recovery_pass(struct bch_fs *c, - enum bch_recovery_pass pass) -{ - unsigned long flags; - spin_lock_irqsave(&c->recovery_pass_lock, flags); - int ret = __bch2_run_explicit_recovery_pass(c, pass); - spin_unlock_irqrestore(&c->recovery_pass_lock, flags); - return ret; -} -int bch2_run_explicit_recovery_pass_persistent_locked(struct bch_fs *c, - enum bch_recovery_pass pass) -{ - lockdep_assert_held(&c->sb_lock); + if (pass < BCH_RECOVERY_PASS_set_may_go_rw && + (!in_recovery || r->curr_pass >= BCH_RECOVERY_PASS_set_may_go_rw)) { + prt_printf(out, "need recovery pass %s (%u), but already rw\n", + bch2_recovery_passes[pass], pass); + ret = -BCH_ERR_cannot_rewind_recovery; + goto out; + } - struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); - __set_bit_le64(bch2_recovery_pass_to_stable(pass), ext->recovery_passes_required); + if (ratelimit) + r->passes_ratelimiting |= BIT_ULL(pass); + else + r->passes_ratelimiting &= ~BIT_ULL(pass); - return bch2_run_explicit_recovery_pass(c, pass); -} + if (in_recovery && !ratelimit) { + prt_printf(out, "running recovery pass %s (%u), currently at %s (%u)%s\n", + bch2_recovery_passes[pass], pass, + bch2_recovery_passes[r->curr_pass], r->curr_pass, + rewind ? 
" - rewinding" : ""); -int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c, - enum bch_recovery_pass pass) -{ - enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass); + r->passes_to_run |= BIT_ULL(pass); - mutex_lock(&c->sb_lock); - struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); + if (rewind) { + r->next_pass = pass; + r->passes_complete &= (1ULL << pass) >> 1; + ret = -BCH_ERR_restart_recovery; + } + } else { + prt_printf(out, "scheduling recovery pass %s (%u)%s\n", + bch2_recovery_passes[pass], pass, + ratelimit ? " - ratelimiting" : ""); - if (!test_bit_le64(s, ext->recovery_passes_required)) { - __set_bit_le64(s, ext->recovery_passes_required); - bch2_write_super(c); + struct recovery_pass_fn *p = recovery_pass_fns + pass; + if (p->when & PASS_ONLINE) + bch2_run_async_recovery_passes(c); } - mutex_unlock(&c->sb_lock); - - return bch2_run_explicit_recovery_pass(c, pass); +out: + spin_unlock_irqrestore(&r->lock, lockflags); + --out->atomic; + return ret; } -static void bch2_clear_recovery_pass_required(struct bch_fs *c, - enum bch_recovery_pass pass) +int bch2_run_explicit_recovery_pass(struct bch_fs *c, + struct printbuf *out, + enum bch_recovery_pass pass, + enum bch_run_recovery_pass_flags flags) { - enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass); + int ret = 0; - mutex_lock(&c->sb_lock); - struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); + scoped_guard(mutex, &c->sb_lock) { + if (!recovery_pass_needs_set(c, pass, &flags)) + return 0; - if (test_bit_le64(s, ext->recovery_passes_required)) { - __clear_bit_le64(s, ext->recovery_passes_required); + ret = __bch2_run_explicit_recovery_pass(c, out, pass, flags); bch2_write_super(c); } - mutex_unlock(&c->sb_lock); -} -u64 bch2_fsck_recovery_passes(void) -{ - u64 ret = 0; - - for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) - if (recovery_pass_fns[i].when & PASS_FSCK) - ret |= BIT_ULL(i); return ret; } -static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) +int bch2_run_print_explicit_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) { - struct recovery_pass_fn *p = recovery_pass_fns + pass; + enum bch_run_recovery_pass_flags flags = RUN_RECOVERY_PASS_nopersistent; - if (c->opts.recovery_passes_exclude & BIT_ULL(pass)) - return false; - if (c->opts.recovery_passes & BIT_ULL(pass)) - return true; - if ((p->when & PASS_FSCK) && c->opts.fsck) - return true; - if ((p->when & PASS_UNCLEAN) && !c->sb.clean) - return true; - if (p->when & PASS_ALWAYS) - return true; - return false; + if (!recovery_pass_needs_set(c, pass, &flags)) + return 0; + + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + mutex_lock(&c->sb_lock); + int ret = __bch2_run_explicit_recovery_pass(c, &buf, pass, + RUN_RECOVERY_PASS_nopersistent); + mutex_unlock(&c->sb_lock); + + bch2_print_str(c, KERN_NOTICE, buf.buf); + printbuf_exit(&buf); + return ret; } static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) { + struct bch_fs_recovery *r = &c->recovery; struct recovery_pass_fn *p = recovery_pass_fns + pass; - int ret; if (!(p->when & PASS_SILENT)) bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."), bch2_recovery_passes[pass]); - ret = p->fn(c); - if (ret) + + s64 start_time = ktime_get_real_seconds(); + int ret = p->fn(c); + + r->passes_to_run &= ~BIT_ULL(pass); + + if (ret) { + r->passes_failing |= BIT_ULL(pass); return ret; + } + + r->passes_failing = 0; + + if (!test_bit(BCH_FS_error, 
&c->flags)) + bch2_sb_recovery_pass_complete(c, pass, start_time); + if (!(p->when & PASS_SILENT)) bch2_print(c, KERN_CONT " done\n"); return 0; } -int bch2_run_online_recovery_passes(struct bch_fs *c) +static int __bch2_run_recovery_passes(struct bch_fs *c, u64 orig_passes_to_run, + bool online) { - for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) { - struct recovery_pass_fn *p = recovery_pass_fns + i; - - if (!(p->when & PASS_ONLINE)) - continue; + struct bch_fs_recovery *r = &c->recovery; + int ret = 0; - int ret = bch2_run_recovery_pass(c, i); - if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) { - i = c->curr_recovery_pass; - continue; - } - if (ret) - return ret; - } + spin_lock_irq(&r->lock); - return 0; -} + if (online) + orig_passes_to_run &= bch2_recovery_passes_match(PASS_ONLINE); -int bch2_run_recovery_passes(struct bch_fs *c) -{ - int ret = 0; + if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)) + orig_passes_to_run &= ~bch2_recovery_passes_match(PASS_ALLOC); /* - * We can't allow set_may_go_rw to be excluded; that would cause us to - * use the journal replay keys for updates where it's not expected. + * A failed recovery pass will be retried after another pass succeeds - + * but not this iteration. + * + * This is because some passes depend on repair done by other passes: we + * may want to retry, but we don't want to loop on failing passes. */ - c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw; - spin_lock_irq(&c->recovery_pass_lock); + orig_passes_to_run &= ~r->passes_failing; - while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns) && !ret) { - unsigned prev_done = c->recovery_pass_done; - unsigned pass = c->curr_recovery_pass; + r->passes_to_run = orig_passes_to_run; - c->next_recovery_pass = pass + 1; + while (r->passes_to_run) { + unsigned prev_done = r->pass_done; + unsigned pass = __ffs64(r->passes_to_run); + r->curr_pass = pass; + r->next_pass = r->curr_pass + 1; + r->passes_to_run &= ~BIT_ULL(pass); - if (c->opts.recovery_pass_last && - c->curr_recovery_pass > c->opts.recovery_pass_last) - break; + spin_unlock_irq(&r->lock); - if (should_run_recovery_pass(c, pass)) { - spin_unlock_irq(&c->recovery_pass_lock); - ret = bch2_run_recovery_pass(c, pass) ?: - bch2_journal_flush(&c->journal); - - if (!ret && !test_bit(BCH_FS_error, &c->flags)) - bch2_clear_recovery_pass_required(c, pass); - spin_lock_irq(&c->recovery_pass_lock); - - if (c->next_recovery_pass < c->curr_recovery_pass) { - /* - * bch2_run_explicit_recovery_pass() was called: we - * can't always catch -BCH_ERR_restart_recovery because - * it may have been called from another thread (btree - * node read completion) - */ - ret = 0; - c->recovery_passes_complete &= ~(~0ULL << c->curr_recovery_pass); - } else { - c->recovery_passes_complete |= BIT_ULL(pass); - c->recovery_pass_done = max(c->recovery_pass_done, pass); - } + int ret2 = bch2_run_recovery_pass(c, pass) ?: + bch2_journal_flush(&c->journal); + + spin_lock_irq(&r->lock); + + if (r->next_pass < r->curr_pass) { + /* Rewind: */ + r->passes_to_run |= orig_passes_to_run & (~0ULL << r->next_pass); + } else if (!ret2) { + r->pass_done = max(r->pass_done, pass); + r->passes_complete |= BIT_ULL(pass); + } else { + ret = ret2; } - c->curr_recovery_pass = c->next_recovery_pass; + if (ret && !online) + break; if (prev_done <= BCH_RECOVERY_PASS_check_snapshots && - c->recovery_pass_done > BCH_RECOVERY_PASS_check_snapshots) { + r->pass_done > BCH_RECOVERY_PASS_check_snapshots) { bch2_copygc_wakeup(c); 
bch2_rebalance_wakeup(c); } } - spin_unlock_irq(&c->recovery_pass_lock); + clear_bit(BCH_FS_in_recovery, &c->flags); + spin_unlock_irq(&r->lock); return ret; } + +static void bch2_async_recovery_passes_work(struct work_struct *work) +{ + struct bch_fs *c = container_of(work, struct bch_fs, recovery.work); + struct bch_fs_recovery *r = &c->recovery; + + __bch2_run_recovery_passes(c, + c->sb.recovery_passes_required & ~r->passes_ratelimiting, + true); + + up(&r->run_lock); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_async_recovery_passes); +} + +int bch2_run_online_recovery_passes(struct bch_fs *c, u64 passes) +{ + return __bch2_run_recovery_passes(c, c->sb.recovery_passes_required|passes, true); +} + +int bch2_run_recovery_passes(struct bch_fs *c, enum bch_recovery_pass from) +{ + u64 passes = + bch2_recovery_passes_match(PASS_ALWAYS) | + (!c->sb.clean ? bch2_recovery_passes_match(PASS_UNCLEAN) : 0) | + (c->opts.fsck ? bch2_recovery_passes_match(PASS_FSCK) : 0) | + c->opts.recovery_passes | + c->sb.recovery_passes_required; + + if (c->opts.recovery_pass_last) + passes &= BIT_ULL(c->opts.recovery_pass_last + 1) - 1; + + /* + * We can't allow set_may_go_rw to be excluded; that would cause us to + * use the journal replay keys for updates where it's not expected. + */ + c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw; + passes &= ~c->opts.recovery_passes_exclude; + + passes &= ~(BIT_ULL(from) - 1); + + down(&c->recovery.run_lock); + int ret = __bch2_run_recovery_passes(c, passes, false); + up(&c->recovery.run_lock); + + return ret; +} + +static void prt_passes(struct printbuf *out, const char *msg, u64 passes) +{ + prt_printf(out, "%s:\t", msg); + prt_bitflags(out, bch2_recovery_passes, passes); + prt_newline(out); +} + +void bch2_recovery_pass_status_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct bch_fs_recovery *r = &c->recovery; + + printbuf_tabstop_push(out, 32); + prt_passes(out, "Scheduled passes", c->sb.recovery_passes_required); + prt_passes(out, "Scheduled online passes", c->sb.recovery_passes_required & + bch2_recovery_passes_match(PASS_ONLINE)); + prt_passes(out, "Complete passes", r->passes_complete); + prt_passes(out, "Failing passes", r->passes_failing); + + if (r->curr_pass) { + prt_printf(out, "Current pass:\t%s\n", bch2_recovery_passes[r->curr_pass]); + prt_passes(out, "Current passes", r->passes_to_run); + } +} + +void bch2_fs_recovery_passes_init(struct bch_fs *c) +{ + spin_lock_init(&c->recovery.lock); + sema_init(&c->recovery.run_lock, 1); + + INIT_WORK(&c->recovery.work, bch2_async_recovery_passes_work); +} diff --git a/fs/bcachefs/recovery_passes.h b/fs/bcachefs/recovery_passes.h index 7d7339c8fa29..dc0d2014ff9b 100644 --- a/fs/bcachefs/recovery_passes.h +++ b/fs/bcachefs/recovery_passes.h @@ -3,16 +3,32 @@ extern const char * const bch2_recovery_passes[]; +extern const struct bch_sb_field_ops bch_sb_field_ops_recovery_passes; + u64 bch2_recovery_passes_to_stable(u64 v); u64 bch2_recovery_passes_from_stable(u64 v); u64 bch2_fsck_recovery_passes(void); -int bch2_run_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass); -int bch2_run_explicit_recovery_pass_persistent_locked(struct bch_fs *, enum bch_recovery_pass); -int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *, enum bch_recovery_pass); +enum bch_run_recovery_pass_flags { + RUN_RECOVERY_PASS_nopersistent = BIT(0), + RUN_RECOVERY_PASS_ratelimit = BIT(1), +}; + +int bch2_run_print_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass); + +int 
__bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *, + enum bch_recovery_pass, + enum bch_run_recovery_pass_flags); +int bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *, + enum bch_recovery_pass, + enum bch_run_recovery_pass_flags); + +int bch2_run_online_recovery_passes(struct bch_fs *, u64); +int bch2_run_recovery_passes(struct bch_fs *, enum bch_recovery_pass); + +void bch2_recovery_pass_status_to_text(struct printbuf *, struct bch_fs *); -int bch2_run_online_recovery_passes(struct bch_fs *); -int bch2_run_recovery_passes(struct bch_fs *); +void bch2_fs_recovery_passes_init(struct bch_fs *); #endif /* _BCACHEFS_RECOVERY_PASSES_H */ diff --git a/fs/bcachefs/recovery_passes_format.h b/fs/bcachefs/recovery_passes_format.h new file mode 100644 index 000000000000..c434eafbca19 --- /dev/null +++ b/fs/bcachefs/recovery_passes_format.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_RECOVERY_PASSES_FORMAT_H +#define _BCACHEFS_RECOVERY_PASSES_FORMAT_H + +#define PASS_SILENT BIT(0) +#define PASS_FSCK BIT(1) +#define PASS_UNCLEAN BIT(2) +#define PASS_ALWAYS BIT(3) +#define PASS_ONLINE BIT(4) +#define PASS_ALLOC BIT(5) +#define PASS_FSCK_ALLOC (PASS_FSCK|PASS_ALLOC) + +#ifdef CONFIG_BCACHEFS_DEBUG +#define PASS_FSCK_DEBUG BIT(1) +#else +#define PASS_FSCK_DEBUG 0 +#endif + +/* + * Passes may be reordered, but the second field is a persistent identifier and + * must never change: + */ +#define BCH_RECOVERY_PASSES() \ + x(recovery_pass_empty, 41, PASS_SILENT) \ + x(scan_for_btree_nodes, 37, 0) \ + x(check_topology, 4, 0) \ + x(accounting_read, 39, PASS_ALWAYS) \ + x(alloc_read, 0, PASS_ALWAYS) \ + x(stripes_read, 1, 0) \ + x(initialize_subvolumes, 2, 0) \ + x(snapshots_read, 3, PASS_ALWAYS) \ + x(check_allocations, 5, PASS_FSCK_ALLOC) \ + x(trans_mark_dev_sbs, 6, PASS_ALWAYS|PASS_SILENT|PASS_ALLOC) \ + x(fs_journal_alloc, 7, PASS_ALWAYS|PASS_SILENT|PASS_ALLOC) \ + x(set_may_go_rw, 8, PASS_ALWAYS|PASS_SILENT) \ + x(journal_replay, 9, PASS_ALWAYS) \ + x(check_alloc_info, 10, PASS_ONLINE|PASS_FSCK_ALLOC) \ + x(check_lrus, 11, PASS_ONLINE|PASS_FSCK_ALLOC) \ + x(check_btree_backpointers, 12, PASS_ONLINE|PASS_FSCK_ALLOC) \ + x(check_backpointers_to_extents, 13, PASS_ONLINE|PASS_FSCK_DEBUG) \ + x(check_extents_to_backpointers, 14, PASS_ONLINE|PASS_FSCK_ALLOC) \ + x(check_alloc_to_lru_refs, 15, PASS_ONLINE|PASS_FSCK_ALLOC) \ + x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \ + x(bucket_gens_init, 17, 0) \ + x(reconstruct_snapshots, 38, 0) \ + x(check_snapshot_trees, 18, PASS_ONLINE|PASS_FSCK) \ + x(check_snapshots, 19, PASS_ONLINE|PASS_FSCK) \ + x(check_subvols, 20, PASS_ONLINE|PASS_FSCK) \ + x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \ + x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \ + x(fs_upgrade_for_subvolumes, 22, 0) \ + x(check_inodes, 24, PASS_FSCK) \ + x(check_extents, 25, PASS_FSCK) \ + x(check_indirect_extents, 26, PASS_ONLINE|PASS_FSCK) \ + x(check_dirents, 27, PASS_FSCK) \ + x(check_xattrs, 28, PASS_FSCK) \ + x(check_root, 29, PASS_ONLINE|PASS_FSCK) \ + x(check_unreachable_inodes, 40, PASS_FSCK) \ + x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \ + x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \ + x(check_nlinks, 31, PASS_FSCK) \ + x(check_rebalance_work, 43, PASS_ONLINE|PASS_FSCK) \ + x(resume_logged_ops, 23, PASS_ALWAYS) \ + x(delete_dead_inodes, 32, PASS_ALWAYS) \ + x(fix_reflink_p, 33, 0) \ + x(set_fs_needs_rebalance, 34, 0) \ + x(lookup_root_inode, 42, PASS_ALWAYS|PASS_SILENT) + +/* We 
normally enumerate recovery passes in the order we run them: */ +enum bch_recovery_pass { +#define x(n, id, when) BCH_RECOVERY_PASS_##n, + BCH_RECOVERY_PASSES() +#undef x + BCH_RECOVERY_PASS_NR +}; + +/* But we also need stable identifiers that can be used in the superblock */ +enum bch_recovery_pass_stable { +#define x(n, id, when) BCH_RECOVERY_PASS_STABLE_##n = id, + BCH_RECOVERY_PASSES() +#undef x +}; + +struct recovery_pass_entry { + __le64 last_run; + __le32 last_runtime; + __le32 flags; +}; + +struct bch_sb_field_recovery_passes { + struct bch_sb_field field; + struct recovery_pass_entry start[]; +}; + +static inline unsigned +recovery_passes_nr_entries(struct bch_sb_field_recovery_passes *r) +{ + return r + ? ((vstruct_end(&r->field) - (void *) &r->start[0]) / + sizeof(struct recovery_pass_entry)) + : 0; +} + +#endif /* _BCACHEFS_RECOVERY_PASSES_FORMAT_H */ diff --git a/fs/bcachefs/recovery_passes_types.h b/fs/bcachefs/recovery_passes_types.h index e89b9c783285..aa9526938cc3 100644 --- a/fs/bcachefs/recovery_passes_types.h +++ b/fs/bcachefs/recovery_passes_types.h @@ -2,79 +2,26 @@ #ifndef _BCACHEFS_RECOVERY_PASSES_TYPES_H #define _BCACHEFS_RECOVERY_PASSES_TYPES_H -#define PASS_SILENT BIT(0) -#define PASS_FSCK BIT(1) -#define PASS_UNCLEAN BIT(2) -#define PASS_ALWAYS BIT(3) -#define PASS_ONLINE BIT(4) - -#ifdef CONFIG_BCACHEFS_DEBUG -#define PASS_FSCK_DEBUG BIT(1) -#else -#define PASS_FSCK_DEBUG 0 -#endif - -/* - * Passes may be reordered, but the second field is a persistent identifier and - * must never change: - */ -#define BCH_RECOVERY_PASSES() \ - x(recovery_pass_empty, 41, PASS_SILENT) \ - x(scan_for_btree_nodes, 37, 0) \ - x(check_topology, 4, 0) \ - x(accounting_read, 39, PASS_ALWAYS) \ - x(alloc_read, 0, PASS_ALWAYS) \ - x(stripes_read, 1, 0) \ - x(initialize_subvolumes, 2, 0) \ - x(snapshots_read, 3, PASS_ALWAYS) \ - x(check_allocations, 5, PASS_FSCK) \ - x(trans_mark_dev_sbs, 6, PASS_ALWAYS|PASS_SILENT) \ - x(fs_journal_alloc, 7, PASS_ALWAYS|PASS_SILENT) \ - x(set_may_go_rw, 8, PASS_ALWAYS|PASS_SILENT) \ - x(journal_replay, 9, PASS_ALWAYS) \ - x(check_alloc_info, 10, PASS_ONLINE|PASS_FSCK) \ - x(check_lrus, 11, PASS_ONLINE|PASS_FSCK) \ - x(check_btree_backpointers, 12, PASS_ONLINE|PASS_FSCK) \ - x(check_backpointers_to_extents, 13, PASS_ONLINE|PASS_FSCK_DEBUG) \ - x(check_extents_to_backpointers, 14, PASS_ONLINE|PASS_FSCK) \ - x(check_alloc_to_lru_refs, 15, PASS_ONLINE|PASS_FSCK) \ - x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \ - x(bucket_gens_init, 17, 0) \ - x(reconstruct_snapshots, 38, 0) \ - x(check_snapshot_trees, 18, PASS_ONLINE|PASS_FSCK) \ - x(check_snapshots, 19, PASS_ONLINE|PASS_FSCK) \ - x(check_subvols, 20, PASS_ONLINE|PASS_FSCK) \ - x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \ - x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \ - x(fs_upgrade_for_subvolumes, 22, 0) \ - x(check_inodes, 24, PASS_FSCK) \ - x(check_extents, 25, PASS_FSCK) \ - x(check_indirect_extents, 26, PASS_ONLINE|PASS_FSCK) \ - x(check_dirents, 27, PASS_FSCK) \ - x(check_xattrs, 28, PASS_FSCK) \ - x(check_root, 29, PASS_ONLINE|PASS_FSCK) \ - x(check_unreachable_inodes, 40, PASS_FSCK) \ - x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \ - x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \ - x(check_nlinks, 31, PASS_FSCK) \ - x(resume_logged_ops, 23, PASS_ALWAYS) \ - x(delete_dead_inodes, 32, PASS_ALWAYS) \ - x(fix_reflink_p, 33, 0) \ - x(set_fs_needs_rebalance, 34, 0) - -/* We normally enumerate recovery passes in the order we run them: */ -enum 
bch_recovery_pass { -#define x(n, id, when) BCH_RECOVERY_PASS_##n, - BCH_RECOVERY_PASSES() -#undef x - BCH_RECOVERY_PASS_NR -}; - -/* But we also need stable identifiers that can be used in the superblock */ -enum bch_recovery_pass_stable { -#define x(n, id, when) BCH_RECOVERY_PASS_STABLE_##n = id, - BCH_RECOVERY_PASSES() -#undef x +struct bch_fs_recovery { + /* + * Two different uses: + * "Has this fsck pass?" - i.e. should this type of error be an + * emergency read-only + * And, in certain situations fsck will rewind to an earlier pass: used + * for signaling to the toplevel code which pass we want to run now. + */ + enum bch_recovery_pass curr_pass; + enum bch_recovery_pass next_pass; + /* never rewinds version of curr_pass */ + enum bch_recovery_pass pass_done; + u64 passes_to_run; + /* bitmask of recovery passes that we actually ran */ + u64 passes_complete; + u64 passes_failing; + u64 passes_ratelimiting; + spinlock_t lock; + struct semaphore run_lock; + struct work_struct work; }; #endif /* _BCACHEFS_RECOVERY_PASSES_TYPES_H */ diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c index 710178e3da4c..3a13dbcab6ba 100644 --- a/fs/bcachefs/reflink.c +++ b/fs/bcachefs/reflink.c @@ -3,6 +3,7 @@ #include "bkey_buf.h" #include "btree_update.h" #include "buckets.h" +#include "enumerated_ref.h" #include "error.h" #include "extents.h" #include "inode.h" @@ -610,7 +611,7 @@ s64 bch2_remap_range(struct bch_fs *c, !bch2_request_incompat_feature(c, bcachefs_metadata_version_reflink_p_may_update_opts); int ret = 0, ret2 = 0; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_reflink)) return -BCH_ERR_erofs_no_writes; bch2_check_set_feature(c, BCH_FEATURE_reflink); @@ -761,7 +762,7 @@ err: bch2_bkey_buf_exit(&new_src, c); bch2_bkey_buf_exit(&new_dst, c); - bch2_write_ref_put(c, BCH_WRITE_REF_reflink); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_reflink); return dst_done ?: ret ?: ret2; } diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h index fa27ec59a647..7c0c9c842b4e 100644 --- a/fs/bcachefs/sb-counters_format.h +++ b/fs/bcachefs/sb-counters_format.h @@ -16,6 +16,7 @@ enum counters_flags { x(io_read_split, 33, TYPE_COUNTER) \ x(io_read_reuse_race, 34, TYPE_COUNTER) \ x(io_read_retry, 32, TYPE_COUNTER) \ + x(io_read_fail_and_poison, 82, TYPE_COUNTER) \ x(io_write, 1, TYPE_SECTORS) \ x(io_move, 2, TYPE_SECTORS) \ x(io_move_read, 35, TYPE_SECTORS) \ @@ -24,6 +25,7 @@ enum counters_flags { x(io_move_fail, 38, TYPE_COUNTER) \ x(io_move_write_fail, 82, TYPE_COUNTER) \ x(io_move_start_fail, 39, TYPE_COUNTER) \ + x(io_move_created_rebalance, 83, TYPE_COUNTER) \ x(bucket_invalidate, 3, TYPE_COUNTER) \ x(bucket_discard, 4, TYPE_COUNTER) \ x(bucket_discard_fast, 79, TYPE_COUNTER) \ diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c index badd0e17ada5..861fce1630f0 100644 --- a/fs/bcachefs/sb-downgrade.c +++ b/fs/bcachefs/sb-downgrade.c @@ -100,7 +100,11 @@ BCH_FSCK_ERR_ptr_to_missing_backpointer) \ x(stripe_backpointers, \ BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\ - BCH_FSCK_ERR_ptr_to_missing_backpointer) + BCH_FSCK_ERR_ptr_to_missing_backpointer) \ + x(inode_has_case_insensitive, \ + BIT_ULL(BCH_RECOVERY_PASS_check_inodes), \ + BCH_FSCK_ERR_inode_has_case_insensitive_not_set, \ + BCH_FSCK_ERR_inode_parent_has_case_insensitive_not_set) #define DOWNGRADE_TABLE() \ x(bucket_stripe_sectors, \ @@ -374,6 +378,9 @@ int bch2_sb_downgrade_update(struct bch_fs *c) if 
(BCH_VERSION_MAJOR(src->version) != BCH_VERSION_MAJOR(le16_to_cpu(c->disk_sb.sb->version))) continue; + if (src->version < c->sb.version_incompat) + continue; + struct bch_sb_field_downgrade_entry *dst; unsigned bytes = sizeof(*dst) + sizeof(dst->errors[0]) * src->nr_errors; diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h index 3b69a924086f..0bfb151da9cf 100644 --- a/fs/bcachefs/sb-errors_format.h +++ b/fs/bcachefs/sb-errors_format.h @@ -209,6 +209,7 @@ enum bch_fsck_flags { x(subvol_to_missing_root, 188, 0) \ x(subvol_root_wrong_bi_subvol, 189, FSCK_AUTOFIX) \ x(bkey_in_missing_snapshot, 190, 0) \ + x(bkey_in_deleted_snapshot, 315, FSCK_AUTOFIX) \ x(inode_pos_inode_nonzero, 191, 0) \ x(inode_pos_blockdev_range, 192, 0) \ x(inode_alloc_cursor_inode_bad, 301, 0) \ @@ -216,6 +217,7 @@ enum bch_fsck_flags { x(inode_str_hash_invalid, 194, 0) \ x(inode_v3_fields_start_bad, 195, 0) \ x(inode_snapshot_mismatch, 196, 0) \ + x(snapshot_key_missing_inode_snapshot, 314, 0) \ x(inode_unlinked_but_clean, 197, 0) \ x(inode_unlinked_but_nlink_nonzero, 198, 0) \ x(inode_unlinked_and_not_open, 281, 0) \ @@ -237,6 +239,8 @@ enum bch_fsck_flags { x(inode_unreachable, 210, FSCK_AUTOFIX) \ x(inode_journal_seq_in_future, 299, FSCK_AUTOFIX) \ x(inode_i_sectors_underflow, 312, FSCK_AUTOFIX) \ + x(inode_has_case_insensitive_not_set, 316, FSCK_AUTOFIX) \ + x(inode_parent_has_case_insensitive_not_set, 317, FSCK_AUTOFIX) \ x(vfs_inode_i_blocks_underflow, 311, FSCK_AUTOFIX) \ x(vfs_inode_i_blocks_not_zero_at_truncate, 313, FSCK_AUTOFIX) \ x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \ @@ -262,6 +266,7 @@ enum bch_fsck_flags { x(dirent_to_overwritten_inode, 302, 0) \ x(dirent_to_missing_subvol, 230, 0) \ x(dirent_to_itself, 231, 0) \ + x(dirent_casefold_mismatch, 318, FSCK_AUTOFIX) \ x(quota_type_invalid, 232, 0) \ x(xattr_val_size_too_small, 233, 0) \ x(xattr_val_size_too_big, 234, 0) \ @@ -301,6 +306,7 @@ enum bch_fsck_flags { x(btree_ptr_v2_written_0, 268, 0) \ x(subvol_snapshot_bad, 269, 0) \ x(subvol_inode_bad, 270, 0) \ + x(subvol_missing, 308, FSCK_AUTOFIX) \ x(alloc_key_stripe_sectors_wrong, 271, FSCK_AUTOFIX) \ x(accounting_mismatch, 272, FSCK_AUTOFIX) \ x(accounting_replicas_not_marked, 273, 0) \ @@ -322,7 +328,7 @@ enum bch_fsck_flags { x(dirent_stray_data_after_cf_name, 305, 0) \ x(rebalance_work_incorrectly_set, 309, FSCK_AUTOFIX) \ x(rebalance_work_incorrectly_unset, 310, FSCK_AUTOFIX) \ - x(MAX, 314, 0) + x(MAX, 319, 0) enum bch_sb_error_id { #define x(t, n, ...) 
BCH_FSCK_ERR_##t = n, diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c index 72779912939b..3398906660a5 100644 --- a/fs/bcachefs/sb-members.c +++ b/fs/bcachefs/sb-members.c @@ -5,11 +5,31 @@ #include "disk_groups.h" #include "error.h" #include "opts.h" +#include "recovery_passes.h" #include "replicas.h" #include "sb-members.h" #include "super-io.h" -void bch2_dev_missing(struct bch_fs *c, unsigned dev) +int bch2_dev_missing_bkey(struct bch_fs *c, struct bkey_s_c k, unsigned dev) +{ + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + prt_printf(&buf, "pointer to nonexistent device %u in key\n", dev); + bch2_bkey_val_to_text(&buf, c, k); + + bool print = bch2_count_fsck_err(c, ptr_to_invalid_device, &buf); + + int ret = bch2_run_explicit_recovery_pass(c, &buf, + BCH_RECOVERY_PASS_check_allocations, 0); + + if (print) + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + return ret; +} + +void bch2_dev_missing_atomic(struct bch_fs *c, unsigned dev) { if (dev != BCH_SB_MEMBER_INVALID) bch2_fs_inconsistent(c, "pointer to nonexistent device %u", dev); @@ -119,6 +139,11 @@ int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb) struct bch_sb_field_members_v1 *mi1; struct bch_sb_field_members_v2 *mi2; + if (BCH_SB_VERSION_INCOMPAT(disk_sb->sb) > bcachefs_metadata_version_extent_flags) { + bch2_sb_field_resize(disk_sb, members_v1, 0); + return 0; + } + mi1 = bch2_sb_field_resize(disk_sb, members_v1, DIV_ROUND_UP(sizeof(*mi1) + BCH_MEMBER_V1_BYTES * disk_sb->sb->nr_devices, sizeof(u64))); @@ -170,6 +195,12 @@ static int validate_member(struct printbuf *err, return -BCH_ERR_invalid_sb_members; } + if (BCH_MEMBER_FREESPACE_INITIALIZED(&m) && + sb->features[0] & cpu_to_le64(BIT_ULL(BCH_FEATURE_no_alloc_info))) { + prt_printf(err, "device %u: freespace initialized but fs has no alloc info", i); + return -BCH_ERR_invalid_sb_members; + } + return 0; } @@ -191,17 +222,11 @@ static void member_to_text(struct printbuf *out, printbuf_indent_add(out, 2); prt_printf(out, "Label:\t"); - if (BCH_MEMBER_GROUP(&m)) { - unsigned idx = BCH_MEMBER_GROUP(&m) - 1; - - if (idx < disk_groups_nr(gi)) - prt_printf(out, "%s (%u)", - gi->entries[idx].label, idx); - else - prt_printf(out, "(bad disk labels section)"); - } else { + if (BCH_MEMBER_GROUP(&m)) + bch2_disk_path_to_text_sb(out, sb, + BCH_MEMBER_GROUP(&m) - 1); + else prt_printf(out, "(none)"); - } prt_newline(out); prt_printf(out, "UUID:\t"); @@ -268,6 +293,7 @@ static void member_to_text(struct printbuf *out, prt_printf(out, "Discard:\t%llu\n", BCH_MEMBER_DISCARD(&m)); prt_printf(out, "Freespace initialized:\t%llu\n", BCH_MEMBER_FREESPACE_INITIALIZED(&m)); + prt_printf(out, "Resize on mount:\t%llu\n", BCH_MEMBER_RESIZE_ON_MOUNT(&m)); printbuf_indent_sub(out, 2); } @@ -493,6 +519,7 @@ int bch2_sb_member_alloc(struct bch_fs *c) unsigned u64s; int best = -1; u64 best_last_mount = 0; + unsigned nr_deleted = 0; if (dev_idx < BCH_SB_MEMBERS_MAX) goto have_slot; @@ -503,7 +530,10 @@ int bch2_sb_member_alloc(struct bch_fs *c) continue; struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx); - if (bch2_member_alive(&m)) + + nr_deleted += uuid_equal(&m.uuid, &BCH_SB_MEMBER_DELETED_UUID); + + if (!bch2_is_zero(&m.uuid, sizeof(m.uuid))) continue; u64 last_mount = le64_to_cpu(m.last_mount); @@ -517,6 +547,10 @@ int bch2_sb_member_alloc(struct bch_fs *c) goto have_slot; } + if (nr_deleted) + bch_err(c, "unable to allocate new member, but have %u deleted: run fsck", + nr_deleted); + return -BCH_ERR_ENOSPC_sb_members; 
have_slot: nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices); @@ -532,3 +566,22 @@ have_slot: c->disk_sb.sb->nr_devices = nr_devices; return dev_idx; } + +void bch2_sb_members_clean_deleted(struct bch_fs *c) +{ + mutex_lock(&c->sb_lock); + bool write_sb = false; + + for (unsigned i = 0; i < c->sb.nr_devices; i++) { + struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, i); + + if (uuid_equal(&m->uuid, &BCH_SB_MEMBER_DELETED_UUID)) { + memset(&m->uuid, 0, sizeof(m->uuid)); + write_sb = true; + } + } + + if (write_sb) + bch2_write_super(c); + mutex_unlock(&c->sb_lock); +} diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h index 42786657522c..6bd9b86aee5b 100644 --- a/fs/bcachefs/sb-members.h +++ b/fs/bcachefs/sb-members.h @@ -4,6 +4,7 @@ #include "darray.h" #include "bkey_types.h" +#include "enumerated_ref.h" extern char * const bch2_member_error_strs[]; @@ -20,7 +21,7 @@ struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i); static inline bool bch2_dev_is_online(struct bch_dev *ca) { - return !percpu_ref_is_zero(&ca->io_ref[READ]); + return !enumerated_ref_is_zero(&ca->io_ref[READ]); } static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned); @@ -104,6 +105,12 @@ static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev * for (struct bch_dev *_ca = NULL; \ (_ca = __bch2_next_dev((_c), _ca, (_mask)));) +#define for_each_online_member_rcu(_c, _ca) \ + for_each_member_device_rcu(_c, _ca, &(_c)->online_devs) + +#define for_each_rw_member_rcu(_c, _ca) \ + for_each_member_device_rcu(_c, _ca, &(_c)->rw_devs[BCH_DATA_free]) + static inline void bch2_dev_get(struct bch_dev *ca) { #ifdef CONFIG_BCACHEFS_DEBUG @@ -157,33 +164,33 @@ static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c, struct bch_dev *ca, unsigned state_mask, - int rw) + int rw, unsigned ref_idx) { rcu_read_lock(); if (ca) - percpu_ref_put(&ca->io_ref[rw]); + enumerated_ref_put(&ca->io_ref[rw], ref_idx); while ((ca = __bch2_next_dev(c, ca, NULL)) && (!((1 << ca->mi.state) & state_mask) || - !percpu_ref_tryget(&ca->io_ref[rw]))) + !enumerated_ref_tryget(&ca->io_ref[rw], ref_idx))) ; rcu_read_unlock(); return ca; } -#define __for_each_online_member(_c, _ca, state_mask, rw) \ +#define __for_each_online_member(_c, _ca, state_mask, rw, ref_idx) \ for (struct bch_dev *_ca = NULL; \ - (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw));) + (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw, ref_idx));) -#define for_each_online_member(c, ca) \ - __for_each_online_member(c, ca, ~0, READ) +#define for_each_online_member(c, ca, ref_idx) \ + __for_each_online_member(c, ca, ~0, READ, ref_idx) -#define for_each_rw_member(c, ca) \ - __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE) +#define for_each_rw_member(c, ca, ref_idx) \ + __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE, ref_idx) -#define for_each_readable_member(c, ca) \ - __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ) +#define for_each_readable_member(c, ca, ref_idx) \ + __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ, ref_idx) static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev) { @@ -218,13 +225,15 @@ static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned de : NULL; } -void bch2_dev_missing(struct bch_fs *, unsigned); +int bch2_dev_missing_bkey(struct 
bch_fs *, struct bkey_s_c, unsigned); + +void bch2_dev_missing_atomic(struct bch_fs *, unsigned); static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev) { struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev); if (unlikely(!ca)) - bch2_dev_missing(c, dev); + bch2_dev_missing_atomic(c, dev); return ca; } @@ -242,7 +251,7 @@ static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev) { struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev); if (unlikely(!ca)) - bch2_dev_missing(c, dev); + bch2_dev_missing_atomic(c, dev); return ca; } @@ -285,13 +294,14 @@ static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev return bch2_dev_tryget(c, dev_idx); } -static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw) +static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, + int rw, unsigned ref_idx) { might_sleep(); rcu_read_lock(); struct bch_dev *ca = bch2_dev_rcu(c, dev); - if (ca && !percpu_ref_tryget(&ca->io_ref[rw])) + if (ca && !enumerated_ref_tryget(&ca->io_ref[rw], ref_idx)) ca = NULL; rcu_read_unlock(); @@ -301,27 +311,17 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, return ca; if (ca) - percpu_ref_put(&ca->io_ref[rw]); + enumerated_ref_put(&ca->io_ref[rw], ref_idx); return NULL; } -/* XXX kill, move to struct bch_fs */ -static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c) -{ - struct bch_devs_mask devs; - - memset(&devs, 0, sizeof(devs)); - for_each_online_member(c, ca) - __set_bit(ca->dev_idx, devs.d); - return devs; -} - extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1; extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2; static inline bool bch2_member_alive(struct bch_member *m) { - return !bch2_is_zero(&m->uuid, sizeof(m->uuid)); + return !bch2_is_zero(&m->uuid, sizeof(m->uuid)) && + !uuid_equal(&m->uuid, &BCH_SB_MEMBER_DELETED_UUID); } static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev) @@ -351,6 +351,7 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi) ? 
BCH_MEMBER_DURABILITY(mi) - 1 : 1, .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi), + .resize_on_mount = BCH_MEMBER_RESIZE_ON_MOUNT(mi), .valid = bch2_member_alive(mi), .btree_bitmap_shift = mi->btree_bitmap_shift, .btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap), @@ -381,5 +382,6 @@ bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c); void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c); int bch2_sb_member_alloc(struct bch_fs *); +void bch2_sb_members_clean_deleted(struct bch_fs *); #endif /* _BCACHEFS_SB_MEMBERS_H */ diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h index 3affec823b3f..fb72ad730518 100644 --- a/fs/bcachefs/sb-members_format.h +++ b/fs/bcachefs/sb-members_format.h @@ -13,6 +13,10 @@ */ #define BCH_SB_MEMBER_INVALID 255 +#define BCH_SB_MEMBER_DELETED_UUID \ + UUID_INIT(0xffffffff, 0xffff, 0xffff, \ + 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef) + #define BCH_MIN_NR_NBUCKETS (1 << 6) #define BCH_IOPS_MEASUREMENTS() \ @@ -88,6 +92,8 @@ LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28) LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30) LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED, struct bch_member, flags, 30, 31) +LE64_BITMASK(BCH_MEMBER_RESIZE_ON_MOUNT, + struct bch_member, flags, 31, 32) #if 0 LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20); diff --git a/fs/bcachefs/sb-members_types.h b/fs/bcachefs/sb-members_types.h index c0eda888fe39..d6443e186872 100644 --- a/fs/bcachefs/sb-members_types.h +++ b/fs/bcachefs/sb-members_types.h @@ -13,6 +13,7 @@ struct bch_member_cpu { u8 data_allowed; u8 durability; u8 freespace_initialized; + u8 resize_on_mount; u8 valid; u8 btree_bitmap_shift; u64 btree_allocated_bitmap; diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c index fec569c7deb1..00d62d1190ef 100644 --- a/fs/bcachefs/snapshot.c +++ b/fs/bcachefs/snapshot.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "bbpos.h" #include "bkey_buf.h" #include "btree_cache.h" #include "btree_key_cache.h" #include "btree_update.h" #include "buckets.h" +#include "enumerated_ref.h" #include "errcode.h" #include "error.h" #include "fs.h" @@ -141,7 +143,7 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor) rcu_read_lock(); struct snapshot_table *t = rcu_dereference(c->snapshots); - if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) { + if (unlikely(c->recovery.pass_done < BCH_RECOVERY_PASS_check_snapshots)) { ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor); goto out; } @@ -209,9 +211,14 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c, { struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k); - prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u", - BCH_SNAPSHOT_SUBVOL(s.v), - BCH_SNAPSHOT_DELETED(s.v), + if (BCH_SNAPSHOT_SUBVOL(s.v)) + prt_str(out, "subvol "); + if (BCH_SNAPSHOT_WILL_DELETE(s.v)) + prt_str(out, "will_delete "); + if (BCH_SNAPSHOT_DELETED(s.v)) + prt_str(out, "deleted "); + + prt_printf(out, "parent %10u children %10u %10u subvol %u tree %u", le32_to_cpu(s.v->parent), le32_to_cpu(s.v->children[0]), le32_to_cpu(s.v->children[1]), @@ -281,6 +288,16 @@ fsck_err: return ret; } +static int bch2_snapshot_table_make_room(struct bch_fs *c, u32 id) +{ + mutex_lock(&c->snapshot_table_lock); + int ret = snapshot_t_mut(c, id) + ? 
0 + : -BCH_ERR_ENOMEM_mark_snapshot; + mutex_unlock(&c->snapshot_table_lock); + return ret; +} + static int __bch2_mark_snapshot(struct btree_trans *trans, enum btree_id btree, unsigned level, struct bkey_s_c old, struct bkey_s_c new, @@ -302,7 +319,9 @@ static int __bch2_mark_snapshot(struct btree_trans *trans, if (new.k->type == KEY_TYPE_snapshot) { struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new); - t->live = true; + t->state = !BCH_SNAPSHOT_DELETED(s.v) + ? SNAPSHOT_ID_live + : SNAPSHOT_ID_deleted; t->parent = le32_to_cpu(s.v->parent); t->children[0] = le32_to_cpu(s.v->children[0]); t->children[1] = le32_to_cpu(s.v->children[1]); @@ -327,9 +346,9 @@ static int __bch2_mark_snapshot(struct btree_trans *trans, parent - id - 1 < IS_ANCESTOR_BITMAP) __set_bit(parent - id - 1, t->is_ancestor); - if (BCH_SNAPSHOT_DELETED(s.v)) { + if (BCH_SNAPSHOT_WILL_DELETE(s.v)) { set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags); - if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots) + if (c->recovery.pass_done > BCH_RECOVERY_PASS_delete_dead_snapshots) bch2_delete_dead_snapshots_async(c); } } else { @@ -390,22 +409,31 @@ static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id) return 0; } -u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root) +u32 bch2_snapshot_oldest_subvol(struct bch_fs *c, u32 snapshot_root, + snapshot_id_list *skip) { - u32 id = snapshot_root; - u32 subvol = 0, s; - + u32 id, subvol = 0, s; +retry: + id = snapshot_root; rcu_read_lock(); while (id && bch2_snapshot_exists(c, id)) { - s = snapshot_t(c, id)->subvol; - - if (s && (!subvol || s < subvol)) - subvol = s; + if (!(skip && snapshot_list_has_id(skip, id))) { + s = snapshot_t(c, id)->subvol; + if (s && (!subvol || s < subvol)) + subvol = s; + } id = bch2_snapshot_tree_next(c, id); + if (id == snapshot_root) + break; } rcu_read_unlock(); + if (!subvol && skip) { + skip = NULL; + goto retry; + } + return subvol; } @@ -437,7 +465,7 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans, if (!ret && !found) { struct bkey_i_subvolume *u; - *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root); + *subvol_id = bch2_snapshot_oldest_subvol(c, snapshot_root, NULL); u = bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_subvolumes, POS(0, *subvol_id), @@ -654,7 +682,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans, u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot); ret = PTR_ERR_OR_ZERO(u) ?: bch2_snapshot_tree_create(trans, root_id, - bch2_snapshot_tree_oldest_subvol(c, root_id), + bch2_snapshot_oldest_subvol(c, root_id, NULL), &tree_id); if (ret) goto err; @@ -699,6 +727,9 @@ static int check_snapshot(struct btree_trans *trans, memset(&s, 0, sizeof(s)); memcpy(&s, k.v, min(sizeof(s), bkey_val_bytes(k.k))); + if (BCH_SNAPSHOT_DELETED(&s)) + return 0; + id = le32_to_cpu(s.parent); if (id) { ret = bch2_snapshot_lookup(trans, id, &v); @@ -736,7 +767,7 @@ static int check_snapshot(struct btree_trans *trans, } bool should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) && - !BCH_SNAPSHOT_DELETED(&s); + !BCH_SNAPSHOT_WILL_DELETE(&s); if (should_have_subvol) { id = le32_to_cpu(s.subvol); @@ -887,9 +918,8 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) } bch2_trans_iter_exit(trans, &iter); - return bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0) ?: - bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, - bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0); + return bch2_snapshot_table_make_room(c, id) ?: + 
bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0); } /* Figure out which snapshot nodes belong in the same tree: */ @@ -987,7 +1017,7 @@ int bch2_reconstruct_snapshots(struct bch_fs *c) snapshot_id_list_to_text(&buf, t); darray_for_each(*t, id) { - if (fsck_err_on(!bch2_snapshot_exists(c, *id), + if (fsck_err_on(bch2_snapshot_id_state(c, *id) == SNAPSHOT_ID_empty, trans, snapshot_node_missing, "snapshot node %u from tree %s missing, recreate?", *id, buf.buf)) { if (t->nr > 1) { @@ -1012,22 +1042,38 @@ err: return ret; } -int bch2_check_key_has_snapshot(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_s_c k) +int __bch2_check_key_has_snapshot(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; int ret = 0; + enum snapshot_id_state state = bch2_snapshot_id_state(c, k.k->p.snapshot); + + /* Snapshot was definitively deleted, this error is marked autofix */ + if (fsck_err_on(state == SNAPSHOT_ID_deleted, + trans, bkey_in_deleted_snapshot, + "key in deleted snapshot %s, delete?", + (bch2_btree_id_to_text(&buf, iter->btree_id), + prt_char(&buf, ' '), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + ret = bch2_btree_delete_at(trans, iter, + BTREE_UPDATE_internal_snapshot_node) ?: 1; - if (fsck_err_on(!bch2_snapshot_exists(c, k.k->p.snapshot), + /* + * Snapshot missing: we should have caught this with btree_lost_data and + * kicked off reconstruct_snapshots, so if we end up here we have no + * idea what happened: + */ + if (fsck_err_on(state == SNAPSHOT_ID_empty, trans, bkey_in_missing_snapshot, "key in missing snapshot %s, delete?", (bch2_btree_id_to_text(&buf, iter->btree_id), prt_char(&buf, ' '), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) ret = bch2_btree_delete_at(trans, iter, - BTREE_UPDATE_internal_snapshot_node) ?: 1; + BTREE_UPDATE_internal_snapshot_node) ?: 1; fsck_err: printbuf_exit(&buf); return ret; @@ -1051,10 +1097,10 @@ int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id) } /* already deleted? 
*/ - if (BCH_SNAPSHOT_DELETED(&s->v)) + if (BCH_SNAPSHOT_WILL_DELETE(&s->v)) goto err; - SET_BCH_SNAPSHOT_DELETED(&s->v, true); + SET_BCH_SNAPSHOT_WILL_DELETE(&s->v, true); SET_BCH_SNAPSHOT_SUBVOL(&s->v, false); s->v.subvol = 0; err: @@ -1074,24 +1120,25 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) struct btree_iter iter, p_iter = {}; struct btree_iter c_iter = {}; struct btree_iter tree_iter = {}; - struct bkey_s_c_snapshot s; u32 parent_id, child_id; unsigned i; int ret = 0; - s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id), - BTREE_ITER_intent, snapshot); - ret = bkey_err(s); + struct bkey_i_snapshot *s = + bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id), + BTREE_ITER_intent, snapshot); + ret = PTR_ERR_OR_ZERO(s); bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c, "missing snapshot %u", id); if (ret) goto err; - BUG_ON(s.v->children[1]); + BUG_ON(BCH_SNAPSHOT_DELETED(&s->v)); + BUG_ON(s->v.children[1]); - parent_id = le32_to_cpu(s.v->parent); - child_id = le32_to_cpu(s.v->children[0]); + parent_id = le32_to_cpu(s->v.parent); + child_id = le32_to_cpu(s->v.children[0]); if (parent_id) { struct bkey_i_snapshot *parent; @@ -1149,24 +1196,38 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) */ struct bkey_i_snapshot_tree *s_t; - BUG_ON(s.v->children[1]); + BUG_ON(s->v.children[1]); s_t = bch2_bkey_get_mut_typed(trans, &tree_iter, - BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)), + BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s->v.tree)), 0, snapshot_tree); ret = PTR_ERR_OR_ZERO(s_t); if (ret) goto err; - if (s.v->children[0]) { - s_t->v.root_snapshot = s.v->children[0]; + if (s->v.children[0]) { + s_t->v.root_snapshot = s->v.children[0]; } else { s_t->k.type = KEY_TYPE_deleted; set_bkey_val_u64s(&s_t->k, 0); } } - ret = bch2_btree_delete_at(trans, &iter, 0); + if (!bch2_request_incompat_feature(c, bcachefs_metadata_version_snapshot_deletion_v2)) { + SET_BCH_SNAPSHOT_DELETED(&s->v, true); + s->v.parent = 0; + s->v.children[0] = 0; + s->v.children[1] = 0; + s->v.subvol = 0; + s->v.tree = 0; + s->v.depth = 0; + s->v.skip[0] = 0; + s->v.skip[1] = 0; + s->v.skip[2] = 0; + } else { + s->k.type = KEY_TYPE_deleted; + set_bkey_val_u64s(&s->k, 0); + } err: bch2_trans_iter_exit(trans, &tree_iter); bch2_trans_iter_exit(trans, &p_iter); @@ -1336,12 +1397,6 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent, * that key to snapshot leaf nodes, where we can mutate it */ -struct snapshot_interior_delete { - u32 id; - u32 live_child; -}; -typedef DARRAY(struct snapshot_interior_delete) interior_delete_list; - static inline u32 interior_delete_has_id(interior_delete_list *l, u32 id) { darray_for_each(*l, i) @@ -1375,28 +1430,34 @@ static unsigned __live_child(struct snapshot_table *t, u32 id, return 0; } -static unsigned live_child(struct bch_fs *c, u32 id, - snapshot_id_list *delete_leaves, - interior_delete_list *delete_interior) +static unsigned live_child(struct bch_fs *c, u32 id) { + struct snapshot_delete *d = &c->snapshot_delete; + rcu_read_lock(); u32 ret = __live_child(rcu_dereference(c->snapshots), id, - delete_leaves, delete_interior); + &d->delete_leaves, &d->delete_interior); rcu_read_unlock(); return ret; } +static bool snapshot_id_dying(struct snapshot_delete *d, unsigned id) +{ + return snapshot_list_has_id(&d->delete_leaves, id) || + interior_delete_has_id(&d->delete_interior, id) != 0; +} + static int delete_dead_snapshots_process_key(struct btree_trans 
*trans, struct btree_iter *iter, - struct bkey_s_c k, - snapshot_id_list *delete_leaves, - interior_delete_list *delete_interior) + struct bkey_s_c k) { - if (snapshot_list_has_id(delete_leaves, k.k->p.snapshot)) + struct snapshot_delete *d = &trans->c->snapshot_delete; + + if (snapshot_list_has_id(&d->delete_leaves, k.k->p.snapshot)) return bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node); - u32 live_child = interior_delete_has_id(delete_interior, k.k->p.snapshot); + u32 live_child = interior_delete_has_id(&d->delete_interior, k.k->p.snapshot); if (live_child) { struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); int ret = PTR_ERR_OR_ZERO(new); @@ -1427,49 +1488,208 @@ static int delete_dead_snapshots_process_key(struct btree_trans *trans, return 0; } +static bool skip_unrelated_snapshot_tree(struct btree_trans *trans, struct btree_iter *iter, u64 *prev_inum) +{ + struct bch_fs *c = trans->c; + struct snapshot_delete *d = &c->snapshot_delete; + + u64 inum = iter->btree_id != BTREE_ID_inodes + ? iter->pos.inode + : iter->pos.offset; + + if (*prev_inum == inum) + return false; + + *prev_inum = inum; + + bool ret = !snapshot_list_has_id(&d->deleting_from_trees, + bch2_snapshot_tree(c, iter->pos.snapshot)); + if (unlikely(ret)) { + struct bpos pos = iter->pos; + pos.snapshot = 0; + if (iter->btree_id != BTREE_ID_inodes) + pos.offset = U64_MAX; + bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(pos)); + } + + return ret; +} + +static int delete_dead_snapshot_keys_v1(struct btree_trans *trans) +{ + struct bch_fs *c = trans->c; + struct snapshot_delete *d = &c->snapshot_delete; + + for (d->pos.btree = 0; d->pos.btree < BTREE_ID_NR; d->pos.btree++) { + struct disk_reservation res = { 0 }; + u64 prev_inum = 0; + + d->pos.pos = POS_MIN; + + if (!btree_type_has_snapshots(d->pos.btree)) + continue; + + int ret = for_each_btree_key_commit(trans, iter, + d->pos.btree, POS_MIN, + BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, + &res, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + d->pos.pos = iter.pos; + + if (skip_unrelated_snapshot_tree(trans, &iter, &prev_inum)) + continue; + + delete_dead_snapshots_process_key(trans, &iter, k); + })); + + bch2_disk_reservation_put(c, &res); + + if (ret) + return ret; + } + + return 0; +} + +static int delete_dead_snapshot_keys_range(struct btree_trans *trans, enum btree_id btree, + struct bpos start, struct bpos end) +{ + struct bch_fs *c = trans->c; + struct snapshot_delete *d = &c->snapshot_delete; + struct disk_reservation res = { 0 }; + + d->pos.btree = btree; + d->pos.pos = POS_MIN; + + int ret = for_each_btree_key_max_commit(trans, iter, + btree, start, end, + BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, + &res, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + d->pos.pos = iter.pos; + delete_dead_snapshots_process_key(trans, &iter, k); + })); + + bch2_disk_reservation_put(c, &res); + return ret; +} + +static int delete_dead_snapshot_keys_v2(struct btree_trans *trans) +{ + struct bch_fs *c = trans->c; + struct snapshot_delete *d = &c->snapshot_delete; + struct disk_reservation res = { 0 }; + u64 prev_inum = 0; + int ret = 0; + + struct btree_iter iter; + bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, POS_MIN, + BTREE_ITER_prefetch|BTREE_ITER_all_snapshots); + + while (1) { + struct bkey_s_c k; + ret = lockrestart_do(trans, + bkey_err(k = bch2_btree_iter_peek(trans, &iter))); + if (ret) + break; + + if (!k.k) + break; + + d->pos.btree = iter.btree_id; + d->pos.pos = iter.pos; + + if (skip_unrelated_snapshot_tree(trans, &iter, 
&prev_inum)) + continue; + + if (snapshot_id_dying(d, k.k->p.snapshot)) { + struct bpos start = POS(k.k->p.offset, 0); + struct bpos end = POS(k.k->p.offset, U64_MAX); + + ret = delete_dead_snapshot_keys_range(trans, BTREE_ID_extents, start, end) ?: + delete_dead_snapshot_keys_range(trans, BTREE_ID_dirents, start, end) ?: + delete_dead_snapshot_keys_range(trans, BTREE_ID_xattrs, start, end); + if (ret) + break; + + bch2_btree_iter_set_pos(trans, &iter, POS(0, k.k->p.offset + 1)); + } else { + bch2_btree_iter_advance(trans, &iter); + } + } + bch2_trans_iter_exit(trans, &iter); + + if (ret) + goto err; + + prev_inum = 0; + ret = for_each_btree_key_commit(trans, iter, + BTREE_ID_inodes, POS_MIN, + BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, + &res, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + d->pos.btree = iter.btree_id; + d->pos.pos = iter.pos; + + if (skip_unrelated_snapshot_tree(trans, &iter, &prev_inum)) + continue; + + delete_dead_snapshots_process_key(trans, &iter, k); + })); +err: + bch2_disk_reservation_put(c, &res); + return ret; +} + /* * For a given snapshot, if it doesn't have a subvolume that points to it, and * it doesn't have child snapshot nodes - it's now redundant and we can mark it * as deleted. */ -static int check_should_delete_snapshot(struct btree_trans *trans, struct bkey_s_c k, - snapshot_id_list *delete_leaves, - interior_delete_list *delete_interior) +static int check_should_delete_snapshot(struct btree_trans *trans, struct bkey_s_c k) { if (k.k->type != KEY_TYPE_snapshot) return 0; struct bch_fs *c = trans->c; + struct snapshot_delete *d = &c->snapshot_delete; struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k); unsigned live_children = 0; + int ret = 0; if (BCH_SNAPSHOT_SUBVOL(s.v)) return 0; + if (BCH_SNAPSHOT_DELETED(s.v)) + return 0; + + mutex_lock(&d->progress_lock); for (unsigned i = 0; i < 2; i++) { u32 child = le32_to_cpu(s.v->children[i]); live_children += child && - !snapshot_list_has_id(delete_leaves, child); + !snapshot_list_has_id(&d->delete_leaves, child); } + u32 tree = bch2_snapshot_tree(c, s.k->p.offset); + if (live_children == 0) { - return snapshot_list_add(c, delete_leaves, s.k->p.offset); + ret = snapshot_list_add_nodup(c, &d->deleting_from_trees, tree) ?: + snapshot_list_add(c, &d->delete_leaves, s.k->p.offset); } else if (live_children == 1) { - struct snapshot_interior_delete d = { + struct snapshot_interior_delete n = { .id = s.k->p.offset, - .live_child = live_child(c, s.k->p.offset, delete_leaves, delete_interior), + .live_child = live_child(c, s.k->p.offset), }; - if (!d.live_child) { - bch_err(c, "error finding live child of snapshot %u", d.id); - return -EINVAL; + if (!n.live_child) { + bch_err(c, "error finding live child of snapshot %u", n.id); + ret = -EINVAL; + } else { + ret = snapshot_list_add_nodup(c, &d->deleting_from_trees, tree) ?: + darray_push(&d->delete_interior, n); } - - return darray_push(delete_interior, d); - } else { - return 0; } + mutex_unlock(&d->progress_lock); + + return ret; } static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n, @@ -1498,6 +1718,9 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans, struct bkey_i_snapshot *s; int ret; + if (!bch2_snapshot_exists(c, k.k->p.offset)) + return 0; + if (k.k->type != KEY_TYPE_snapshot) return 0; @@ -1545,39 +1768,56 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans, return bch2_trans_update(trans, iter, &s->k_i, 0); } -int bch2_delete_dead_snapshots(struct bch_fs *c) +static void 
bch2_snapshot_delete_nodes_to_text(struct printbuf *out, struct snapshot_delete *d) { - if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) + prt_printf(out, "deleting from trees"); + darray_for_each(d->deleting_from_trees, i) + prt_printf(out, " %u", *i); + + prt_printf(out, "deleting leaves"); + darray_for_each(d->delete_leaves, i) + prt_printf(out, " %u", *i); + prt_newline(out); + + prt_printf(out, "interior"); + darray_for_each(d->delete_interior, i) + prt_printf(out, " %u->%u", i->id, i->live_child); + prt_newline(out); +} + +int __bch2_delete_dead_snapshots(struct bch_fs *c) +{ + struct snapshot_delete *d = &c->snapshot_delete; + int ret = 0; + + if (!mutex_trylock(&d->lock)) return 0; + if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) + goto out_unlock; + struct btree_trans *trans = bch2_trans_get(c); - snapshot_id_list delete_leaves = {}; - interior_delete_list delete_interior = {}; - int ret = 0; /* * For every snapshot node: If we have no live children and it's not * pointed to by a subvolume, delete it: */ + d->running = true; + d->pos = BBPOS_MIN; + ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots, POS_MIN, 0, k, - check_should_delete_snapshot(trans, k, &delete_leaves, &delete_interior)); + check_should_delete_snapshot(trans, k)); if (!bch2_err_matches(ret, EROFS)) bch_err_msg(c, ret, "walking snapshots"); if (ret) goto err; - if (!delete_leaves.nr && !delete_interior.nr) + if (!d->delete_leaves.nr && !d->delete_interior.nr) goto err; { struct printbuf buf = PRINTBUF; - prt_printf(&buf, "deleting leaves"); - darray_for_each(delete_leaves, i) - prt_printf(&buf, " %u", *i); - - prt_printf(&buf, " interior"); - darray_for_each(delete_interior, i) - prt_printf(&buf, " %u->%u", i->id, i->live_child); + bch2_snapshot_delete_nodes_to_text(&buf, d); ret = commit_do(trans, NULL, NULL, 0, bch2_trans_log_msg(trans, &buf)); printbuf_exit(&buf); @@ -1585,29 +1825,15 @@ int bch2_delete_dead_snapshots(struct bch_fs *c) goto err; } - for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) { - struct disk_reservation res = { 0 }; - - if (!btree_type_has_snapshots(btree)) - continue; - - ret = for_each_btree_key_commit(trans, iter, - btree, POS_MIN, - BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, - &res, NULL, BCH_TRANS_COMMIT_no_enospc, - delete_dead_snapshots_process_key(trans, &iter, k, - &delete_leaves, - &delete_interior)); - - bch2_disk_reservation_put(c, &res); - - if (!bch2_err_matches(ret, EROFS)) - bch_err_msg(c, ret, "deleting keys from dying snapshots"); - if (ret) - goto err; - } + ret = !bch2_request_incompat_feature(c, bcachefs_metadata_version_snapshot_deletion_v2) + ? 
delete_dead_snapshot_keys_v2(trans) + : delete_dead_snapshot_keys_v1(trans); + if (!bch2_err_matches(ret, EROFS)) + bch_err_msg(c, ret, "deleting keys from dying snapshots"); + if (ret) + goto err; - darray_for_each(delete_leaves, i) { + darray_for_each(d->delete_leaves, i) { ret = commit_do(trans, NULL, NULL, 0, bch2_snapshot_node_delete(trans, *i)); if (!bch2_err_matches(ret, EROFS)) @@ -1624,11 +1850,11 @@ int bch2_delete_dead_snapshots(struct bch_fs *c) ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN, BTREE_ITER_intent, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &delete_interior)); + bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &d->delete_interior)); if (ret) goto err; - darray_for_each(delete_interior, i) { + darray_for_each(d->delete_interior, i) { ret = commit_do(trans, NULL, NULL, 0, bch2_snapshot_node_delete(trans, i->id)); if (!bch2_err_matches(ret, EROFS)) @@ -1637,33 +1863,66 @@ int bch2_delete_dead_snapshots(struct bch_fs *c) goto err; } err: - darray_exit(&delete_interior); - darray_exit(&delete_leaves); + mutex_lock(&d->progress_lock); + darray_exit(&d->deleting_from_trees); + darray_exit(&d->delete_interior); + darray_exit(&d->delete_leaves); + d->running = false; + mutex_unlock(&d->progress_lock); bch2_trans_put(trans); +out_unlock: + mutex_unlock(&d->lock); if (!bch2_err_matches(ret, EROFS)) bch_err_fn(c, ret); return ret; } +int bch2_delete_dead_snapshots(struct bch_fs *c) +{ + if (!c->opts.auto_snapshot_deletion) + return 0; + + return __bch2_delete_dead_snapshots(c); +} + void bch2_delete_dead_snapshots_work(struct work_struct *work) { - struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work); + struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete.work); set_worker_desc("bcachefs-delete-dead-snapshots/%s", c->name); bch2_delete_dead_snapshots(c); - bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_delete_dead_snapshots); } void bch2_delete_dead_snapshots_async(struct bch_fs *c) { - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots)) + if (!c->opts.auto_snapshot_deletion) + return; + + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_delete_dead_snapshots)) return; BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags)); - if (!queue_work(c->write_ref_wq, &c->snapshot_delete_work)) - bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots); + if (!queue_work(c->write_ref_wq, &c->snapshot_delete.work)) + enumerated_ref_put(&c->writes, BCH_WRITE_REF_delete_dead_snapshots); +} + +void bch2_snapshot_delete_status_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct snapshot_delete *d = &c->snapshot_delete; + + if (!d->running) { + prt_str(out, "(not running)"); + return; + } + + mutex_lock(&d->progress_lock); + bch2_snapshot_delete_nodes_to_text(out, d); + + bch2_bbpos_to_text(out, d->pos); + mutex_unlock(&d->progress_lock); } int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, @@ -1704,7 +1963,7 @@ static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct return 0; struct bkey_s_c_snapshot snap = bkey_s_c_to_snapshot(k); - if (BCH_SNAPSHOT_DELETED(snap.v) || + if (BCH_SNAPSHOT_WILL_DELETE(snap.v) || interior_snapshot_needs_delete(snap)) set_bit(BCH_FS_need_delete_dead_snapshots, &trans->c->flags); @@ -1733,10 +1992,6 @@ int bch2_snapshots_read(struct bch_fs *c) BUG_ON(!test_bit(BCH_FS_new_fs, &c->flags) && test_bit(BCH_FS_may_go_rw, 
&c->flags)); - if (bch2_err_matches(ret, EIO) || - (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_snapshots))) - ret = bch2_run_explicit_recovery_pass_persistent(c, BCH_RECOVERY_PASS_reconstruct_snapshots); - return ret; } @@ -1744,3 +1999,11 @@ void bch2_fs_snapshots_exit(struct bch_fs *c) { kvfree(rcu_dereference_protected(c->snapshots, true)); } + +void bch2_fs_snapshots_init_early(struct bch_fs *c) +{ + INIT_WORK(&c->snapshot_delete.work, bch2_delete_dead_snapshots_work); + mutex_init(&c->snapshot_delete.lock); + mutex_init(&c->snapshot_delete.progress_lock); + mutex_init(&c->snapshots_unlinked_lock); +} diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h index 81180181d7c9..382a171f5413 100644 --- a/fs/bcachefs/snapshot.h +++ b/fs/bcachefs/snapshot.h @@ -105,7 +105,7 @@ static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n) return id; } -u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *, u32); +u32 bch2_snapshot_oldest_subvol(struct bch_fs *, u32, snapshot_id_list *); u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32); static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id) @@ -120,21 +120,26 @@ static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id) return id; } -static inline bool __bch2_snapshot_exists(struct bch_fs *c, u32 id) +static inline enum snapshot_id_state __bch2_snapshot_id_state(struct bch_fs *c, u32 id) { const struct snapshot_t *s = snapshot_t(c, id); - return s ? s->live : 0; + return s ? s->state : SNAPSHOT_ID_empty; } -static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id) +static inline enum snapshot_id_state bch2_snapshot_id_state(struct bch_fs *c, u32 id) { rcu_read_lock(); - bool ret = __bch2_snapshot_exists(c, id); + enum snapshot_id_state ret = __bch2_snapshot_id_state(c, id); rcu_read_unlock(); return ret; } +static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id) +{ + return bch2_snapshot_id_state(c, id) == SNAPSHOT_ID_live; +} + static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id) { rcu_read_lock(); @@ -241,10 +246,19 @@ int bch2_snapshot_node_create(struct btree_trans *, u32, int bch2_check_snapshot_trees(struct bch_fs *); int bch2_check_snapshots(struct bch_fs *); int bch2_reconstruct_snapshots(struct bch_fs *); -int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c); + +int __bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c); + +static inline int bch2_check_key_has_snapshot(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k) +{ + return likely(bch2_snapshot_exists(trans->c, k.k->p.snapshot)) + ? 
0 + : __bch2_check_key_has_snapshot(trans, iter, k); +} int bch2_snapshot_node_set_deleted(struct btree_trans *, u32); -void bch2_delete_dead_snapshots_work(struct work_struct *); int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos); @@ -259,7 +273,14 @@ static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans, return __bch2_key_has_snapshot_overwrites(trans, id, pos); } +int __bch2_delete_dead_snapshots(struct bch_fs *); +int bch2_delete_dead_snapshots(struct bch_fs *); +void bch2_delete_dead_snapshots_work(struct work_struct *); +void bch2_delete_dead_snapshots_async(struct bch_fs *); +void bch2_snapshot_delete_status_to_text(struct printbuf *, struct bch_fs *); + int bch2_snapshots_read(struct bch_fs *); void bch2_fs_snapshots_exit(struct bch_fs *); +void bch2_fs_snapshots_init_early(struct bch_fs *); #endif /* _BCACHEFS_SNAPSHOT_H */ diff --git a/fs/bcachefs/snapshot_format.h b/fs/bcachefs/snapshot_format.h index aabcd3a74cd9..9bccae1f3590 100644 --- a/fs/bcachefs/snapshot_format.h +++ b/fs/bcachefs/snapshot_format.h @@ -15,10 +15,10 @@ struct bch_snapshot { bch_le128 btime; }; -LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1) - +LE32_BITMASK(BCH_SNAPSHOT_WILL_DELETE, struct bch_snapshot, flags, 0, 1) /* True if a subvolume points to this snapshot node: */ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2) +LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 2, 3) /* * Snapshot trees: diff --git a/fs/bcachefs/snapshot_types.h b/fs/bcachefs/snapshot_types.h new file mode 100644 index 000000000000..0ab698f13e5c --- /dev/null +++ b/fs/bcachefs/snapshot_types.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_SNAPSHOT_TYPES_H +#define _BCACHEFS_SNAPSHOT_TYPES_H + +#include "bbpos_types.h" +#include "darray.h" +#include "subvolume_types.h" + +typedef DARRAY(u32) snapshot_id_list; + +#define IS_ANCESTOR_BITMAP 128 + +struct snapshot_t { + enum snapshot_id_state { + SNAPSHOT_ID_empty, + SNAPSHOT_ID_live, + SNAPSHOT_ID_deleted, + } state; + u32 parent; + u32 skip[3]; + u32 depth; + u32 children[2]; + u32 subvol; /* Nonzero only if a subvolume points to this node: */ + u32 tree; + unsigned long is_ancestor[BITS_TO_LONGS(IS_ANCESTOR_BITMAP)]; +}; + +struct snapshot_table { + struct rcu_head rcu; + size_t nr; +#ifndef RUST_BINDGEN + DECLARE_FLEX_ARRAY(struct snapshot_t, s); +#else + struct snapshot_t s[0]; +#endif +}; + +struct snapshot_interior_delete { + u32 id; + u32 live_child; +}; +typedef DARRAY(struct snapshot_interior_delete) interior_delete_list; + +struct snapshot_delete { + struct mutex lock; + struct work_struct work; + + struct mutex progress_lock; + snapshot_id_list deleting_from_trees; + snapshot_id_list delete_leaves; + interior_delete_list delete_interior; + + bool running; + struct bbpos pos; +}; + +#endif /* _BCACHEFS_SNAPSHOT_TYPES_H */ diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c index a90bf7b8a2b4..0cbf5508a32c 100644 --- a/fs/bcachefs/str_hash.c +++ b/fs/bcachefs/str_hash.c @@ -101,17 +101,25 @@ static noinline int hash_pick_winner(struct btree_trans *trans, } } -static int repair_inode_hash_info(struct btree_trans *trans, - struct bch_inode_unpacked *snapshot_root) +/* + * str_hash lookups across snapshots break in wild ways if hash_info in + * different snapshot versions doesn't match - so if we find one mismatch, check + * them all + */ +int bch2_repair_inode_hash_info(struct btree_trans *trans, + struct bch_inode_unpacked 
*snapshot_root) { + struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_s_c k; + struct printbuf buf = PRINTBUF; + bool need_commit = false; int ret = 0; - for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, - SPOS(0, snapshot_root->bi_inum, snapshot_root->bi_snapshot - 1), - BTREE_ITER_all_snapshots, k, ret) { - if (k.k->p.offset != snapshot_root->bi_inum) + for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, + POS(0, snapshot_root->bi_inum), + BTREE_ITER_all_snapshots, k, ret) { + if (bpos_ge(k.k->p, SPOS(0, snapshot_root->bi_inum, snapshot_root->bi_snapshot))) break; if (!bkey_is_inode(k.k)) continue; @@ -121,19 +129,72 @@ static int repair_inode_hash_info(struct btree_trans *trans, if (ret) break; - if (fsck_err_on(inode.bi_hash_seed != snapshot_root->bi_hash_seed || - INODE_STR_HASH(&inode) != INODE_STR_HASH(snapshot_root), - trans, inode_snapshot_mismatch, - "inode hash info in different snapshots don't match")) { + if (inode.bi_hash_seed == snapshot_root->bi_hash_seed && + INODE_STR_HASH(&inode) == INODE_STR_HASH(snapshot_root)) { +#ifdef CONFIG_BCACHEFS_DEBUG + struct bch_hash_info hash1 = bch2_hash_info_init(c, snapshot_root); + struct bch_hash_info hash2 = bch2_hash_info_init(c, &inode); + + BUG_ON(hash1.type != hash2.type || + memcmp(&hash1.siphash_key, + &hash2.siphash_key, + sizeof(hash1.siphash_key))); +#endif + continue; + } + + printbuf_reset(&buf); + prt_printf(&buf, "inode %llu hash info in snapshots %u %u don't match\n", + snapshot_root->bi_inum, + inode.bi_snapshot, + snapshot_root->bi_snapshot); + + bch2_prt_str_hash_type(&buf, INODE_STR_HASH(&inode)); + prt_printf(&buf, " %llx\n", inode.bi_hash_seed); + + bch2_prt_str_hash_type(&buf, INODE_STR_HASH(snapshot_root)); + prt_printf(&buf, " %llx", snapshot_root->bi_hash_seed); + + if (fsck_err(trans, inode_snapshot_mismatch, "%s", buf.buf)) { inode.bi_hash_seed = snapshot_root->bi_hash_seed; SET_INODE_STR_HASH(&inode, INODE_STR_HASH(snapshot_root)); - ret = __bch2_fsck_write_inode(trans, &inode) ?: - bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: - -BCH_ERR_transaction_restart_nested; - break; + + ret = __bch2_fsck_write_inode(trans, &inode); + if (ret) + break; + need_commit = true; } } + + if (ret) + goto err; + + if (!need_commit) { + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + prt_printf(&buf, "inode %llu hash info mismatch with root, but mismatch not found\n", + snapshot_root->bi_inum); + + prt_printf(&buf, "root snapshot %u ", snapshot_root->bi_snapshot); + bch2_prt_str_hash_type(&buf, INODE_STR_HASH(snapshot_root)); + prt_printf(&buf, " %llx\n", snapshot_root->bi_hash_seed); +#if 0 + prt_printf(&buf, "vs snapshot %u ", hash_info->inum_snapshot); + bch2_prt_str_hash_type(&buf, hash_info->type); + prt_printf(&buf, " %llx %llx", hash_info->siphash_key.k0, hash_info->siphash_key.k1); +#endif + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + ret = -BCH_ERR_fsck_repair_unimplemented; + goto err; + } + + ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; +err: fsck_err: + printbuf_exit(&buf); bch2_trans_iter_exit(trans, &iter); return ret; } @@ -145,46 +206,18 @@ fsck_err: static noinline int check_inode_hash_info_matches_root(struct btree_trans *trans, u64 inum, struct bch_hash_info *hash_info) { - struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k; - int ret = 0; - - for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, 
inum, U32_MAX), - BTREE_ITER_all_snapshots, k, ret) { - if (k.k->p.offset != inum) - break; - if (bkey_is_inode(k.k)) - goto found; - } - bch_err(c, "%s(): inum %llu not found", __func__, inum); - ret = -BCH_ERR_fsck_repair_unimplemented; - goto err; -found:; - struct bch_inode_unpacked inode; - ret = bch2_inode_unpack(k, &inode); + struct bch_inode_unpacked snapshot_root; + int ret = bch2_inode_find_snapshot_root(trans, inum, &snapshot_root); if (ret) - goto err; + return ret; + + struct bch_hash_info hash_root = bch2_hash_info_init(trans->c, &snapshot_root); + if (hash_info->type != hash_root.type || + memcmp(&hash_info->siphash_key, + &hash_root.siphash_key, + sizeof(hash_root.siphash_key))) + ret = bch2_repair_inode_hash_info(trans, &snapshot_root); - struct bch_hash_info hash2 = bch2_hash_info_init(c, &inode); - if (hash_info->type != hash2.type || - memcmp(&hash_info->siphash_key, &hash2.siphash_key, sizeof(hash2.siphash_key))) { - ret = repair_inode_hash_info(trans, &inode); - if (!ret) { - bch_err(c, "inode hash info mismatch with root, but mismatch not found\n" - "%u %llx %llx\n" - "%u %llx %llx", - hash_info->type, - hash_info->siphash_key.k0, - hash_info->siphash_key.k1, - hash2.type, - hash2.siphash_key.k0, - hash2.siphash_key.k1); - ret = -BCH_ERR_fsck_repair_unimplemented; - } - } -err: - bch2_trans_iter_exit(trans, &iter); return ret; } diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h index 0c1a00539bd1..6762b3627e1b 100644 --- a/fs/bcachefs/str_hash.h +++ b/fs/bcachefs/str_hash.h @@ -32,6 +32,7 @@ bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt) } struct bch_hash_info { + u32 inum_snapshot; u8 type; struct unicode_map *cf_encoding; /* @@ -45,11 +46,12 @@ static inline struct bch_hash_info bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi) { struct bch_hash_info info = { - .type = INODE_STR_HASH(bi), + .inum_snapshot = bi->bi_snapshot, + .type = INODE_STR_HASH(bi), #ifdef CONFIG_UNICODE - .cf_encoding = bch2_inode_casefold(c, bi) ? c->cf_encoding : NULL, + .cf_encoding = bch2_inode_casefold(c, bi) ? 
c->cf_encoding : NULL, #endif - .siphash_key = { .k0 = bi->bi_hash_seed } + .siphash_key = { .k0 = bi->bi_hash_seed } }; if (unlikely(info.type == BCH_STR_HASH_siphash_old)) { @@ -392,6 +394,8 @@ int bch2_hash_delete(struct btree_trans *trans, return ret; } +int bch2_repair_inode_hash_info(struct btree_trans *, struct bch_inode_unpacked *); + struct snapshots_seen; int __bch2_str_hash_check_key(struct btree_trans *, struct snapshots_seen *, diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c index d0209f7658bb..35c9f86a73c1 100644 --- a/fs/bcachefs/subvolume.c +++ b/fs/bcachefs/subvolume.c @@ -3,6 +3,7 @@ #include "bcachefs.h" #include "btree_key_cache.h" #include "btree_update.h" +#include "enumerated_ref.h" #include "errcode.h" #include "error.h" #include "fs.h" @@ -14,6 +15,22 @@ static int bch2_subvolume_delete(struct btree_trans *, u32); +static int bch2_subvolume_missing(struct bch_fs *c, u32 subvolid) +{ + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + + prt_printf(&buf, "missing subvolume %u", subvolid); + bool print = bch2_count_fsck_err(c, subvol_missing, &buf); + + int ret = bch2_run_explicit_recovery_pass(c, &buf, + BCH_RECOVERY_PASS_check_inodes, 0); + if (print) + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + return ret; +} + static struct bpos subvolume_children_pos(struct bkey_s_c k) { if (k.k->type != KEY_TYPE_subvolume) @@ -45,7 +62,7 @@ static int check_subvol(struct btree_trans *trans, ret = bch2_snapshot_lookup(trans, snapid, &snapshot); if (bch2_err_matches(ret, ENOENT)) - return bch2_run_explicit_recovery_pass(c, + return bch2_run_print_explicit_recovery_pass(c, BCH_RECOVERY_PASS_reconstruct_snapshots) ?: ret; if (ret) return ret; @@ -292,9 +309,8 @@ bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol, int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol), BTREE_ITER_cached| BTREE_ITER_with_updates, subvolume, s); - bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) && - inconsistent_if_not_found, - trans->c, "missing subvolume %u", subvol); + if (bch2_err_matches(ret, ENOENT) && inconsistent_if_not_found) + ret = bch2_subvolume_missing(trans->c, subvol) ?: ret; return ret; } @@ -344,8 +360,8 @@ int __bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid, subvolume); ret = bkey_err(subvol); - bch2_fs_inconsistent_on(warn && bch2_err_matches(ret, ENOENT), trans->c, - "missing subvolume %u", subvolid); + if (bch2_err_matches(ret, ENOENT)) + ret = bch2_subvolume_missing(trans->c, subvolid) ?: ret; if (likely(!ret)) *snapid = le32_to_cpu(subvol.v->snapshot); @@ -418,8 +434,8 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid) BTREE_ITER_cached|BTREE_ITER_intent, subvolume); int ret = bkey_err(subvol); - bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, - "missing subvolume %u", subvolid); + if (bch2_err_matches(ret, ENOENT)) + ret = bch2_subvolume_missing(trans->c, subvolid) ?: ret; if (ret) goto err; @@ -479,13 +495,11 @@ static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *wor { struct bch_fs *c = container_of(work, struct bch_fs, snapshot_wait_for_pagecache_and_delete_work); - snapshot_id_list s; - u32 *id; int ret = 0; while (!ret) { mutex_lock(&c->snapshots_unlinked_lock); - s = c->snapshots_unlinked; + snapshot_id_list s = c->snapshots_unlinked; darray_init(&c->snapshots_unlinked); mutex_unlock(&c->snapshots_unlinked_lock); @@ -494,7 +508,7 @@ static void 
bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *wor bch2_evict_subvolume_inodes(c, &s); - for (id = s.data; id < s.data + s.nr; id++) { + darray_for_each(s, id) { ret = bch2_trans_run(c, bch2_subvolume_delete(trans, *id)); bch_err_msg(c, ret, "deleting subvolume %u", *id); if (ret) @@ -504,7 +518,7 @@ static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *wor darray_exit(&s); } - bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_snapshot_delete_pagecache); } struct subvolume_unlink_hook { @@ -527,11 +541,11 @@ static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans if (ret) return ret; - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_snapshot_delete_pagecache)) return -EROFS; if (!queue_work(c->write_ref_wq, &c->snapshot_wait_for_pagecache_and_delete_work)) - bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_snapshot_delete_pagecache); return 0; } @@ -555,11 +569,10 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid) BTREE_ID_subvolumes, POS(0, subvolid), BTREE_ITER_cached, subvolume); ret = PTR_ERR_OR_ZERO(n); - if (unlikely(ret)) { - bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, - "missing subvolume %u", subvolid); + if (bch2_err_matches(ret, ENOENT)) + ret = bch2_subvolume_missing(trans->c, subvolid) ?: ret; + if (unlikely(ret)) return ret; - } SET_BCH_SUBVOLUME_UNLINKED(&n->v, true); n->v.fs_path_parent = 0; @@ -598,11 +611,10 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode, BTREE_ID_subvolumes, POS(0, src_subvolid), BTREE_ITER_cached, subvolume); ret = PTR_ERR_OR_ZERO(src_subvol); - if (unlikely(ret)) { - bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c, - "subvolume %u not found", src_subvolid); + if (bch2_err_matches(ret, ENOENT)) + ret = bch2_subvolume_missing(trans->c, src_subvolid) ?: ret; + if (unlikely(ret)) goto err; - } parent = le32_to_cpu(src_subvol->v.snapshot); } @@ -716,11 +728,8 @@ int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c) return ret; } -int bch2_fs_subvolumes_init(struct bch_fs *c) +void bch2_fs_subvolumes_init_early(struct bch_fs *c) { - INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work); INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work, bch2_subvolume_wait_for_pagecache_and_delete); - mutex_init(&c->snapshots_unlinked_lock); - return 0; } diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h index f640c1e3d639..075f55e25c70 100644 --- a/fs/bcachefs/subvolume.h +++ b/fs/bcachefs/subvolume.h @@ -77,15 +77,12 @@ bch2_btree_iter_peek_in_subvolume_max_type(struct btree_trans *trans, struct btr _end, _subvolid, _flags, _k, _do); \ }) -int bch2_delete_dead_snapshots(struct bch_fs *); -void bch2_delete_dead_snapshots_async(struct bch_fs *); - int bch2_subvolume_unlink(struct btree_trans *, u32); int bch2_subvolume_create(struct btree_trans *, u64, u32, u32, u32 *, u32 *, bool); int bch2_initialize_subvolumes(struct bch_fs *); int bch2_fs_upgrade_for_subvolumes(struct bch_fs *); -int bch2_fs_subvolumes_init(struct bch_fs *); +void bch2_fs_subvolumes_init_early(struct bch_fs *); #endif /* _BCACHEFS_SUBVOLUME_H */ diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h index 1549d6daf7af..9d634b906dcd 100644 --- a/fs/bcachefs/subvolume_types.h +++ b/fs/bcachefs/subvolume_types.h @@ -2,33 
+2,6 @@ #ifndef _BCACHEFS_SUBVOLUME_TYPES_H #define _BCACHEFS_SUBVOLUME_TYPES_H -#include "darray.h" - -typedef DARRAY(u32) snapshot_id_list; - -#define IS_ANCESTOR_BITMAP 128 - -struct snapshot_t { - bool live; - u32 parent; - u32 skip[3]; - u32 depth; - u32 children[2]; - u32 subvol; /* Nonzero only if a subvolume points to this node: */ - u32 tree; - unsigned long is_ancestor[BITS_TO_LONGS(IS_ANCESTOR_BITMAP)]; -}; - -struct snapshot_table { - struct rcu_head rcu; - size_t nr; -#ifndef RUST_BINDGEN - DECLARE_FLEX_ARRAY(struct snapshot_t, s); -#else - struct snapshot_t s[0]; -#endif -}; - typedef struct { /* we can't have padding in this struct: */ u64 subvol; diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index cb5d960aed92..6687b9235d3c 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -87,7 +87,8 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v struct printbuf buf = PRINTBUF; prt_str(&buf, "requested incompat feature "); bch2_version_to_text(&buf, version); - prt_str(&buf, " currently not enabled"); + prt_str(&buf, " currently not enabled, allowed up to "); + bch2_version_to_text(&buf, version); prt_printf(&buf, "\n set version_upgrade=incompat to enable"); bch_notice(c, "%s", buf.buf); @@ -260,11 +261,11 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb, /* XXX: we're not checking that offline device have enough space */ - for_each_online_member(c, ca) { + for_each_online_member(c, ca, BCH_DEV_READ_REF_sb_field_resize) { struct bch_sb_handle *dev_sb = &ca->disk_sb; if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) { - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_sb_field_resize); return NULL; } } @@ -384,7 +385,6 @@ static int bch2_sb_compatible(struct bch_sb *sb, struct printbuf *out) int bch2_sb_validate(struct bch_sb *sb, u64 read_offset, enum bch_validate_flags flags, struct printbuf *out) { - struct bch_sb_field_members_v1 *mi; enum bch_opt_id opt_id; int ret; @@ -468,6 +468,9 @@ int bch2_sb_validate(struct bch_sb *sb, u64 read_offset, SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(sb, BCH_SB_VERSION_INCOMPAT(sb)); } + if (sb->nr_devices > 1) + SET_BCH_SB_MULTI_DEVICE(sb, true); + if (!flags) { /* * Been seeing a bug where these are getting inexplicably @@ -536,14 +539,17 @@ int bch2_sb_validate(struct bch_sb *sb, u64 read_offset, } } + struct bch_sb_field *mi = + bch2_sb_field_get_id(sb, BCH_SB_FIELD_members_v2) ?: + bch2_sb_field_get_id(sb, BCH_SB_FIELD_members_v1); + /* members must be validated first: */ - mi = bch2_sb_field_get(sb, members_v1); if (!mi) { prt_printf(out, "Invalid superblock: member info area missing"); return -BCH_ERR_invalid_sb_members_missing; } - ret = bch2_sb_field_validate(sb, &mi->field, flags, out); + ret = bch2_sb_field_validate(sb, mi, flags, out); if (ret) return ret; @@ -612,11 +618,15 @@ static void bch2_sb_update(struct bch_fs *c) c->sb.features = le64_to_cpu(src->features[0]); c->sb.compat = le64_to_cpu(src->compat[0]); + c->sb.multi_device = BCH_SB_MULTI_DEVICE(src); memset(c->sb.errors_silent, 0, sizeof(c->sb.errors_silent)); struct bch_sb_field_ext *ext = bch2_sb_field_get(src, ext); if (ext) { + c->sb.recovery_passes_required = + bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); + le_bitvector_to_cpu(c->sb.errors_silent, (void *) ext->errors_silent, sizeof(c->sb.errors_silent) * 8); c->sb.btrees_lost_data = le64_to_cpu(ext->btrees_lost_data); @@ -961,7 +971,7 @@ static 
void write_super_endio(struct bio *bio) } closure_put(&ca->fs->sb_write); - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super); } static void read_back_super(struct bch_fs *c, struct bch_dev *ca) @@ -979,7 +989,7 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca) this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio)); - percpu_ref_get(&ca->io_ref[READ]); + enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super); closure_bio_submit(bio, &c->sb_write); } @@ -1005,7 +1015,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx) this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb], bio_sectors(bio)); - percpu_ref_get(&ca->io_ref[READ]); + enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super); closure_bio_submit(bio, &c->sb_write); } @@ -1022,7 +1032,7 @@ int bch2_write_super(struct bch_fs *c) trace_and_count(c, write_super, c, _RET_IP_); - if (c->opts.very_degraded) + if (c->opts.degraded == BCH_DEGRADED_very) degraded_flags |= BCH_FORCE_IF_LOST; lockdep_assert_held(&c->sb_lock); @@ -1037,13 +1047,13 @@ int bch2_write_super(struct bch_fs *c) * For now, we expect to be able to call write_super() when we're not * yet RW: */ - for_each_online_member(c, ca) { + for_each_online_member(c, ca, BCH_DEV_READ_REF_write_super) { ret = darray_push(&online_devices, ca); if (bch2_fs_fatal_err_on(ret, c, "%s: error allocating online devices", __func__)) { - percpu_ref_put(&ca->io_ref[READ]); + enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super); goto out; } - percpu_ref_get(&ca->io_ref[READ]); + enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super); } /* Make sure we're using the new magic numbers: */ @@ -1210,7 +1220,7 @@ out: /* Make new options visible after they're persistent: */ bch2_sb_update(c); darray_for_each(online_devices, ca) - percpu_ref_put(&(*ca)->io_ref[READ]); + enumerated_ref_put(&(*ca)->io_ref[READ], BCH_DEV_READ_REF_write_super); darray_exit(&online_devices); printbuf_exit(&err); return ret; @@ -1270,6 +1280,31 @@ void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version, bool incompat) } } +void bch2_sb_upgrade_incompat(struct bch_fs *c) +{ + mutex_lock(&c->sb_lock); + if (c->sb.version == c->sb.version_incompat_allowed) + goto unlock; + + struct printbuf buf = PRINTBUF; + + prt_str(&buf, "Now allowing incompatible features up to "); + bch2_version_to_text(&buf, c->sb.version); + prt_str(&buf, ", previously allowed up to "); + bch2_version_to_text(&buf, c->sb.version_incompat_allowed); + prt_newline(&buf); + + bch_notice(c, "%s", buf.buf); + printbuf_exit(&buf); + + c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL); + SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb, + max(BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb), c->sb.version)); + bch2_write_super(c); +unlock: + mutex_unlock(&c->sb_lock); +} + static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f, enum bch_validate_flags flags, struct printbuf *err) { diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h index 78f708a6fbcd..a3b7a90f2533 100644 --- a/fs/bcachefs/super-io.h +++ b/fs/bcachefs/super-io.h @@ -107,6 +107,7 @@ static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat) bool bch2_check_version_downgrade(struct bch_fs *); void bch2_sb_upgrade(struct bch_fs *, unsigned, bool); +void bch2_sb_upgrade_incompat(struct bch_fs *); void __bch2_sb_field_to_text(struct printbuf *, struct bch_sb *, struct 
bch_sb_field *); diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index 84a37d971ffd..11579b74c640 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -10,6 +10,8 @@ #include "bcachefs.h" #include "alloc_background.h" #include "alloc_foreground.h" +#include "async_objs.h" +#include "backpointers.h" #include "bkey_sort.h" #include "btree_cache.h" #include "btree_gc.h" @@ -28,6 +30,7 @@ #include "disk_accounting.h" #include "disk_groups.h" #include "ec.h" +#include "enumerated_ref.h" #include "errcode.h" #include "error.h" #include "fs.h" @@ -48,6 +51,7 @@ #include "quota.h" #include "rebalance.h" #include "recovery.h" +#include "recovery_passes.h" #include "replicas.h" #include "sb-clean.h" #include "sb-counters.h" @@ -75,14 +79,32 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); MODULE_DESCRIPTION("bcachefs filesystem"); -const char * const bch2_fs_flag_strs[] = { +typedef DARRAY(struct bch_sb_handle) bch_sb_handles; + #define x(n) #n, +const char * const bch2_fs_flag_strs[] = { BCH_FS_FLAGS() -#undef x NULL }; -void bch2_print_str(struct bch_fs *c, const char *str) +const char * const bch2_write_refs[] = { + BCH_WRITE_REFS() + NULL +}; + +const char * const bch2_dev_read_refs[] = { + BCH_DEV_READ_REFS() + NULL +}; + +const char * const bch2_dev_write_refs[] = { + BCH_DEV_WRITE_REFS() + NULL +}; +#undef x + +static void __bch2_print_str(struct bch_fs *c, const char *prefix, + const char *str, bool nonblocking) { #ifdef __KERNEL__ struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c); @@ -92,7 +114,17 @@ void bch2_print_str(struct bch_fs *c, const char *str) return; } #endif - bch2_print_string_as_lines(KERN_ERR, str); + bch2_print_string_as_lines(KERN_ERR, str, nonblocking); +} + +void bch2_print_str(struct bch_fs *c, const char *prefix, const char *str) +{ + __bch2_print_str(c, prefix, str, false); +} + +void bch2_print_str_nonblocking(struct bch_fs *c, const char *prefix, const char *str) +{ + __bch2_print_str(c, prefix, str, true); } __printf(2, 0) @@ -183,6 +215,7 @@ static int bch2_dev_alloc(struct bch_fs *, unsigned); static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *); static void bch2_dev_io_ref_stop(struct bch_dev *, int); static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *); +static int bch2_fs_init_rw(struct bch_fs *); struct bch_fs *bch2_dev_to_fs(dev_t dev) { @@ -297,15 +330,13 @@ static void __bch2_fs_read_only(struct bch_fs *c) } } -#ifndef BCH_WRITE_REF_DEBUG -static void bch2_writes_disabled(struct percpu_ref *writes) +static void bch2_writes_disabled(struct enumerated_ref *writes) { struct bch_fs *c = container_of(writes, struct bch_fs, writes); set_bit(BCH_FS_write_disable_complete, &c->flags); wake_up(&bch2_read_only_wait); } -#endif void bch2_fs_read_only(struct bch_fs *c) { @@ -323,12 +354,7 @@ void bch2_fs_read_only(struct bch_fs *c) * writes will return -EROFS: */ set_bit(BCH_FS_going_ro, &c->flags); -#ifndef BCH_WRITE_REF_DEBUG - percpu_ref_kill(&c->writes); -#else - for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) - bch2_write_ref_put(c, i); -#endif + enumerated_ref_stop_async(&c->writes); /* * If we're not doing an emergency shutdown, we want to wait on @@ -366,7 +392,7 @@ void bch2_fs_read_only(struct bch_fs *c) !test_bit(BCH_FS_emergency_ro, &c->flags) && test_bit(BCH_FS_started, &c->flags) && test_bit(BCH_FS_clean_shutdown, &c->flags) && - c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) { + c->recovery.pass_done >= BCH_RECOVERY_PASS_journal_replay) { 
BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal)); BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty)); BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty)); @@ -412,6 +438,30 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c) return ret; } +static bool __bch2_fs_emergency_read_only2(struct bch_fs *c, struct printbuf *out, + bool locked) +{ + bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags); + + if (!locked) + bch2_journal_halt(&c->journal); + else + bch2_journal_halt_locked(&c->journal); + bch2_fs_read_only_async(c); + wake_up(&bch2_read_only_wait); + + if (ret) + prt_printf(out, "emergency read only at seq %llu\n", + journal_cur_seq(&c->journal)); + + return ret; +} + +bool bch2_fs_emergency_read_only2(struct bch_fs *c, struct printbuf *out) +{ + return __bch2_fs_emergency_read_only2(c, out, false); +} + bool bch2_fs_emergency_read_only_locked(struct bch_fs *c) { bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags); @@ -429,26 +479,42 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags)); + if (WARN_ON(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))) + return -BCH_ERR_erofs_no_alloc_info; + if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) { bch_err(c, "cannot go rw, unfixed btree errors"); return -BCH_ERR_erofs_unfixed_errors; } + if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) { + bch_err(c, "cannot go rw, filesystem is an unresized image file"); + return -BCH_ERR_erofs_filesystem_full; + } + if (test_bit(BCH_FS_rw, &c->flags)) return 0; bch_info(c, "going read-write"); + ret = bch2_fs_init_rw(c); + if (ret) + goto err; + ret = bch2_sb_members_v2_init(c); if (ret) goto err; clear_bit(BCH_FS_clean_shutdown, &c->flags); - __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) { - bch2_dev_allocator_add(c, ca); - percpu_ref_reinit(&ca->io_ref[WRITE]); - } + rcu_read_lock(); + for_each_online_member_rcu(c, ca) + if (ca->mi.state == BCH_MEMBER_STATE_rw) { + bch2_dev_allocator_add(c, ca); + enumerated_ref_start(&ca->io_ref[WRITE]); + } + rcu_read_unlock(); + bch2_recalc_capacity(c); /* @@ -474,14 +540,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) set_bit(BCH_FS_rw, &c->flags); set_bit(BCH_FS_was_rw, &c->flags); -#ifndef BCH_WRITE_REF_DEBUG - percpu_ref_reinit(&c->writes); -#else - for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) { - BUG_ON(atomic_long_read(&c->writes[i])); - atomic_long_inc(&c->writes[i]); - } -#endif + enumerated_ref_start(&c->writes); ret = bch2_copygc_start(c); if (ret) { @@ -517,6 +576,9 @@ int bch2_fs_read_write(struct bch_fs *c) if (c->opts.nochanges) return -BCH_ERR_erofs_nochanges; + if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)) + return -BCH_ERR_erofs_no_alloc_info; + return __bch2_fs_read_write(c, false); } @@ -543,35 +605,37 @@ static void __bch2_fs_free(struct bch_fs *c) bch2_find_btree_nodes_exit(&c->found_btree_nodes); bch2_free_pending_node_rewrites(c); bch2_free_fsck_errs(c); - bch2_fs_accounting_exit(c); - bch2_fs_sb_errors_exit(c); - bch2_fs_counters_exit(c); + bch2_fs_vfs_exit(c); bch2_fs_snapshots_exit(c); + bch2_fs_sb_errors_exit(c); + bch2_fs_replicas_exit(c); + bch2_fs_rebalance_exit(c); bch2_fs_quota_exit(c); + bch2_fs_nocow_locking_exit(c); + bch2_fs_journal_exit(&c->journal); bch2_fs_fs_io_direct_exit(c); bch2_fs_fs_io_buffered_exit(c); bch2_fs_fsio_exit(c); - bch2_fs_vfs_exit(c); - bch2_fs_ec_exit(c); - bch2_fs_encryption_exit(c); - bch2_fs_nocow_locking_exit(c); bch2_fs_io_write_exit(c); 
bch2_fs_io_read_exit(c); + bch2_fs_encryption_exit(c); + bch2_fs_ec_exit(c); + bch2_fs_counters_exit(c); + bch2_fs_compress_exit(c); + bch2_io_clock_exit(&c->io_clock[WRITE]); + bch2_io_clock_exit(&c->io_clock[READ]); bch2_fs_buckets_waiting_for_journal_exit(c); - bch2_fs_btree_interior_update_exit(c); + bch2_fs_btree_write_buffer_exit(c); bch2_fs_btree_key_cache_exit(&c->btree_key_cache); - bch2_fs_btree_cache_exit(c); bch2_fs_btree_iter_exit(c); - bch2_fs_replicas_exit(c); - bch2_fs_journal_exit(&c->journal); - bch2_io_clock_exit(&c->io_clock[WRITE]); - bch2_io_clock_exit(&c->io_clock[READ]); - bch2_fs_compress_exit(c); - bch2_fs_btree_gc_exit(c); + bch2_fs_btree_interior_update_exit(c); + bch2_fs_btree_cache_exit(c); + bch2_fs_accounting_exit(c); + bch2_fs_async_obj_exit(c); bch2_journal_keys_put_initial(c); bch2_find_btree_nodes_exit(&c->found_btree_nodes); + BUG_ON(atomic_read(&c->journal_keys.ref)); - bch2_fs_btree_write_buffer_exit(c); percpu_free_rwsem(&c->mark_lock); if (c->online_reserved) { u64 v = percpu_u64_get(c->online_reserved); @@ -587,9 +651,7 @@ static void __bch2_fs_free(struct bch_fs *c) mempool_exit(&c->btree_bounce_pool); bioset_exit(&c->btree_bio); mempool_exit(&c->fill_iter); -#ifndef BCH_WRITE_REF_DEBUG - percpu_ref_exit(&c->writes); -#endif + enumerated_ref_exit(&c->writes); kfree(rcu_dereference_protected(c->disk_groups, 1)); kfree(c->journal_seq_blacklist_table); @@ -601,8 +663,8 @@ static void __bch2_fs_free(struct bch_fs *c) destroy_workqueue(c->btree_read_complete_wq); if (c->copygc_wq) destroy_workqueue(c->copygc_wq); - if (c->btree_io_complete_wq) - destroy_workqueue(c->btree_io_complete_wq); + if (c->btree_write_complete_wq) + destroy_workqueue(c->btree_write_complete_wq); if (c->btree_update_wq) destroy_workqueue(c->btree_update_wq); @@ -628,6 +690,12 @@ void __bch2_fs_stop(struct bch_fs *c) bch2_fs_read_only(c); up_write(&c->state_lock); + for (unsigned i = 0; i < c->sb.nr_devices; i++) { + struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true); + if (ca) + bch2_dev_io_ref_stop(ca, READ); + } + for_each_member_device(c, ca) bch2_dev_unlink(ca); @@ -656,8 +724,6 @@ void __bch2_fs_stop(struct bch_fs *c) void bch2_fs_free(struct bch_fs *c) { - unsigned i; - mutex_lock(&bch_fs_list_lock); list_del(&c->list); mutex_unlock(&bch_fs_list_lock); @@ -665,7 +731,7 @@ void bch2_fs_free(struct bch_fs *c) closure_sync(&c->cl); closure_debug_destroy(&c->cl); - for (i = 0; i < c->sb.nr_devices; i++) { + for (unsigned i = 0; i < c->sb.nr_devices; i++) { struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true); if (ca) { @@ -693,9 +759,10 @@ static int bch2_fs_online(struct bch_fs *c) lockdep_assert_held(&bch_fs_list_lock); - if (__bch2_uuid_to_fs(c->sb.uuid)) { + if (c->sb.multi_device && + __bch2_uuid_to_fs(c->sb.uuid)) { bch_err(c, "filesystem UUID already open"); - return -EINVAL; + return -BCH_ERR_filesystem_uuid_already_open; } ret = bch2_fs_chardev_init(c); @@ -706,7 +773,9 @@ static int bch2_fs_online(struct bch_fs *c) bch2_fs_debug_init(c); - ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?: + ret = (c->sb.multi_device + ? 
kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) + : kobject_add(&c->kobj, NULL, "%s", c->name)) ?: kobject_add(&c->internal, &c->kobj, "internal") ?: kobject_add(&c->opts_dir, &c->kobj, "options") ?: #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT @@ -737,7 +806,37 @@ err: return ret; } -static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) +static int bch2_fs_init_rw(struct bch_fs *c) +{ + if (test_bit(BCH_FS_rw_init_done, &c->flags)) + return 0; + + if (!(c->btree_update_wq = alloc_workqueue("bcachefs", + WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_UNBOUND, 512)) || + !(c->btree_write_complete_wq = alloc_workqueue("bcachefs_btree_write_complete", + WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || + !(c->copygc_wq = alloc_workqueue("bcachefs_copygc", + WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || + !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit", + WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || + !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref", + WQ_FREEZABLE, 0))) + return -BCH_ERR_ENOMEM_fs_other_alloc; + + int ret = bch2_fs_btree_interior_update_init(c) ?: + bch2_fs_btree_write_buffer_init(c) ?: + bch2_fs_fs_io_buffered_init(c) ?: + bch2_fs_io_write_init(c) ?: + bch2_fs_journal_init(&c->journal); + if (ret) + return ret; + + set_bit(BCH_FS_rw_init_done, &c->flags); + return 0; +} + +static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts, + bch_sb_handles *sbs) { struct bch_fs *c; struct printbuf name = PRINTBUF; @@ -750,7 +849,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) goto out; } - c->stdio = (void *)(unsigned long) opts.stdio; + c->stdio = (void *)(unsigned long) opts->stdio; __module_get(THIS_MODULE); @@ -774,24 +873,29 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) refcount_set(&c->ro_ref, 1); init_waitqueue_head(&c->ro_ref_wait); - spin_lock_init(&c->recovery_pass_lock); - sema_init(&c->online_fsck_mutex, 1); for (i = 0; i < BCH_TIME_STAT_NR; i++) bch2_time_stats_init(&c->times[i]); - bch2_fs_copygc_init(c); - bch2_fs_btree_key_cache_init_early(&c->btree_key_cache); - bch2_fs_btree_iter_init_early(c); - bch2_fs_btree_interior_update_init_early(c); - bch2_fs_journal_keys_init(c); bch2_fs_allocator_background_init(c); bch2_fs_allocator_foreground_init(c); - bch2_fs_rebalance_init(c); - bch2_fs_quota_init(c); + bch2_fs_btree_cache_init_early(&c->btree_cache); + bch2_fs_btree_gc_init_early(c); + bch2_fs_btree_interior_update_init_early(c); + bch2_fs_btree_iter_init_early(c); + bch2_fs_btree_key_cache_init_early(&c->btree_key_cache); + bch2_fs_btree_write_buffer_init_early(c); + bch2_fs_copygc_init(c); bch2_fs_ec_init_early(c); + bch2_fs_journal_init_early(&c->journal); + bch2_fs_journal_keys_init(c); bch2_fs_move_init(c); + bch2_fs_nocow_locking_init_early(c); + bch2_fs_quota_init(c); + bch2_fs_recovery_passes_init(c); bch2_fs_sb_errors_init_early(c); + bch2_fs_snapshots_init_early(c); + bch2_fs_subvolumes_init_early(c); INIT_LIST_HEAD(&c->list); @@ -817,8 +921,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write]; c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq]; - bch2_fs_btree_cache_init_early(&c->btree_cache); - mutex_init(&c->sectors_available_lock); ret = percpu_init_rwsem(&c->mark_lock); @@ -832,14 +934,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) if (ret) goto 
err; - pr_uuid(&name, c->sb.user_uuid.b); - ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0; - if (ret) - goto err; - - strscpy(c->name, name.buf, sizeof(c->name)); - printbuf_exit(&name); - /* Compat: */ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 && !BCH_SB_JOURNAL_FLUSH_DELAY(sb)) @@ -854,7 +948,14 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) if (ret) goto err; - bch2_opts_apply(&c->opts, opts); + bch2_opts_apply(&c->opts, *opts); + + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + c->opts.block_size > PAGE_SIZE) { + bch_err(c, "cannot mount bs > ps filesystem without CONFIG_TRANSPARENT_HUGEPAGE"); + ret = -EINVAL; + goto err; + } c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc; if (c->opts.inodes_use_key_cache) @@ -870,26 +971,26 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) goto err; } + if (c->sb.multi_device) + pr_uuid(&name, c->sb.user_uuid.b); + else + prt_bdevname(&name, sbs->data[0].bdev); + + ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0; + if (ret) + goto err; + + strscpy(c->name, name.buf, sizeof(c->name)); + printbuf_exit(&name); + iter_size = sizeof(struct sort_iter) + (btree_blocks(c) + 1) * 2 * sizeof(struct sort_iter_set); - if (!(c->btree_update_wq = alloc_workqueue("bcachefs", - WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_UNBOUND, 512)) || - !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io", - WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || - !(c->copygc_wq = alloc_workqueue("bcachefs_copygc", - WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || - !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete", + if (!(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete", WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) || - !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit", - WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || - !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref", - WQ_FREEZABLE, 0)) || -#ifndef BCH_WRITE_REF_DEBUG - percpu_ref_init(&c->writes, bch2_writes_disabled, - PERCPU_REF_INIT_DEAD, GFP_KERNEL) || -#endif + enumerated_ref_init(&c->writes, BCH_WRITE_REF_NR, + bch2_writes_disabled) || mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || bioset_init(&c->btree_bio, 1, max(offsetof(struct btree_read_bio, bio), @@ -905,29 +1006,24 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) goto err; } - ret = bch2_fs_counters_init(c) ?: - bch2_fs_sb_errors_init(c) ?: - bch2_io_clock_init(&c->io_clock[READ]) ?: - bch2_io_clock_init(&c->io_clock[WRITE]) ?: - bch2_fs_journal_init(&c->journal) ?: - bch2_fs_btree_iter_init(c) ?: + ret = + bch2_fs_async_obj_init(c) ?: bch2_fs_btree_cache_init(c) ?: + bch2_fs_btree_iter_init(c) ?: bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?: - bch2_fs_btree_interior_update_init(c) ?: - bch2_fs_btree_gc_init(c) ?: bch2_fs_buckets_waiting_for_journal_init(c) ?: - bch2_fs_btree_write_buffer_init(c) ?: - bch2_fs_subvolumes_init(c) ?: - bch2_fs_io_read_init(c) ?: - bch2_fs_io_write_init(c) ?: - bch2_fs_nocow_locking_init(c) ?: - bch2_fs_encryption_init(c) ?: + bch2_io_clock_init(&c->io_clock[READ]) ?: + bch2_io_clock_init(&c->io_clock[WRITE]) ?: bch2_fs_compress_init(c) ?: + bch2_fs_counters_init(c) ?: bch2_fs_ec_init(c) ?: - bch2_fs_vfs_init(c) ?: + bch2_fs_encryption_init(c) ?: bch2_fs_fsio_init(c) ?: - bch2_fs_fs_io_buffered_init(c) ?: - bch2_fs_fs_io_direct_init(c); + 
bch2_fs_fs_io_direct_init(c) ?: + bch2_fs_io_read_init(c) ?: + bch2_fs_rebalance_init(c) ?: + bch2_fs_sb_errors_init(c) ?: + bch2_fs_vfs_init(c); if (ret) goto err; @@ -1013,6 +1109,11 @@ static void print_mount_opts(struct bch_fs *c) bch2_version_to_text(&p, c->sb.version_incompat_allowed); } + if (c->opts.verbose) { + prt_printf(&p, "\n features: "); + prt_bitflags(&p, bch2_sb_features, c->sb.features); + } + bch_info(c, "%s", p.buf); printbuf_exit(&p); } @@ -1020,19 +1121,18 @@ static void print_mount_opts(struct bch_fs *c) static bool bch2_fs_may_start(struct bch_fs *c) { struct bch_dev *ca; - unsigned i, flags = 0; + unsigned flags = 0; - if (c->opts.very_degraded) + switch (c->opts.degraded) { + case BCH_DEGRADED_very: flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST; - - if (c->opts.degraded) + break; + case BCH_DEGRADED_yes: flags |= BCH_FORCE_IF_DEGRADED; - - if (!c->opts.degraded && - !c->opts.very_degraded) { + break; + default: mutex_lock(&c->sb_lock); - - for (i = 0; i < c->disk_sb.sb->nr_devices; i++) { + for (unsigned i = 0; i < c->disk_sb.sb->nr_devices; i++) { if (!bch2_member_exists(c->disk_sb.sb, i)) continue; @@ -1046,9 +1146,10 @@ static bool bch2_fs_may_start(struct bch_fs *c) } } mutex_unlock(&c->sb_lock); + break; } - return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true); + return bch2_have_enough_devs(c, c->online_devs, flags, true); } int bch2_fs_start(struct bch_fs *c) @@ -1081,13 +1182,22 @@ int bch2_fs_start(struct bch_fs *c) goto err; } - for_each_online_member(c, ca) - bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now); + rcu_read_lock(); + for_each_online_member_rcu(c, ca) + bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = + cpu_to_le64(now); + rcu_read_unlock(); + /* + * Dno't write superblock yet: recovery might have to downgrade + */ mutex_unlock(&c->sb_lock); - for_each_rw_member(c, ca) - bch2_dev_allocator_add(c, ca); + rcu_read_lock(); + for_each_online_member_rcu(c, ca) + if (ca->mi.state == BCH_MEMBER_STATE_rw) + bch2_dev_allocator_add(c, ca); + rcu_read_unlock(); bch2_recalc_capacity(c); up_write(&c->state_lock); @@ -1100,7 +1210,7 @@ int bch2_fs_start(struct bch_fs *c) if (ret) goto err; - ret = bch2_opts_check_may_set(c); + ret = bch2_opts_hooks_pre_set(c); if (ret) goto err; @@ -1234,11 +1344,14 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs, static void bch2_dev_io_ref_stop(struct bch_dev *ca, int rw) { - if (!percpu_ref_is_zero(&ca->io_ref[rw])) { - reinit_completion(&ca->io_ref_completion[rw]); - percpu_ref_kill(&ca->io_ref[rw]); - wait_for_completion(&ca->io_ref_completion[rw]); - } + if (rw == READ) + clear_bit(ca->dev_idx, ca->fs->online_devs.d); + + if (!enumerated_ref_is_zero(&ca->io_ref[rw])) + enumerated_ref_stop(&ca->io_ref[rw], + rw == READ + ? 
bch2_dev_read_refs + : bch2_dev_write_refs); } static void bch2_dev_release(struct kobject *kobj) @@ -1250,8 +1363,8 @@ static void bch2_dev_release(struct kobject *kobj) static void bch2_dev_free(struct bch_dev *ca) { - WARN_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE])); - WARN_ON(!percpu_ref_is_zero(&ca->io_ref[READ])); + WARN_ON(!enumerated_ref_is_zero(&ca->io_ref[WRITE])); + WARN_ON(!enumerated_ref_is_zero(&ca->io_ref[READ])); cancel_work_sync(&ca->io_error_work); @@ -1260,6 +1373,9 @@ static void bch2_dev_free(struct bch_dev *ca) if (ca->kobj.state_in_sysfs) kobject_del(&ca->kobj); + bch2_bucket_bitmap_free(&ca->bucket_backpointer_mismatch); + bch2_bucket_bitmap_free(&ca->bucket_backpointer_empty); + bch2_free_super(&ca->disk_sb); bch2_dev_allocator_background_exit(ca); bch2_dev_journal_exit(ca); @@ -1271,8 +1387,8 @@ static void bch2_dev_free(struct bch_dev *ca) bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]); bch2_time_stats_quantiles_exit(&ca->io_latency[READ]); - percpu_ref_exit(&ca->io_ref[WRITE]); - percpu_ref_exit(&ca->io_ref[READ]); + enumerated_ref_exit(&ca->io_ref[WRITE]); + enumerated_ref_exit(&ca->io_ref[READ]); #ifndef CONFIG_BCACHEFS_DEBUG percpu_ref_exit(&ca->ref); #endif @@ -1284,7 +1400,7 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca) lockdep_assert_held(&c->state_lock); - if (percpu_ref_is_zero(&ca->io_ref[READ])) + if (enumerated_ref_is_zero(&ca->io_ref[READ])) return; __bch2_dev_read_only(c, ca); @@ -1306,20 +1422,6 @@ static void bch2_dev_ref_complete(struct percpu_ref *ref) } #endif -static void bch2_dev_io_ref_read_complete(struct percpu_ref *ref) -{ - struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[READ]); - - complete(&ca->io_ref_completion[READ]); -} - -static void bch2_dev_io_ref_write_complete(struct percpu_ref *ref) -{ - struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[WRITE]); - - complete(&ca->io_ref_completion[WRITE]); -} - static void bch2_dev_unlink(struct bch_dev *ca) { struct kobject *b; @@ -1381,8 +1483,6 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c, kobject_init(&ca->kobj, &bch2_dev_ktype); init_completion(&ca->ref_completion); - init_completion(&ca->io_ref_completion[READ]); - init_completion(&ca->io_ref_completion[WRITE]); INIT_WORK(&ca->io_error_work, bch2_io_error_work); @@ -1406,12 +1506,13 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c, atomic_long_set(&ca->ref, 1); #endif + mutex_init(&ca->bucket_backpointer_mismatch.lock); + mutex_init(&ca->bucket_backpointer_empty.lock); + bch2_dev_allocator_background_init(ca); - if (percpu_ref_init(&ca->io_ref[READ], bch2_dev_io_ref_read_complete, - PERCPU_REF_INIT_DEAD, GFP_KERNEL) || - percpu_ref_init(&ca->io_ref[WRITE], bch2_dev_io_ref_write_complete, - PERCPU_REF_INIT_DEAD, GFP_KERNEL) || + if (enumerated_ref_init(&ca->io_ref[READ], BCH_DEV_READ_REF_NR, NULL) || + enumerated_ref_init(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_NR, NULL) || !(ca->sb_read_scratch = kmalloc(BCH_SB_READ_SCRATCH_BUF_SIZE, GFP_KERNEL)) || bch2_dev_buckets_alloc(c, ca) || !(ca->io_done = alloc_percpu(*ca->io_done))) @@ -1428,7 +1529,9 @@ static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca, { ca->dev_idx = dev_idx; __set_bit(ca->dev_idx, ca->self.d); - scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx); + + if (!ca->name[0]) + scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx); ca->fs = c; rcu_assign_pointer(c->devs[ca->dev_idx], ca); @@ -1473,13 +1576,18 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct 
bch_sb_handle *sb) return -BCH_ERR_device_size_too_small; } - BUG_ON(!percpu_ref_is_zero(&ca->io_ref[READ])); - BUG_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE])); + BUG_ON(!enumerated_ref_is_zero(&ca->io_ref[READ])); + BUG_ON(!enumerated_ref_is_zero(&ca->io_ref[WRITE])); ret = bch2_dev_journal_init(ca, sb->sb); if (ret) return ret; + struct printbuf name = PRINTBUF; + prt_bdevname(&name, sb->bdev); + strscpy(ca->name, name.buf, sizeof(ca->name)); + printbuf_exit(&name); + /* Commit: */ ca->disk_sb = *sb; memset(sb, 0, sizeof(*sb)); @@ -1493,7 +1601,7 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb) ca->dev = ca->disk_sb.bdev->bd_dev; - percpu_ref_reinit(&ca->io_ref[READ]); + enumerated_ref_start(&ca->io_ref[READ]); return 0; } @@ -1517,16 +1625,9 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb) if (ret) return ret; - bch2_dev_sysfs_online(c, ca); - - struct printbuf name = PRINTBUF; - prt_bdevname(&name, ca->disk_sb.bdev); - - if (c->sb.nr_devices == 1) - strscpy(c->name, name.buf, sizeof(c->name)); - strscpy(ca->name, name.buf, sizeof(ca->name)); + set_bit(ca->dev_idx, c->online_devs.d); - printbuf_exit(&name); + bch2_dev_sysfs_online(c, ca); bch2_rebalance_wakeup(c); return 0; @@ -1578,7 +1679,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca, return true; /* do we have enough devices to read from? */ - new_online_devs = bch2_online_devs(c); + new_online_devs = c->online_devs; __clear_bit(ca->dev_idx, new_online_devs.d); return bch2_have_enough_devs(c, new_online_devs, flags, false); @@ -1608,8 +1709,8 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca) bch2_dev_allocator_add(c, ca); bch2_recalc_capacity(c); - if (percpu_ref_is_zero(&ca->io_ref[WRITE])) - percpu_ref_reinit(&ca->io_ref[WRITE]); + if (enumerated_ref_is_zero(&ca->io_ref[WRITE])) + enumerated_ref_start(&ca->io_ref[WRITE]); bch2_dev_do_discards(ca); } @@ -1663,6 +1764,8 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) { struct bch_member *m; unsigned dev_idx = ca->dev_idx, data; + bool fast_device_removal = !bch2_request_incompat_feature(c, + bcachefs_metadata_version_fast_device_removal); int ret; down_write(&c->state_lock); @@ -1681,11 +1784,25 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) __bch2_dev_read_only(c, ca); - ret = bch2_dev_data_drop(c, ca->dev_idx, flags); - bch_err_msg(ca, ret, "bch2_dev_data_drop()"); + ret = fast_device_removal + ? 
bch2_dev_data_drop_by_backpointers(c, ca->dev_idx, flags) + : (bch2_dev_data_drop(c, ca->dev_idx, flags) ?: + bch2_dev_remove_stripes(c, ca->dev_idx, flags)); if (ret) goto err; + /* Check if device still has data before blowing away alloc info */ + struct bch_dev_usage usage = bch2_dev_usage_read(ca); + for (unsigned i = 0; i < BCH_DATA_NR; i++) + if (!data_type_is_empty(i) && + !data_type_is_hidden(i) && + usage.buckets[i]) { + bch_err(ca, "Remove failed: still has data (%s, %llu buckets)", + __bch2_data_types[i], usage.buckets[i]); + ret = -EBUSY; + goto err; + } + ret = bch2_dev_remove_alloc(c, ca); bch_err_msg(ca, ret, "bch2_dev_remove_alloc()"); if (ret) @@ -1749,7 +1866,11 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) */ mutex_lock(&c->sb_lock); m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx); - memset(&m->uuid, 0, sizeof(m->uuid)); + + if (fast_device_removal) + m->uuid = BCH_SB_MEMBER_DELETED_UUID; + else + memset(&m->uuid, 0, sizeof(m->uuid)); bch2_write_super(c); @@ -1759,7 +1880,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) err: if (test_bit(BCH_FS_rw, &c->flags) && ca->mi.state == BCH_MEMBER_STATE_rw && - !percpu_ref_is_zero(&ca->io_ref[READ])) + !enumerated_ref_is_zero(&ca->io_ref[READ])) __bch2_dev_read_write(c, ca); up_write(&c->state_lock); return ret; @@ -1769,11 +1890,11 @@ err: int bch2_dev_add(struct bch_fs *c, const char *path) { struct bch_opts opts = bch2_opts_empty(); - struct bch_sb_handle sb; + struct bch_sb_handle sb = {}; struct bch_dev *ca = NULL; struct printbuf errbuf = PRINTBUF; struct printbuf label = PRINTBUF; - int ret; + int ret = 0; ret = bch2_read_super(path, &opts, &sb); bch_err_msg(c, ret, "reading super"); @@ -1790,6 +1911,20 @@ int bch2_dev_add(struct bch_fs *c, const char *path) } } + if (list_empty(&c->list)) { + mutex_lock(&bch_fs_list_lock); + if (__bch2_uuid_to_fs(c->sb.uuid)) + ret = -BCH_ERR_filesystem_uuid_already_open; + else + list_add(&c->list, &bch_fs_list); + mutex_unlock(&bch_fs_list_lock); + + if (ret) { + bch_err(c, "filesystem UUID already open"); + goto err; + } + } + ret = bch2_dev_may_add(sb.sb, c); if (ret) goto err; @@ -1806,6 +1941,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path) down_write(&c->state_lock); mutex_lock(&c->sb_lock); + SET_BCH_SB_MULTI_DEVICE(c->disk_sb.sb, true); ret = bch2_sb_from_fs(c, ca); bch_err_msg(c, ret, "setting up new superblock"); @@ -1821,6 +1957,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path) goto err_unlock; } unsigned dev_idx = ret; + ret = 0; /* success: */ @@ -1840,27 +1977,29 @@ int bch2_dev_add(struct bch_fs *c, const char *path) bch2_write_super(c); mutex_unlock(&c->sb_lock); - ret = bch2_dev_usage_init(ca, false); - if (ret) - goto err_late; + if (test_bit(BCH_FS_started, &c->flags)) { + ret = bch2_dev_usage_init(ca, false); + if (ret) + goto err_late; - ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional); - bch_err_msg(ca, ret, "marking new superblock"); - if (ret) - goto err_late; + ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional); + bch_err_msg(ca, ret, "marking new superblock"); + if (ret) + goto err_late; - ret = bch2_fs_freespace_init(c); - bch_err_msg(ca, ret, "initializing free space"); - if (ret) - goto err_late; + ret = bch2_fs_freespace_init(c); + bch_err_msg(ca, ret, "initializing free space"); + if (ret) + goto err_late; - if (ca->mi.state == BCH_MEMBER_STATE_rw) - __bch2_dev_read_write(c, ca); + if (ca->mi.state == BCH_MEMBER_STATE_rw) + __bch2_dev_read_write(c, ca); - 
ret = bch2_dev_journal_alloc(ca, false); - bch_err_msg(c, ret, "allocating journal"); - if (ret) - goto err_late; + ret = bch2_dev_journal_alloc(ca, false); + bch_err_msg(c, ret, "allocating journal"); + if (ret) + goto err_late; + } up_write(&c->state_lock); out: @@ -1971,6 +2110,18 @@ int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags) return 0; } +static int __bch2_dev_resize_alloc(struct bch_dev *ca, u64 old_nbuckets, u64 new_nbuckets) +{ + struct bch_fs *c = ca->fs; + u64 v[3] = { new_nbuckets - old_nbuckets, 0, 0 }; + + return bch2_trans_commit_do(ca->fs, NULL, NULL, 0, + bch2_disk_accounting_mod2(trans, false, v, dev_data_type, + .dev = ca->dev_idx, + .data_type = BCH_DATA_free)) ?: + bch2_dev_freespace_init(c, ca, old_nbuckets, new_nbuckets); +} + int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) { struct bch_member *m; @@ -2018,13 +2169,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) mutex_unlock(&c->sb_lock); if (ca->mi.freespace_initialized) { - u64 v[3] = { nbuckets - old_nbuckets, 0, 0 }; - - ret = bch2_trans_commit_do(ca->fs, NULL, NULL, 0, - bch2_disk_accounting_mod2(trans, false, v, dev_data_type, - .dev = ca->dev_idx, - .data_type = BCH_DATA_free)) ?: - bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets); + ret = __bch2_dev_resize_alloc(ca, old_nbuckets, nbuckets); if (ret) goto err; } @@ -2035,6 +2180,49 @@ err: return ret; } +int bch2_fs_resize_on_mount(struct bch_fs *c) +{ + for_each_online_member(c, ca, BCH_DEV_READ_REF_fs_resize_on_mount) { + u64 old_nbuckets = ca->mi.nbuckets; + u64 new_nbuckets = div64_u64(get_capacity(ca->disk_sb.bdev->bd_disk), + ca->mi.bucket_size); + + if (ca->mi.resize_on_mount && + new_nbuckets > ca->mi.nbuckets) { + bch_info(ca, "resizing to size %llu", new_nbuckets * ca->mi.bucket_size); + int ret = bch2_dev_buckets_resize(c, ca, new_nbuckets); + bch_err_fn(ca, ret); + if (ret) { + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_fs_resize_on_mount); + up_write(&c->state_lock); + return ret; + } + + mutex_lock(&c->sb_lock); + struct bch_member *m = + bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + m->nbuckets = cpu_to_le64(new_nbuckets); + SET_BCH_MEMBER_RESIZE_ON_MOUNT(m, false); + + c->disk_sb.sb->features[0] &= ~cpu_to_le64(BIT_ULL(BCH_FEATURE_small_image)); + bch2_write_super(c); + mutex_unlock(&c->sb_lock); + + if (ca->mi.freespace_initialized) { + ret = __bch2_dev_resize_alloc(ca, old_nbuckets, new_nbuckets); + if (ret) { + enumerated_ref_put(&ca->io_ref[READ], + BCH_DEV_READ_REF_fs_resize_on_mount); + up_write(&c->state_lock); + return ret; + } + } + } + } + return 0; +} + /* return with ref on ca->ref: */ struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name) { @@ -2095,20 +2283,32 @@ static void bch2_fs_bdev_mark_dead(struct block_device *bdev, bool surprise) if (!ca) goto unlock; - if (bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, BCH_FORCE_IF_DEGRADED)) { + bool dev = bch2_dev_state_allowed(c, ca, + BCH_MEMBER_STATE_failed, + BCH_FORCE_IF_DEGRADED); + + if (!dev && sb) { + if (!surprise) + sync_filesystem(sb); + shrink_dcache_sb(sb); + evict_inodes(sb); + } + + struct printbuf buf = PRINTBUF; + __bch2_log_msg_start(ca->name, &buf); + + prt_printf(&buf, "offline from block layer"); + + if (dev) { __bch2_dev_offline(c, ca); } else { - if (sb) { - if (!surprise) - sync_filesystem(sb); - shrink_dcache_sb(sb); - evict_inodes(sb); - } - bch2_journal_flush(&c->journal); - bch2_fs_emergency_read_only(c); + 
bch2_fs_emergency_read_only2(c, &buf); } + bch2_print_str(c, KERN_ERR, buf.buf); + printbuf_exit(&buf); + bch2_dev_put(ca); unlock: if (sb) @@ -2151,10 +2351,10 @@ static inline int sb_cmp(struct bch_sb *l, struct bch_sb *r) cmp_int(le64_to_cpu(l->write_time), le64_to_cpu(r->write_time)); } -struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, - struct bch_opts opts) +struct bch_fs *bch2_fs_open(darray_const_str *devices, + struct bch_opts *opts) { - DARRAY(struct bch_sb_handle) sbs = { 0 }; + bch_sb_handles sbs = {}; struct bch_fs *c = NULL; struct bch_sb_handle *best = NULL; struct printbuf errbuf = PRINTBUF; @@ -2163,26 +2363,26 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, if (!try_module_get(THIS_MODULE)) return ERR_PTR(-ENODEV); - if (!nr_devices) { + if (!devices->nr) { ret = -EINVAL; goto err; } - ret = darray_make_room(&sbs, nr_devices); + ret = darray_make_room(&sbs, devices->nr); if (ret) goto err; - for (unsigned i = 0; i < nr_devices; i++) { + darray_for_each(*devices, i) { struct bch_sb_handle sb = { NULL }; - ret = bch2_read_super(devices[i], &opts, &sb); + ret = bch2_read_super(*i, opts, &sb); if (ret) goto err; BUG_ON(darray_push(&sbs, sb)); } - if (opts.nochanges && !opts.read_only) { + if (opts->nochanges && !opts->read_only) { ret = -BCH_ERR_erofs_nochanges; goto err_print; } @@ -2192,7 +2392,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, best = sb; darray_for_each_reverse(sbs, sb) { - ret = bch2_dev_in_fs(best, sb, &opts); + ret = bch2_dev_in_fs(best, sb, opts); if (ret == -BCH_ERR_device_has_been_removed || ret == -BCH_ERR_device_splitbrain) { @@ -2207,7 +2407,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, goto err_print; } - c = bch2_fs_alloc(best->sb, opts); + c = bch2_fs_alloc(best->sb, opts, &sbs); ret = PTR_ERR_OR_ZERO(c); if (ret) goto err; @@ -2236,7 +2436,7 @@ out: return c; err_print: pr_err("bch_fs_open err opening %s: %s", - devices[0], bch2_err_str(ret)); + devices->data[0], bch2_err_str(ret)); err: if (!IS_ERR_OR_NULL(c)) bch2_fs_stop(c); @@ -2273,9 +2473,45 @@ err: return -ENOMEM; } -#define BCH_DEBUG_PARAM(name, description) \ - bool bch2_##name; \ - module_param_named(name, bch2_##name, bool, 0644); \ +#define BCH_DEBUG_PARAM(name, description) DEFINE_STATIC_KEY_FALSE(bch2_##name); +BCH_DEBUG_PARAMS_ALL() +#undef BCH_DEBUG_PARAM + +static int bch2_param_set_static_key_t(const char *val, const struct kernel_param *kp) +{ + /* Match bool exactly, by re-using it. */ + struct static_key *key = kp->arg; + struct kernel_param boolkp = *kp; + bool v; + int ret; + + boolkp.arg = &v; + + ret = param_set_bool(val, &boolkp); + if (ret) + return ret; + if (v) + static_key_enable(key); + else + static_key_disable(key); + return 0; +} + +static int bch2_param_get_static_key_t(char *buffer, const struct kernel_param *kp) +{ + struct static_key *key = kp->arg; + return sprintf(buffer, "%c\n", static_key_enabled(key) ? 
'Y' : 'N'); +} + +static const struct kernel_param_ops bch2_param_ops_static_key_t = { + .flags = KERNEL_PARAM_OPS_FL_NOARG, + .set = bch2_param_set_static_key_t, + .get = bch2_param_get_static_key_t, +}; + +#define BCH_DEBUG_PARAM(name, description) \ + module_param_cb(name, &bch2_param_ops_static_key_t, &bch2_##name.key, 0644);\ + __MODULE_PARM_TYPE(name, "static_key_t"); \ MODULE_PARM_DESC(name, description); BCH_DEBUG_PARAMS() #undef BCH_DEBUG_PARAM diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h index 23533bce5709..dc52f06cb2b9 100644 --- a/fs/bcachefs/super.h +++ b/fs/bcachefs/super.h @@ -9,6 +9,9 @@ #include <linux/math64.h> extern const char * const bch2_fs_flag_strs[]; +extern const char * const bch2_write_refs[]; +extern const char * const bch2_dev_read_refs[]; +extern const char * const bch2_dev_write_refs[]; struct bch_fs *bch2_dev_to_fs(dev_t); struct bch_fs *bch2_uuid_to_fs(__uuid_t); @@ -29,18 +32,22 @@ int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64); struct bch_dev *bch2_dev_lookup(struct bch_fs *, const char *); bool bch2_fs_emergency_read_only(struct bch_fs *); +bool bch2_fs_emergency_read_only2(struct bch_fs *, struct printbuf *); + bool bch2_fs_emergency_read_only_locked(struct bch_fs *); void bch2_fs_read_only(struct bch_fs *); int bch2_fs_read_write(struct bch_fs *); int bch2_fs_read_write_early(struct bch_fs *); +int bch2_fs_resize_on_mount(struct bch_fs *); + void __bch2_fs_stop(struct bch_fs *); void bch2_fs_free(struct bch_fs *); void bch2_fs_stop(struct bch_fs *); int bch2_fs_start(struct bch_fs *); -struct bch_fs *bch2_fs_open(char * const *, unsigned, struct bch_opts); +struct bch_fs *bch2_fs_open(darray_const_str *, struct bch_opts *); extern const struct blk_holder_ops bch2_sb_handle_bdev_ops; diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c index 82ee333ddd21..1a55196d69f1 100644 --- a/fs/bcachefs/sysfs.c +++ b/fs/bcachefs/sysfs.c @@ -25,6 +25,7 @@ #include "disk_accounting.h" #include "disk_groups.h" #include "ec.h" +#include "enumerated_ref.h" #include "inode.h" #include "journal.h" #include "journal_reclaim.h" @@ -34,6 +35,7 @@ #include "nocow_locking.h" #include "opts.h" #include "rebalance.h" +#include "recovery_passes.h" #include "replicas.h" #include "super-io.h" #include "tests.h" @@ -145,8 +147,10 @@ write_attribute(trigger_journal_flush); write_attribute(trigger_journal_writes); write_attribute(trigger_btree_cache_shrink); write_attribute(trigger_btree_key_cache_shrink); -write_attribute(trigger_freelist_wakeup); write_attribute(trigger_btree_updates); +write_attribute(trigger_freelist_wakeup); +write_attribute(trigger_recalc_capacity); +write_attribute(trigger_delete_dead_snapshots); read_attribute(gc_gens_pos); read_attribute(uuid); @@ -176,25 +180,9 @@ read_attribute(open_buckets); read_attribute(open_buckets_partial); read_attribute(nocow_lock_table); -#ifdef BCH_WRITE_REF_DEBUG +read_attribute(read_refs); read_attribute(write_refs); -static const char * const bch2_write_refs[] = { -#define x(n) #n, - BCH_WRITE_REFS() -#undef x - NULL -}; - -static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c) -{ - bch2_printbuf_tabstop_push(out, 24); - - for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) - prt_printf(out, "%s\t%li\n", bch2_write_refs[i], atomic_long_read(&c->writes[i])); -} -#endif - read_attribute(internal_uuid); read_attribute(disk_groups); @@ -212,6 +200,8 @@ read_attribute(copy_gc_wait); sysfs_pd_controller_attribute(rebalance); read_attribute(rebalance_status); 
+read_attribute(snapshot_delete_status); +read_attribute(recovery_status); read_attribute(new_stripes); @@ -334,6 +324,12 @@ SHOW(bch2_fs) if (attr == &sysfs_rebalance_status) bch2_rebalance_status_to_text(out, c); + if (attr == &sysfs_snapshot_delete_status) + bch2_snapshot_delete_status_to_text(out, c); + + if (attr == &sysfs_recovery_status) + bch2_recovery_pass_status_to_text(out, c); + /* Debugging: */ if (attr == &sysfs_journal_debug) @@ -369,10 +365,8 @@ SHOW(bch2_fs) if (attr == &sysfs_moving_ctxts) bch2_fs_moving_ctxts_to_text(out, c); -#ifdef BCH_WRITE_REF_DEBUG if (attr == &sysfs_write_refs) - bch2_write_refs_to_text(out, c); -#endif + enumerated_ref_to_text(out, &c->writes, bch2_write_refs); if (attr == &sysfs_nocow_lock_table) bch2_nocow_locks_to_text(out, &c->nocow_locks); @@ -405,7 +399,7 @@ STORE(bch2_fs) if (attr == &sysfs_trigger_btree_updates) queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work); - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)) + if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_sysfs)) return -EROFS; if (attr == &sysfs_trigger_btree_cache_shrink) { @@ -445,6 +439,15 @@ STORE(bch2_fs) if (attr == &sysfs_trigger_freelist_wakeup) closure_wake_up(&c->freelist_wait); + if (attr == &sysfs_trigger_recalc_capacity) { + down_read(&c->state_lock); + bch2_recalc_capacity(c); + up_read(&c->state_lock); + } + + if (attr == &sysfs_trigger_delete_dead_snapshots) + __bch2_delete_dead_snapshots(c); + #ifdef CONFIG_BCACHEFS_TESTS if (attr == &sysfs_perf_test) { char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp; @@ -465,7 +468,7 @@ STORE(bch2_fs) size = ret; } #endif - bch2_write_ref_put(c, BCH_WRITE_REF_sysfs); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_sysfs); return size; } SYSFS_OPS(bch2_fs); @@ -476,6 +479,8 @@ struct attribute *bch2_fs_files[] = { &sysfs_btree_write_stats, &sysfs_rebalance_status, + &sysfs_snapshot_delete_status, + &sysfs_recovery_status, &sysfs_compression_stats, @@ -558,9 +563,7 @@ struct attribute *bch2_fs_internal_files[] = { &sysfs_new_stripes, &sysfs_open_buckets, &sysfs_open_buckets_partial, -#ifdef BCH_WRITE_REF_DEBUG &sysfs_write_refs, -#endif &sysfs_nocow_lock_table, &sysfs_io_timers_read, &sysfs_io_timers_write, @@ -572,8 +575,10 @@ struct attribute *bch2_fs_internal_files[] = { &sysfs_trigger_journal_writes, &sysfs_trigger_btree_cache_shrink, &sysfs_trigger_btree_key_cache_shrink, - &sysfs_trigger_freelist_wakeup, &sysfs_trigger_btree_updates, + &sysfs_trigger_freelist_wakeup, + &sysfs_trigger_recalc_capacity, + &sysfs_trigger_delete_dead_snapshots, &sysfs_gc_gens_pos, @@ -626,7 +631,7 @@ static ssize_t sysfs_opt_store(struct bch_fs *c, * We don't need to take c->writes for correctness, but it eliminates an * unsightly error message in the dmesg log when we're RO: */ - if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))) + if (unlikely(!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_sysfs))) return -EROFS; char *tmp = kstrdup(buf, GFP_KERNEL); @@ -637,40 +642,34 @@ static ssize_t sysfs_opt_store(struct bch_fs *c, u64 v; ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL) ?: - bch2_opt_check_may_set(c, ca, id, v); + bch2_opt_hook_pre_set(c, ca, id, v); kfree(tmp); if (ret < 0) goto err; - bch2_opt_set_sb(c, ca, opt, v); - bch2_opt_set_by_id(&c->opts, id, v); - - if (v && - (id == Opt_background_target || - (id == Opt_foreground_target && !c->opts.background_target) || - id == Opt_background_compression || - (id == Opt_compression && !c->opts.background_compression))) - 
bch2_set_rebalance_needs_scan(c, 0); + bool is_sb = opt->get_sb || opt->get_member; + bool changed = false; - if (v && id == Opt_rebalance_enabled) - bch2_rebalance_wakeup(c); - - if (v && id == Opt_copygc_enabled) - bch2_copygc_wakeup(c); + if (is_sb) { + changed = bch2_opt_set_sb(c, ca, opt, v); + } else if (!ca) { + changed = bch2_opt_get_by_id(&c->opts, id) != v; + } else { + /* device options that aren't superblock options aren't + * supported */ + BUG(); + } - if (id == Opt_discard && !ca) { - mutex_lock(&c->sb_lock); - for_each_member_device(c, ca) - opt->set_member(bch2_members_v2_get_mut(ca->disk_sb.sb, ca->dev_idx), v); + if (!ca) + bch2_opt_set_by_id(&c->opts, id, v); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); - } + if (changed) + bch2_opt_hook_post_set(c, ca, 0, &c->opts, id); ret = size; err: - bch2_write_ref_put(c, BCH_WRITE_REF_sysfs); + enumerated_ref_put(&c->writes, BCH_WRITE_REF_sysfs); return ret; } @@ -821,6 +820,12 @@ SHOW(bch2_dev) if (opt_id >= 0) return sysfs_opt_show(c, ca, opt_id, out); + if (attr == &sysfs_read_refs) + enumerated_ref_to_text(out, &ca->io_ref[READ], bch2_dev_read_refs); + + if (attr == &sysfs_write_refs) + enumerated_ref_to_text(out, &ca->io_ref[WRITE], bch2_dev_write_refs); + return 0; } @@ -876,6 +881,9 @@ struct attribute *bch2_dev_files[] = { /* debug: */ &sysfs_alloc_debug, &sysfs_open_buckets, + + &sysfs_read_refs, + &sysfs_write_refs, NULL }; diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h index 519d00d62ae7..8cb5b40704fd 100644 --- a/fs/bcachefs/trace.h +++ b/fs/bcachefs/trace.h @@ -339,6 +339,11 @@ DEFINE_EVENT(bio, io_read_reuse_race, TP_ARGS(bio) ); +DEFINE_EVENT(bio, io_read_fail_and_poison, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + /* ec.c */ TRACE_EVENT(stripe_create, @@ -1122,51 +1127,9 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split, TP_ARGS(trans, caller_ip, path) ); -TRACE_EVENT(trans_restart_upgrade, - TP_PROTO(struct btree_trans *trans, - unsigned long caller_ip, - struct btree_path *path, - unsigned old_locks_want, - unsigned new_locks_want, - struct get_locks_fail *f), - TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f), - - TP_STRUCT__entry( - __array(char, trans_fn, 32 ) - __field(unsigned long, caller_ip ) - __field(u8, btree_id ) - __field(u8, old_locks_want ) - __field(u8, new_locks_want ) - __field(u8, level ) - __field(u32, path_seq ) - __field(u32, node_seq ) - TRACE_BPOS_entries(pos) - ), - - TP_fast_assign( - strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); - __entry->caller_ip = caller_ip; - __entry->btree_id = path->btree_id; - __entry->old_locks_want = old_locks_want; - __entry->new_locks_want = new_locks_want; - __entry->level = f->l; - __entry->path_seq = path->l[f->l].lock_seq; - __entry->node_seq = IS_ERR_OR_NULL(f->b) ? 
0 : f->b->c.lock.seq; - TRACE_BPOS_assign(pos, path->pos) - ), - - TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u", - __entry->trans_fn, - (void *) __entry->caller_ip, - bch2_btree_id_str(__entry->btree_id), - __entry->pos_inode, - __entry->pos_offset, - __entry->pos_snapshot, - __entry->old_locks_want, - __entry->new_locks_want, - __entry->level, - __entry->path_seq, - __entry->node_seq) +DEFINE_EVENT(fs_str, trans_restart_upgrade, + TP_PROTO(struct bch_fs *c, const char *str), + TP_ARGS(c, str) ); DEFINE_EVENT(trans_str, trans_restart_relock, @@ -1468,6 +1431,11 @@ DEFINE_EVENT(fs_str, data_update, TP_ARGS(c, str) ); +DEFINE_EVENT(fs_str, io_move_created_rebalance, + TP_PROTO(struct bch_fs *c, const char *str), + TP_ARGS(c, str) +); + TRACE_EVENT(error_downcast, TP_PROTO(int bch_err, int std_err, unsigned long ip), TP_ARGS(bch_err, std_err, ip), diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c index 87af551692f4..dc3817f545fa 100644 --- a/fs/bcachefs/util.c +++ b/fs/bcachefs/util.c @@ -252,8 +252,18 @@ void bch2_prt_u64_base2(struct printbuf *out, u64 v) bch2_prt_u64_base2_nbits(out, v, fls64(v) ?: 1); } -static void __bch2_print_string_as_lines(const char *prefix, const char *lines, - bool nonblocking) +static bool string_is_spaces(const char *str) +{ + while (*str) { + if (*str != ' ') + return false; + str++; + } + return true; +} + +void bch2_print_string_as_lines(const char *prefix, const char *lines, + bool nonblocking) { bool locked = false; const char *p; @@ -272,6 +282,9 @@ static void __bch2_print_string_as_lines(const char *prefix, const char *lines, while (*lines) { p = strchrnul(lines, '\n'); + if (!*p && string_is_spaces(lines)) + break; + printk("%s%.*s\n", prefix, (int) (p - lines), lines); if (!*p) break; @@ -281,16 +294,6 @@ static void __bch2_print_string_as_lines(const char *prefix, const char *lines, console_unlock(); } -void bch2_print_string_as_lines(const char *prefix, const char *lines) -{ - return __bch2_print_string_as_lines(prefix, lines, false); -} - -void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines) -{ - return __bch2_print_string_as_lines(prefix, lines, true); -} - int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr, gfp_t gfp) { @@ -725,6 +728,16 @@ void bch2_corrupt_bio(struct bio *bio) } #endif +void bch2_bio_to_text(struct printbuf *out, struct bio *bio) +{ + prt_printf(out, "bi_remaining:\t%u\n", + atomic_read(&bio->__bi_remaining)); + prt_printf(out, "bi_end_io:\t%ps\n", + bio->bi_end_io); + prt_printf(out, "bi_status:\t%u\n", + bio->bi_status); +} + #if 0 void eytzinger1_test(void) { @@ -1003,14 +1016,14 @@ u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr) return ret; } -void bch2_darray_str_exit(darray_str *d) +void bch2_darray_str_exit(darray_const_str *d) { darray_for_each(*d, i) kfree(*i); darray_exit(d); } -int bch2_split_devs(const char *_dev_name, darray_str *ret) +int bch2_split_devs(const char *_dev_name, darray_const_str *ret) { darray_init(ret); diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index 3e52c7f8ddd2..25cf61ebd40c 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -14,6 +14,7 @@ #include <linux/log2.h> #include <linux/percpu.h> #include <linux/preempt.h> +#include <linux/random.h> #include <linux/ratelimit.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -55,15 +56,16 @@ static inline size_t buf_pages(void *p, size_t len) PAGE_SIZE); } -static inline void *bch2_kvmalloc(size_t n, 
gfp_t flags) +static inline void *bch2_kvmalloc_noprof(size_t n, gfp_t flags) { void *p = unlikely(n >= INT_MAX) - ? vmalloc(n) - : kvmalloc(n, flags & ~__GFP_ZERO); + ? vmalloc_noprof(n) + : kvmalloc_noprof(n, flags & ~__GFP_ZERO); if (p && (flags & __GFP_ZERO)) memset(p, 0, n); return p; } +#define bch2_kvmalloc(...) alloc_hooks(bch2_kvmalloc_noprof(__VA_ARGS__)) #define init_heap(heap, _size, gfp) \ ({ \ @@ -211,8 +213,7 @@ u64 bch2_read_flag_list(const char *, const char * const[]); void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned); void bch2_prt_u64_base2(struct printbuf *, u64); -void bch2_print_string_as_lines(const char *prefix, const char *lines); -void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines); +void bch2_print_string_as_lines(const char *, const char *, bool); typedef DARRAY(unsigned long) bch_stacktrace; int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t); @@ -419,6 +420,8 @@ static inline void bch2_maybe_corrupt_bio(struct bio *bio, unsigned ratio) #define bch2_maybe_corrupt_bio(...) do {} while (0) #endif +void bch2_bio_to_text(struct printbuf *, struct bio *); + static inline void memcpy_u64s_small(void *dst, const void *src, unsigned u64s) { @@ -688,8 +691,8 @@ static inline bool qstr_eq(const struct qstr l, const struct qstr r) return l.len == r.len && !memcmp(l.name, r.name, l.len); } -void bch2_darray_str_exit(darray_str *); -int bch2_split_devs(const char *, darray_str *); +void bch2_darray_str_exit(darray_const_str *); +int bch2_split_devs(const char *, darray_const_str *); #ifdef __KERNEL__ diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c index 651da52b2cbc..627f153798c6 100644 --- a/fs/bcachefs/xattr.c +++ b/fs/bcachefs/xattr.c @@ -38,7 +38,7 @@ static u64 xattr_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k) struct bkey_s_c_xattr x = bkey_s_c_to_xattr(k); return bch2_xattr_hash(info, - &X_SEARCH(x.v->x_type, x.v->x_name, x.v->x_name_len)); + &X_SEARCH(x.v->x_type, x.v->x_name_and_value, x.v->x_name_len)); } static bool xattr_cmp_key(struct bkey_s_c _l, const void *_r) @@ -48,7 +48,7 @@ static bool xattr_cmp_key(struct bkey_s_c _l, const void *_r) return l.v->x_type != r->type || l.v->x_name_len != r->name.len || - memcmp(l.v->x_name, r->name.name, r->name.len); + memcmp(l.v->x_name_and_value, r->name.name, r->name.len); } static bool xattr_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r) @@ -58,7 +58,7 @@ static bool xattr_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r) return l.v->x_type != r.v->x_type || l.v->x_name_len != r.v->x_name_len || - memcmp(l.v->x_name, r.v->x_name, r.v->x_name_len); + memcmp(l.v->x_name_and_value, r.v->x_name_and_value, r.v->x_name_len); } const struct bch_hash_desc bch2_xattr_hash_desc = { @@ -96,7 +96,7 @@ int bch2_xattr_validate(struct bch_fs *c, struct bkey_s_c k, c, xattr_invalid_type, "invalid type (%u)", xattr.v->x_type); - bkey_fsck_err_on(memchr(xattr.v->x_name, '\0', xattr.v->x_name_len), + bkey_fsck_err_on(memchr(xattr.v->x_name_and_value, '\0', xattr.v->x_name_len), c, xattr_name_invalid_chars, "xattr name has invalid characters"); fsck_err: @@ -120,13 +120,13 @@ void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c, unsigned name_len = xattr.v->x_name_len; unsigned val_len = le16_to_cpu(xattr.v->x_val_len); unsigned max_name_val_bytes = bkey_val_bytes(xattr.k) - - offsetof(struct bch_xattr, x_name); + offsetof(struct bch_xattr, x_name_and_value); val_len = min_t(int, val_len, max_name_val_bytes - name_len); 
name_len = min(name_len, max_name_val_bytes); prt_printf(out, "%.*s:%.*s", - name_len, xattr.v->x_name, + name_len, xattr.v->x_name_and_value, val_len, (char *) xattr_val(xattr.v)); if (xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS || @@ -176,6 +176,11 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum, if (ret) return ret; + /* + * Besides the ctime update, extents, dirents and xattrs updates require + * that an inode update also happens - to ensure that if a key exists in + * one of those btrees with a given snapshot ID an inode is also present + */ inode_u->bi_ctime = bch2_current_time(c); ret = bch2_inode_write(trans, &inode_iter, inode_u); @@ -202,7 +207,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum, xattr->v.x_type = type; xattr->v.x_name_len = namelen; xattr->v.x_val_len = cpu_to_le16(size); - memcpy(xattr->v.x_name, name, namelen); + memcpy(xattr->v.x_name_and_value, name, namelen); memcpy(xattr_val(&xattr->v), value, size); ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info, @@ -270,7 +275,7 @@ static int bch2_xattr_emit(struct dentry *dentry, if (!prefix) return 0; - return __bch2_xattr_emit(prefix, xattr->x_name, xattr->x_name_len, buf); + return __bch2_xattr_emit(prefix, xattr->x_name_and_value, xattr->x_name_len, buf); } static int bch2_xattr_list_bcachefs(struct bch_fs *c, @@ -473,6 +478,12 @@ static int inode_opt_set_fn(struct btree_trans *trans, { struct inode_opt_set *s = p; + if (s->id == Inode_opt_casefold) { + int ret = bch2_inode_set_casefold(trans, inode_inum(inode), bi, s->v); + if (ret) + return ret; + } + if (s->defined) bi->bi_fields_set |= 1U << s->id; else @@ -523,7 +534,7 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, if (ret < 0) goto err_class_exit; - ret = bch2_opt_check_may_set(c, NULL, opt_id, v); + ret = bch2_opt_hook_pre_set(c, NULL, opt_id, v); if (ret < 0) goto err_class_exit; diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h index 132fbbd15a66..1139bf345f70 100644 --- a/fs/bcachefs/xattr.h +++ b/fs/bcachefs/xattr.h @@ -18,12 +18,12 @@ void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); static inline unsigned xattr_val_u64s(unsigned name_len, unsigned val_len) { - return DIV_ROUND_UP(offsetof(struct bch_xattr, x_name) + + return DIV_ROUND_UP(offsetof(struct bch_xattr, x_name_and_value) + name_len + val_len, sizeof(u64)); } #define xattr_val(_xattr) \ - ((void *) (_xattr)->x_name + (_xattr)->x_name_len) + ((void *) (_xattr)->x_name_and_value + (_xattr)->x_name_len) struct xattr_search_key { u8 type; diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h index 67426e33d04e..4121b78d9a92 100644 --- a/fs/bcachefs/xattr_format.h +++ b/fs/bcachefs/xattr_format.h @@ -16,10 +16,10 @@ struct bch_xattr { /* * x_name contains the name and value counted by * x_name_len + x_val_len. The introduction of - * __counted_by(x_name_len) caused a false positive + * __counted_by(x_name_len) previously caused a false positive * detection of an out of bounds write. 
*/ - __u8 x_name[]; + __u8 x_name_and_value[]; } __packed __aligned(8); #endif /* _BCACHEFS_XATTR_FORMAT_H */ diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index db81570c9637..1d41ce477df5 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -17,6 +17,7 @@ #include <linux/writeback.h> #include <linux/uio.h> #include <linux/uaccess.h> +#include <linux/fs_context.h> #include "bfs.h" MODULE_AUTHOR("Tigran Aivazian <aivazian.tigran@gmail.com>"); @@ -305,7 +306,7 @@ void bfs_dump_imap(const char *prefix, struct super_block *s) #endif } -static int bfs_fill_super(struct super_block *s, void *data, int silent) +static int bfs_fill_super(struct super_block *s, struct fs_context *fc) { struct buffer_head *bh, *sbh; struct bfs_super_block *bfs_sb; @@ -314,6 +315,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) struct bfs_sb_info *info; int ret = -EINVAL; unsigned long i_sblock, i_eblock, i_eoff, s_size; + int silent = fc->sb_flags & SB_SILENT; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) @@ -446,18 +448,28 @@ out: return ret; } -static struct dentry *bfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int bfs_get_tree(struct fs_context *fc) { - return mount_bdev(fs_type, flags, dev_name, data, bfs_fill_super); + return get_tree_bdev(fc, bfs_fill_super); +} + +static const struct fs_context_operations bfs_context_ops = { + .get_tree = bfs_get_tree, +}; + +static int bfs_init_fs_context(struct fs_context *fc) +{ + fc->ops = &bfs_context_ops; + + return 0; } static struct file_system_type bfs_fs_type = { - .owner = THIS_MODULE, - .name = "bfs", - .mount = bfs_mount, - .kill_sb = kill_block_super, - .fs_flags = FS_REQUIRES_DEV, + .owner = THIS_MODULE, + .name = "bfs", + .init_fs_context = bfs_init_fs_context, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("bfs"); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 4c1ea6b52a53..a43363d593e5 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -68,12 +68,6 @@ static int load_elf_binary(struct linux_binprm *bprm); -#ifdef CONFIG_USELIB -static int load_elf_library(struct file *); -#else -#define load_elf_library NULL -#endif - /* * If we don't support core dumping, then supply a NULL so we * don't even try. @@ -101,7 +95,6 @@ static int elf_core_dump(struct coredump_params *cprm); static struct linux_binfmt elf_format = { .module = THIS_MODULE, .load_binary = load_elf_binary, - .load_shlib = load_elf_library, #ifdef CONFIG_COREDUMP .core_dump = elf_core_dump, .min_coredump = ELF_EXEC_PAGESIZE, @@ -1384,75 +1377,6 @@ out_free_ph: goto out; } -#ifdef CONFIG_USELIB -/* This is really simpleminded and specialized - we are loading an - a.out library that is given an ELF header. 
*/ -static int load_elf_library(struct file *file) -{ - struct elf_phdr *elf_phdata; - struct elf_phdr *eppnt; - int retval, error, i, j; - struct elfhdr elf_ex; - - error = -ENOEXEC; - retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0); - if (retval < 0) - goto out; - - if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0) - goto out; - - /* First of all, some simple consistency checks */ - if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || - !elf_check_arch(&elf_ex) || !file->f_op->mmap) - goto out; - if (elf_check_fdpic(&elf_ex)) - goto out; - - /* Now read in all of the header information */ - - j = sizeof(struct elf_phdr) * elf_ex.e_phnum; - /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */ - - error = -ENOMEM; - elf_phdata = kmalloc(j, GFP_KERNEL); - if (!elf_phdata) - goto out; - - eppnt = elf_phdata; - error = -ENOEXEC; - retval = elf_read(file, eppnt, j, elf_ex.e_phoff); - if (retval < 0) - goto out_free_ph; - - for (j = 0, i = 0; i<elf_ex.e_phnum; i++) - if ((eppnt + i)->p_type == PT_LOAD) - j++; - if (j != 1) - goto out_free_ph; - - while (eppnt->p_type != PT_LOAD) - eppnt++; - - /* Now use mmap to map the library into memory. */ - error = elf_load(file, ELF_PAGESTART(eppnt->p_vaddr), - eppnt, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED_NOREPLACE | MAP_PRIVATE, - 0); - - if (error != ELF_PAGESTART(eppnt->p_vaddr)) - goto out_free_ph; - - error = 0; - -out_free_ph: - kfree(elf_phdata); -out: - return error; -} -#endif /* #ifdef CONFIG_USELIB */ - #ifdef CONFIG_ELF_CORE /* * ELF core dumper diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 5a7ebd160724..432fbf4fc334 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -842,7 +842,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, } inode_lock(d_inode(root)); - dentry = lookup_one_len(e->name, root, strlen(e->name)); + dentry = lookup_noperm(&QSTR(e->name), root); err = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out; diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 73a2dfb854c5..c352f3ae0385 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -52,10 +52,10 @@ config BTRFS_FS_RUN_SANITY_TESTS bool "Btrfs will run sanity tests upon loading" depends on BTRFS_FS help - This will run some basic sanity tests on the free space cache - code to make sure it is acting as it should. These are mostly - regression tests and are only really interesting to btrfs - developers. + This will run sanity tests for core functionality like free space, + extent maps, extent io, extent buffers, inodes, qgroups and others, + at module load time. These are mostly regression tests and are only + interesting to developers. If unsure, say N. @@ -63,9 +63,12 @@ config BTRFS_DEBUG bool "Btrfs debugging support" depends on BTRFS_FS help - Enable run-time debugging support for the btrfs filesystem. This may - enable additional and expensive checks with negative impact on - performance, or export extra information via sysfs. + Enable run-time debugging support for the btrfs filesystem. + + Additional potentially expensive checks, debugging functionality or + sysfs exported information is enabled, like leak checks of internal + objects, optional forced space fragmentation and /sys/fs/btrfs/debug . + This has negative impact on performance. If unsure, say N. @@ -73,8 +76,10 @@ config BTRFS_ASSERT bool "Btrfs assert support" depends on BTRFS_FS help - Enable run-time assertion checking. This will result in panics if - any of the assertions trip. This is meant for btrfs developers only. 
+ Enable run-time assertion checking. Additional safety checks are + done, simple enough not to affect performance but verify invariants + and assumptions of code to run properly. This may result in panics, + and is meant for developers but can be enabled in general. If unsure, say N. @@ -89,7 +94,14 @@ config BTRFS_EXPERIMENTAL Current list: - - extent map shrinker - performance problems with too frequent shrinks + - COW fixup worker warning - last warning before removing the + functionality catching out-of-band page + dirtying, not necessary since 5.8 + + - RAID mirror read policy - additional read policies for balancing + reading from redundant block group + profiles (currently: pid, round-robin, + fixed devid) - send stream protocol v3 - fs-verity support diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index f3bffe08b290..6c6f3bb58f4e 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -219,8 +219,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq, spin_lock_irqsave(lock, flags); if (list_empty(list)) break; - work = list_entry(list->next, struct btrfs_work, - ordered_list); + work = list_first_entry(list, struct btrfs_work, ordered_list); if (!test_bit(WORK_DONE_BIT, &work->flags)) break; /* diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 5936cff80ff3..ed497f5f8d1b 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -2877,7 +2877,7 @@ int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr) goto release; } if (path->slots[0] == 0) { - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN(); ret = -EUCLEAN; goto release; } @@ -3134,8 +3134,8 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache, return; while (!list_empty(&node->upper)) { - edge = list_entry(node->upper.next, struct btrfs_backref_edge, - list[LOWER]); + edge = list_first_entry(&node->upper, struct btrfs_backref_edge, + list[LOWER]); list_del(&edge->list[LOWER]); list_del(&edge->list[UPPER]); btrfs_backref_free_edge(cache, edge); @@ -3473,8 +3473,8 @@ int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans, * type BTRFS_TREE_BLOCK_REF_KEY */ ASSERT(list_is_singular(&cur->upper)); - edge = list_entry(cur->upper.next, struct btrfs_backref_edge, - list[LOWER]); + edge = list_first_entry(&cur->upper, struct btrfs_backref_edge, + list[LOWER]); ASSERT(list_empty(&edge->list[UPPER])); exist = edge->node[UPPER]; /* @@ -3617,7 +3617,7 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, /* Sanity check, we shouldn't have any unchecked nodes */ if (!upper->checked) { - ASSERT(0); + DEBUG_WARN("we should not have any unchecked nodes"); return -EUCLEAN; } diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 74e614031274..953637115956 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -423,8 +423,8 @@ struct btrfs_backref_node *btrfs_backref_alloc_node( struct btrfs_backref_edge *btrfs_backref_alloc_edge( struct btrfs_backref_cache *cache); -#define LINK_LOWER (1 << 0) -#define LINK_UPPER (1 << 1) +#define LINK_LOWER (1U << 0) +#define LINK_UPPER (1U << 1) void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, struct btrfs_backref_node *lower, diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 8c2eee1f1878..f7d8958b7327 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -192,7 +192,7 @@ static void btrfs_end_repair_bio(struct btrfs_bio *repair_bbio, btrfs_repair_io_failure(fs_info, btrfs_ino(inode), repair_bbio->file_offset, fs_info->sectorsize, repair_bbio->saved_iter.bi_sector << 
SECTOR_SHIFT, - page_folio(bv->bv_page), bv->bv_offset, mirror); + bvec_phys(bv), mirror); } while (mirror != fbio->bbio->mirror_num); done: @@ -512,7 +512,7 @@ static void btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc, } } -static blk_status_t btrfs_bio_csum(struct btrfs_bio *bbio) +static int btrfs_bio_csum(struct btrfs_bio *bbio) { if (bbio->bio.bi_opf & REQ_META) return btree_csum_one_bio(bbio); @@ -543,11 +543,11 @@ static void run_one_async_start(struct btrfs_work *work) { struct async_submit_bio *async = container_of(work, struct async_submit_bio, work); - blk_status_t ret; + int ret; ret = btrfs_bio_csum(async->bbio); if (ret) - async->bbio->bio.bi_status = ret; + async->bbio->bio.bi_status = errno_to_blk_status(ret); } /* @@ -674,8 +674,8 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) bool use_append = btrfs_use_zone_append(bbio); struct btrfs_io_context *bioc = NULL; struct btrfs_io_stripe smap; - blk_status_t ret; - int error; + blk_status_t status; + int ret; if (!bbio->inode || btrfs_is_data_reloc_root(inode->root)) smap.rst_search_commit_root = true; @@ -683,10 +683,10 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) smap.rst_search_commit_root = false; btrfs_bio_counter_inc_blocked(fs_info); - error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, - &bioc, &smap, &mirror_num); - if (error) { - ret = errno_to_blk_status(error); + ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, + &bioc, &smap, &mirror_num); + if (ret) { + status = errno_to_blk_status(ret); btrfs_bio_counter_dec(fs_info); goto end_bbio; } @@ -700,7 +700,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) split = btrfs_split_bio(fs_info, bbio, map_length); if (IS_ERR(split)) { - ret = errno_to_blk_status(PTR_ERR(split)); + status = errno_to_blk_status(PTR_ERR(split)); btrfs_bio_counter_dec(fs_info); goto end_bbio; } @@ -715,7 +715,8 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) if (bio_op(bio) == REQ_OP_READ && is_data_bbio(bbio)) { bbio->saved_iter = bio->bi_iter; ret = btrfs_lookup_bio_sums(bbio); - if (ret) + status = errno_to_blk_status(ret); + if (status) goto fail; } @@ -748,13 +749,15 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) goto done; ret = btrfs_bio_csum(bbio); - if (ret) + status = errno_to_blk_status(ret); + if (status) goto fail; } else if (use_append || (btrfs_is_zoned(fs_info) && inode && inode->flags & BTRFS_INODE_NODATASUM)) { ret = btrfs_alloc_dummy_sum(bbio); - if (ret) + status = errno_to_blk_status(ret); + if (status) goto fail; } } @@ -775,10 +778,10 @@ fail: ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset); ASSERT(remaining); - btrfs_bio_end_io(remaining, ret); + btrfs_bio_end_io(remaining, status); } end_bbio: - btrfs_bio_end_io(bbio, ret); + btrfs_bio_end_io(bbio, status); /* Do not submit another chunk */ return true; } @@ -803,8 +806,7 @@ void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num) * freeing the bio. 
*/ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, - u64 length, u64 logical, struct folio *folio, - unsigned int folio_offset, int mirror_num) + u64 length, u64 logical, phys_addr_t paddr, int mirror_num) { struct btrfs_io_stripe smap = { 0 }; struct bio_vec bvec; @@ -835,8 +837,7 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC); bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT; - ret = bio_add_folio(&bio, folio, length, folio_offset); - ASSERT(ret); + __bio_add_page(&bio, phys_to_page(paddr), length, offset_in_page(paddr)); ret = submit_bio_wait(&bio); if (ret) { /* try to remap that extent elsewhere? */ @@ -900,22 +901,18 @@ int __init btrfs_bioset_init(void) return -ENOMEM; if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE, offsetof(struct btrfs_bio, bio), 0)) - goto out_free_bioset; + goto out; if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE, offsetof(struct btrfs_bio, bio), BIOSET_NEED_BVECS)) - goto out_free_clone_bioset; + goto out; if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE, sizeof(struct btrfs_failed_bio))) - goto out_free_repair_bioset; + goto out; return 0; -out_free_repair_bioset: - bioset_exit(&btrfs_repair_bioset); -out_free_clone_bioset: - bioset_exit(&btrfs_clone_bioset); -out_free_bioset: - bioset_exit(&btrfs_bioset); +out: + btrfs_bioset_exit(); return -ENOMEM; } diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h index e2fe16074ad6..dc2eb43b7097 100644 --- a/fs/btrfs/bio.h +++ b/fs/btrfs/bio.h @@ -110,7 +110,6 @@ void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status); void btrfs_submit_bbio(struct btrfs_bio *bbio, int mirror_num); void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace); int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, - u64 length, u64 logical, struct folio *folio, - unsigned int folio_offset, int mirror_num); + u64 length, u64 logical, phys_addr_t paddr, int mirror_num); #endif diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index a8129f1ce78c..5b0cb04b2b93 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -525,10 +525,9 @@ int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start, *total_added_ret = 0; while (start < end) { - if (!find_first_extent_bit(&info->excluded_extents, start, - &extent_start, &extent_end, - EXTENT_DIRTY | EXTENT_UPTODATE, - NULL)) + if (!btrfs_find_first_extent_bit(&info->excluded_extents, start, + &extent_start, &extent_end, + EXTENT_DIRTY, NULL)) break; if (extent_start <= start) { @@ -701,7 +700,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) struct btrfs_block_group *block_group = caching_ctl->block_group; struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_root *extent_root; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct extent_buffer *leaf; struct btrfs_key key; u64 total_found = 0; @@ -828,14 +827,13 @@ next: block_group->start + block_group->length, NULL); out: - btrfs_free_path(path); return ret; } static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg) { - clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, - bg->start + bg->length - 1, EXTENT_UPTODATE); + btrfs_clear_extent_bits(&bg->fs_info->excluded_extents, bg->start, + bg->start + bg->length - 1, EXTENT_DIRTY); } static noinline void caching_thread(struct btrfs_work *work) @@ -1420,9 
+1418,8 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans, int ret; spin_lock(&fs_info->trans_lock); - if (trans->transaction->list.prev != &fs_info->trans_list) { - prev_trans = list_last_entry(&trans->transaction->list, - struct btrfs_transaction, list); + if (!list_is_first(&trans->transaction->list, &fs_info->trans_list)) { + prev_trans = list_prev_entry(trans->transaction, list); refcount_inc(&prev_trans->use_count); } spin_unlock(&fs_info->trans_lock); @@ -1439,14 +1436,14 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans, */ mutex_lock(&fs_info->unused_bg_unpin_mutex); if (prev_trans) { - ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, - EXTENT_DIRTY); + ret = btrfs_clear_extent_bits(&prev_trans->pinned_extents, start, end, + EXTENT_DIRTY); if (ret) goto out; } - ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, - EXTENT_DIRTY); + ret = btrfs_clear_extent_bits(&trans->transaction->pinned_extents, start, end, + EXTENT_DIRTY); out: mutex_unlock(&fs_info->unused_bg_unpin_mutex); if (prev_trans) @@ -2218,9 +2215,9 @@ static int exclude_super_stripes(struct btrfs_block_group *cache) if (cache->start < BTRFS_SUPER_INFO_OFFSET) { stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; cache->bytes_super += stripe_len; - ret = set_extent_bit(&fs_info->excluded_extents, cache->start, - cache->start + stripe_len - 1, - EXTENT_UPTODATE, NULL); + ret = btrfs_set_extent_bit(&fs_info->excluded_extents, cache->start, + cache->start + stripe_len - 1, + EXTENT_DIRTY, NULL); if (ret) return ret; } @@ -2246,9 +2243,9 @@ static int exclude_super_stripes(struct btrfs_block_group *cache) cache->start + cache->length - logical[nr]); cache->bytes_super += len; - ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], - logical[nr] + len - 1, - EXTENT_UPTODATE, NULL); + ret = btrfs_set_extent_bit(&fs_info->excluded_extents, + logical[nr], logical[nr] + len - 1, + EXTENT_DIRTY, NULL); if (ret) { kfree(logical); return ret; @@ -2373,6 +2370,7 @@ static int read_one_block_group(struct btrfs_fs_info *info, cache->commit_used = cache->used; cache->flags = btrfs_stack_block_group_flags(bgi); cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); + cache->space_info = btrfs_find_space_info(info, cache->flags); set_free_space_tree_thresholds(cache); @@ -2451,6 +2449,7 @@ static int read_one_block_group(struct btrfs_fs_info *info, btrfs_remove_free_space_cache(cache); goto error; } + trace_btrfs_add_block_group(info, cache, 0); btrfs_add_bg_to_space_info(info, cache); @@ -2495,6 +2494,7 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) bg->cached = BTRFS_CACHE_FINISHED; bg->used = map->chunk_len; bg->flags = map->type; + bg->space_info = btrfs_find_space_info(fs_info, bg->flags); ret = btrfs_add_block_group_cache(bg); /* * We may have some valid block group cache added already, in @@ -2868,8 +2868,8 @@ static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 off } struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, - u64 type, - u64 chunk_offset, u64 size) + struct btrfs_space_info *space_info, + u64 type, u64 chunk_offset, u64 size) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *cache; @@ -2923,7 +2923,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran * assigned to our block group. We want our bg to be added to the rbtree * with its ->space_info set. 
*/ - cache->space_info = btrfs_find_space_info(fs_info, cache->flags); + cache->space_info = space_info; ASSERT(cache->space_info); ret = btrfs_add_block_group_cache(cache); @@ -2968,6 +2968,7 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, bool do_chunk_alloc) { struct btrfs_fs_info *fs_info = cache->fs_info; + struct btrfs_space_info *space_info = cache->space_info; struct btrfs_trans_handle *trans; struct btrfs_root *root = btrfs_block_group_root(fs_info); u64 alloc_flags; @@ -3020,7 +3021,7 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, */ alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); if (alloc_flags != cache->flags) { - ret = btrfs_chunk_alloc(trans, alloc_flags, + ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); /* * ENOSPC is allowed here, we may have enough space @@ -3048,15 +3049,15 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) goto unlock_out; - alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); - ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); + alloc_flags = btrfs_get_alloc_profile(fs_info, space_info->flags); + ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); if (ret < 0) goto out; /* * We have allocated a new chunk. We also need to activate that chunk to * grant metadata tickets for zoned filesystem. */ - ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); + ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true); if (ret < 0) goto out; @@ -3738,8 +3739,8 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, spin_unlock(&cache->lock); spin_unlock(&space_info->lock); - set_extent_bit(&trans->transaction->pinned_extents, bytenr, - bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); + btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr, + bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); } spin_lock(&trans->transaction->dirty_bgs_lock); @@ -3828,17 +3829,17 @@ out: /* * Update the block_group and space info counters. * - * @cache: The cache we are manipulating - * @num_bytes: The number of bytes in question - * @delalloc: The blocks are allocated for the delalloc write + * @cache: The cache we are manipulating. + * @num_bytes: The number of bytes in question. + * @is_delalloc: Whether the blocks are allocated for a delalloc write. * * This is called by somebody who is freeing space that was never actually used * on disk. For example if you reserve some space for a new leaf in transaction * A and before transaction A commits you free that leaf, you call this with * reserve set to 0 in order to clear the reservation. 
*/ -void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, - u64 num_bytes, int delalloc) +void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, + bool is_delalloc) { struct btrfs_space_info *space_info = cache->space_info; @@ -3852,7 +3853,7 @@ void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, space_info->bytes_reserved -= num_bytes; space_info->max_extent_size = 0; - if (delalloc) + if (is_delalloc) cache->delalloc_bytes -= num_bytes; spin_unlock(&cache->lock); @@ -3871,14 +3872,14 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) } } -static int should_alloc_chunk(const struct btrfs_fs_info *fs_info, - const struct btrfs_space_info *sinfo, int force) +static bool should_alloc_chunk(const struct btrfs_fs_info *fs_info, + const struct btrfs_space_info *sinfo, int force) { u64 bytes_used = btrfs_space_info_used(sinfo, false); u64 thresh; if (force == CHUNK_ALLOC_FORCE) - return 1; + return true; /* * in limited mode, we want to have some free space up to @@ -3889,22 +3890,31 @@ static int should_alloc_chunk(const struct btrfs_fs_info *fs_info, thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1)); if (sinfo->total_bytes - bytes_used < thresh) - return 1; + return true; } if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) - return 0; - return 1; + return false; + return true; } int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) { u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); + struct btrfs_space_info *space_info; - return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); + space_info = btrfs_find_space_info(trans->fs_info, type); + if (!space_info) { + DEBUG_WARN(); + return -EINVAL; + } + + return btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); } -static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) +static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_space_info *space_info, + u64 flags) { struct btrfs_block_group *bg; int ret; @@ -3917,7 +3927,7 @@ static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans */ check_system_chunk(trans, flags); - bg = btrfs_create_chunk(trans, flags); + bg = btrfs_create_chunk(trans, space_info, flags); if (IS_ERR(bg)) { ret = PTR_ERR(bg); goto out; @@ -3965,8 +3975,16 @@ static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans if (ret == -ENOSPC) { const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); struct btrfs_block_group *sys_bg; + struct btrfs_space_info *sys_space_info; + + sys_space_info = btrfs_find_space_info(trans->fs_info, sys_flags); + if (!sys_space_info) { + ret = -EINVAL; + btrfs_abort_transaction(trans, ret); + goto out; + } - sys_bg = btrfs_create_chunk(trans, sys_flags); + sys_bg = btrfs_create_chunk(trans, sys_space_info, sys_flags); if (IS_ERR(sys_bg)) { ret = PTR_ERR(sys_bg); btrfs_abort_transaction(trans, ret); @@ -4097,6 +4115,8 @@ out: * * This function, btrfs_chunk_alloc(), belongs to phase 1. * + * @space_info: specify which space_info the new chunk should belong to. + * * If @force is CHUNK_ALLOC_FORCE: * - return 1 if it successfully allocates a chunk, * - return errors including -ENOSPC otherwise. @@ -4105,11 +4125,11 @@ out: * - return 1 if it successfully allocates a chunk, * - return errors including -ENOSPC otherwise. 
*/ -int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, +int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_space_info *space_info, u64 flags, enum btrfs_chunk_alloc_enum force) { struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_space_info *space_info; struct btrfs_block_group *ret_bg; bool wait_for_alloc = false; bool should_alloc = false; @@ -4148,9 +4168,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, if (flags & BTRFS_BLOCK_GROUP_SYSTEM) return -ENOSPC; - space_info = btrfs_find_space_info(fs_info, flags); - ASSERT(space_info); - do { spin_lock(&space_info->lock); if (force < space_info->force_alloc) @@ -4211,7 +4228,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, force_metadata_allocation(fs_info); } - ret_bg = do_chunk_alloc(trans, flags); + ret_bg = do_chunk_alloc(trans, space_info, flags); trans->allocating_chunk = false; if (IS_ERR(ret_bg)) { @@ -4287,6 +4304,10 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans, if (left < bytes) { u64 flags = btrfs_system_alloc_profile(fs_info); struct btrfs_block_group *bg; + struct btrfs_space_info *space_info; + + space_info = btrfs_find_space_info(fs_info, flags); + ASSERT(space_info); /* * Ignore failure to create system chunk. We might end up not @@ -4294,7 +4315,7 @@ static void reserve_chunk_space(struct btrfs_trans_handle *trans, * the paths we visit in the chunk tree (they were already COWed * or created in the current transaction for example). */ - bg = btrfs_create_chunk(trans, flags); + bg = btrfs_create_chunk(trans, space_info, flags); if (IS_ERR(bg)) { ret = PTR_ERR(bg); } else { @@ -4402,6 +4423,43 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) } } +static void check_removing_space_info(struct btrfs_space_info *space_info) +{ + struct btrfs_fs_info *info = space_info->fs_info; + + if (space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY) { + /* This is a top space_info, proceed with its children first. */ + for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) { + if (space_info->sub_group[i]) { + check_removing_space_info(space_info->sub_group[i]); + kfree(space_info->sub_group[i]); + space_info->sub_group[i] = NULL; + } + } + } + + /* + * Do not hide this behind enospc_debug, this is actually important and + * indicates a real bug if this happens. + */ + if (WARN_ON(space_info->bytes_pinned > 0 || space_info->bytes_may_use > 0)) + btrfs_dump_space_info(info, space_info, 0, 0); + + /* + * If there was a failure to cleanup a log tree, very likely due to an + * IO failure on a writeback attempt of one or more of its extent + * buffers, we could not do proper (and cheap) unaccounting of their + * reserved space, so don't warn on bytes_reserved > 0 in that case. 
+ */ + if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || + !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { + if (WARN_ON(space_info->bytes_reserved > 0)) + btrfs_dump_space_info(info, space_info, 0, 0); + } + + WARN_ON(space_info->reclaim_size > 0); +} + /* * Must be called only after stopping all workers, since we could have block * group caching kthreads running, and therefore they could race with us if we @@ -4427,8 +4485,8 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) write_lock(&info->block_group_cache_lock); while (!list_empty(&info->caching_block_groups)) { - caching_ctl = list_entry(info->caching_block_groups.next, - struct btrfs_caching_control, list); + caching_ctl = list_first_entry(&info->caching_block_groups, + struct btrfs_caching_control, list); list_del(&caching_ctl->list); btrfs_put_caching_control(caching_ctl); } @@ -4499,32 +4557,10 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) btrfs_release_global_block_rsv(info); while (!list_empty(&info->space_info)) { - space_info = list_entry(info->space_info.next, - struct btrfs_space_info, - list); - - /* - * Do not hide this behind enospc_debug, this is actually - * important and indicates a real bug if this happens. - */ - if (WARN_ON(space_info->bytes_pinned > 0 || - space_info->bytes_may_use > 0)) - btrfs_dump_space_info(info, space_info, 0, 0); - - /* - * If there was a failure to cleanup a log tree, very likely due - * to an IO failure on a writeback attempt of one or more of its - * extent buffers, we could not do proper (and cheap) unaccounting - * of their reserved space, so don't warn on bytes_reserved > 0 in - * that case. - */ - if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || - !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { - if (WARN_ON(space_info->bytes_reserved > 0)) - btrfs_dump_space_info(info, space_info, 0, 0); - } + space_info = list_first_entry(&info->space_info, + struct btrfs_space_info, list); - WARN_ON(space_info->reclaim_size > 0); + check_removing_space_info(space_info); list_del(&space_info->list); btrfs_sysfs_remove_space_info(space_info); } diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 36937eeab9b8..9de356bcb411 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -326,8 +326,8 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info); void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg); int btrfs_read_block_groups(struct btrfs_fs_info *info); struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, - u64 type, - u64 chunk_offset, u64 size); + struct btrfs_space_info *space_info, + u64 type, u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, bool do_chunk_alloc); @@ -340,9 +340,10 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, u64 ram_bytes, u64 num_bytes, int delalloc, bool force_wrong_size_class); -void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, - u64 num_bytes, int delalloc); -int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, +void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, u64 num_bytes, + bool is_delalloc); +int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_space_info *space_info, u64 flags, enum btrfs_chunk_alloc_enum force); int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type); void check_system_chunk(struct btrfs_trans_handle 
*trans, const u64 type); diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index 3f3608299c0b..5ad6de738aee 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -418,6 +418,9 @@ void btrfs_init_root_block_rsv(struct btrfs_root *root) case BTRFS_CHUNK_TREE_OBJECTID: root->block_rsv = &fs_info->chunk_block_rsv; break; + case BTRFS_TREE_LOG_OBJECTID: + root->block_rsv = &fs_info->treelog_rsv; + break; default: root->block_rsv = NULL; break; @@ -438,6 +441,14 @@ void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info) fs_info->delayed_block_rsv.space_info = space_info; fs_info->delayed_refs_rsv.space_info = space_info; + /* The treelog_rsv uses a dedicated space_info on the zoned mode. */ + if (!btrfs_is_zoned(fs_info)) { + fs_info->treelog_rsv.space_info = space_info; + } else { + ASSERT(space_info->sub_group[0]->subgroup_id == BTRFS_SUB_GROUP_TREELOG); + fs_info->treelog_rsv.space_info = space_info->sub_group[0]; + } + btrfs_update_global_block_rsv(fs_info); } diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h index d12b1fac5c74..79ae9d05cd91 100644 --- a/fs/btrfs/block-rsv.h +++ b/fs/btrfs/block-rsv.h @@ -24,6 +24,7 @@ enum btrfs_rsv_type { BTRFS_BLOCK_RSV_CHUNK, BTRFS_BLOCK_RSV_DELOPS, BTRFS_BLOCK_RSV_DELREFS, + BTRFS_BLOCK_RSV_TREELOG, BTRFS_BLOCK_RSV_EMPTY, BTRFS_BLOCK_RSV_TEMP, }; diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 4e2952cf5766..a79fa0726f1d 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -529,8 +529,8 @@ static inline void btrfs_update_inode_mapping_flags(struct btrfs_inode *inode) #define CSUM_FMT "0x%*phN" #define CSUM_FMT_VALUE(size, bytes) size, bytes -int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, - u32 pgoff, u8 *csum, const u8 * const csum_expected); +int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, void *kaddr, u8 *csum, + const u8 * const csum_expected); bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, u32 bio_offset, struct bio_vec *bv); noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len, @@ -547,8 +547,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, struct btrfs_inode *parent_inode, struct btrfs_inode *inode, const struct fscrypt_str *name, int add_backref, u64 index); int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry); -int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, - int front); +int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end); int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context); int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr, diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 7f11ef559be6..48d07939fee4 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -285,12 +285,12 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb) unsigned long index = cb->start >> PAGE_SHIFT; unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; struct folio_batch fbatch; - const int error = blk_status_to_errno(cb->bbio.bio.bi_status); int i; int ret; - if (error) - mapping_set_error(inode->i_mapping, error); + ret = blk_status_to_errno(cb->bbio.bio.bi_status); + if (ret) + mapping_set_error(inode->i_mapping, ret); folio_batch_init(&fbatch); while (index <= end_index) { @@ -499,9 +499,9 @@ static noinline int add_ra_bio_pages(struct inode *inode, } page_end = (pg_index << PAGE_SHIFT) + 
folio_size(folio) - 1; - lock_extent(tree, cur, page_end, NULL); + btrfs_lock_extent(tree, cur, page_end, NULL); read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur); + em = btrfs_lookup_extent_mapping(em_tree, cur, page_end + 1 - cur); read_unlock(&em_tree->lock); /* @@ -510,20 +510,20 @@ static noinline int add_ra_bio_pages(struct inode *inode, * to this compressed extent on disk. */ if (!em || cur < em->start || - (cur + fs_info->sectorsize > extent_map_end(em)) || - (extent_map_block_start(em) >> SECTOR_SHIFT) != + (cur + fs_info->sectorsize > btrfs_extent_map_end(em)) || + (btrfs_extent_map_block_start(em) >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) { - free_extent_map(em); - unlock_extent(tree, cur, page_end, NULL); + btrfs_free_extent_map(em); + btrfs_unlock_extent(tree, cur, page_end, NULL); folio_unlock(folio); folio_put(folio); break; } add_size = min(em->start + em->len, page_end + 1) - cur; - free_extent_map(em); - unlock_extent(tree, cur, page_end, NULL); + btrfs_free_extent_map(em); + btrfs_unlock_extent(tree, cur, page_end, NULL); - if (folio->index == end_index) { + if (folio_contains(folio, end_index)) { size_t zero_offset = offset_in_folio(folio, isize); if (zero_offset) { @@ -576,19 +576,19 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio) struct extent_map *em; unsigned long pflags; int memstall = 0; - blk_status_t ret; - int ret2; + blk_status_t status; + int ret; /* we need the actual starting offset of this extent in the file */ read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize); + em = btrfs_lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize); read_unlock(&em_tree->lock); if (!em) { - ret = BLK_STS_IOERR; + status = BLK_STS_IOERR; goto out; } - ASSERT(extent_map_is_compressed(em)); + ASSERT(btrfs_extent_map_is_compressed(em)); compressed_len = em->disk_num_bytes; cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ, @@ -600,21 +600,21 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio) cb->len = bbio->bio.bi_iter.bi_size; cb->compressed_len = compressed_len; - cb->compress_type = extent_map_compression(em); + cb->compress_type = btrfs_extent_map_compression(em); cb->orig_bbio = bbio; - free_extent_map(em); + btrfs_free_extent_map(em); cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE); cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS); if (!cb->compressed_folios) { - ret = BLK_STS_RESOURCE; + status = BLK_STS_RESOURCE; goto out_free_bio; } - ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios); - if (ret2) { - ret = BLK_STS_RESOURCE; + ret = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios); + if (ret) { + status = BLK_STS_RESOURCE; goto out_free_compressed_pages; } @@ -637,7 +637,7 @@ out_free_compressed_pages: out_free_bio: bio_put(&cb->bbio.bio); out: - btrfs_bio_end_io(bbio, ret); + btrfs_bio_end_io(bbio, status); } /* @@ -1138,6 +1138,22 @@ void __cold btrfs_exit_compress(void) } /* + * The bvec is a single page bvec from a bio that contains folios from a filemap. + * + * Since the folio may be a large one, and if the bv_page is not a head page of + * a large folio, then page->index is unreliable. + * + * Thus we need this helper to grab the proper file offset. 
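The underlying problem is that page->index is only meaningful for the head page of a large folio, so page_offset() on a tail page of a filemap folio yields a bogus file offset. For illustration only, the value the new helper computes can also be expressed with the generic folio helpers folio_pos() and folio_page_idx(); the function name below is invented:

    /*
     * Illustrative sketch: file offset covered by a single-page bvec whose
     * page may be a tail page of a large filemap folio.
     */
    static u64 bvec_file_offset_sketch(const struct bio_vec *bvec)
    {
            struct folio *folio = page_folio(bvec->bv_page);

            /*
             * Offset of the folio in the file, plus the page's offset inside
             * the folio, plus the offset inside that page.
             */
            return folio_pos(folio) +
                   (folio_page_idx(folio, bvec->bv_page) << PAGE_SHIFT) +
                   bvec->bv_offset;
    }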
+ */ +static u64 file_offset_from_bvec(const struct bio_vec *bvec) +{ + const struct page *page = bvec->bv_page; + const struct folio *folio = page_folio(page); + + return (page_pgoff(folio, page) << PAGE_SHIFT) + bvec->bv_offset; +} + +/* * Copy decompressed data from working buffer to pages. * * @buf: The decompressed data buffer @@ -1182,13 +1198,14 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len, u32 copy_start; /* Offset inside the full decompressed extent */ u32 bvec_offset; + void *kaddr; bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter); /* * cb->start may underflow, but subtracting that value can still * give us correct offset inside the full decompressed extent. */ - bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start; + bvec_offset = file_offset_from_bvec(&bvec) - cb->start; /* Haven't reached the bvec range, exit */ if (decompressed + buf_len <= bvec_offset) @@ -1204,10 +1221,12 @@ int btrfs_decompress_buf2page(const char *buf, u32 buf_len, * @buf + @buf_len. */ ASSERT(copy_start - decompressed < buf_len); - memcpy_to_page(bvec.bv_page, bvec.bv_offset, - buf + copy_start - decompressed, copy_len); - cur_offset += copy_len; + kaddr = bvec_kmap_local(&bvec); + memcpy(kaddr, buf + copy_start - decompressed, copy_len); + kunmap_local(kaddr); + + cur_offset += copy_len; bio_advance(orig_bio, copy_len); /* Finished the bio */ if (!orig_bio->bi_iter.bi_size) diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index df198623cc08..d34c4341eaf4 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -11,7 +11,9 @@ #include <linux/list.h> #include <linux/workqueue.h> #include <linux/wait.h> +#include <linux/pagemap.h> #include "bio.h" +#include "messages.h" struct address_space; struct page; @@ -73,11 +75,14 @@ struct compressed_bio { }; /* @range_end must be exclusive. */ -static inline u32 btrfs_calc_input_length(u64 range_end, u64 cur) +static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u64 cur) { - u64 page_end = round_down(cur, PAGE_SIZE) + PAGE_SIZE; + const u64 folio_end = folio_pos(folio) + folio_size(folio); - return min(range_end, page_end) - cur; + /* @cur must be inside the folio. */ + ASSERT(folio_pos(folio) <= cur); + ASSERT(cur < folio_end); + return min(range_end, folio_end) - cur; } int __init btrfs_init_compress(void); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 075a06db43a1..71fa42ca04fe 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -61,7 +61,6 @@ struct btrfs_path { /* if there is real range locking, this locks field will change */ u8 locks[BTRFS_MAX_LEVEL]; u8 reada; - /* keep some upper locks as we walk down */ u8 lowest_level; /* @@ -69,6 +68,7 @@ struct btrfs_path { * and to force calls to keep space in the nodes */ unsigned int search_for_split:1; + /* Keep some upper locks as we walk down. 
*/ unsigned int keep_locks:1; unsigned int skip_locking:1; unsigned int search_commit_root:1; diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c index d4310d93f532..1831618579cb 100644 --- a/fs/btrfs/defrag.c +++ b/fs/btrfs/defrag.c @@ -105,15 +105,15 @@ static int btrfs_insert_inode_defrag(struct btrfs_inode *inode, return 0; } -static inline int need_auto_defrag(struct btrfs_fs_info *fs_info) +static inline bool need_auto_defrag(struct btrfs_fs_info *fs_info) { if (!btrfs_test_opt(fs_info, AUTO_DEFRAG)) - return 0; + return false; if (btrfs_fs_closing(fs_info)) - return 0; + return false; - return 1; + return true; } /* @@ -191,10 +191,7 @@ static struct inode_defrag *btrfs_pick_defrag_inode( if (parent && compare_inode_defrag(&tmp, entry) > 0) { parent = rb_next(parent); - if (parent) - entry = rb_entry(parent, struct inode_defrag, rb_node); - else - entry = NULL; + entry = rb_entry_safe(parent, struct inode_defrag, rb_node); } out: if (entry) @@ -624,7 +621,7 @@ static struct extent_map *defrag_get_extent(struct btrfs_inode *inode, u64 ino = btrfs_ino(inode); int ret; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { ret = -ENOMEM; goto err; @@ -734,12 +731,12 @@ next: not_found: btrfs_release_path(&path); - free_extent_map(em); + btrfs_free_extent_map(em); return NULL; err: btrfs_release_path(&path); - free_extent_map(em); + btrfs_free_extent_map(em); return ERR_PTR(ret); } @@ -756,7 +753,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start, * full extent lock. */ read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, start, sectorsize); + em = btrfs_lookup_extent_mapping(em_tree, start, sectorsize); read_unlock(&em_tree->lock); /* @@ -769,7 +766,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start, * file extent items in the inode's subvolume tree). */ if (em && (em->flags & EXTENT_FLAG_MERGED)) { - free_extent_map(em); + btrfs_free_extent_map(em); em = NULL; } @@ -779,10 +776,10 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start, /* Get the big lock and read metadata off disk. */ if (!locked) - lock_extent(io_tree, start, end, &cached); + btrfs_lock_extent(io_tree, start, end, &cached); em = defrag_get_extent(BTRFS_I(inode), start, newer_than); if (!locked) - unlock_extent(io_tree, start, end, &cached); + btrfs_unlock_extent(io_tree, start, end, &cached); if (IS_ERR(em)) return NULL; @@ -794,7 +791,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start, static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info, const struct extent_map *em) { - if (extent_map_is_compressed(em)) + if (btrfs_extent_map_is_compressed(em)) return BTRFS_MAX_COMPRESSED; return fs_info->max_extent_size; } @@ -837,7 +834,7 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em, ret = true; out: - free_extent_map(next); + btrfs_free_extent_map(next); return ret; } @@ -857,13 +854,14 @@ static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t { struct address_space *mapping = inode->vfs_inode.i_mapping; gfp_t mask = btrfs_alloc_write_mask(mapping); - u64 page_start = (u64)index << PAGE_SHIFT; - u64 page_end = page_start + PAGE_SIZE - 1; + u64 folio_start; + u64 folio_end; struct extent_state *cached_state = NULL; struct folio *folio; int ret; again: + /* TODO: Add order fgp order flags when large folios are fully enabled. 
*/ folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask); if (IS_ERR(folio)) @@ -871,13 +869,16 @@ again: /* * Since we can defragment files opened read-only, we can encounter - * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We - * can't do I/O using huge pages yet, so return an error for now. + * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). + * + * The IO for such large folios is not fully tested, thus return + * an error to reject such folios unless it's an experimental build. + * * Filesystem transparent huge pages are typically only used for * executables that explicitly enable them, so this isn't very * restrictive. */ - if (folio_test_large(folio)) { + if (!IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) && folio_test_large(folio)) { folio_unlock(folio); folio_put(folio); return ERR_PTR(-ETXTBSY); @@ -890,14 +891,15 @@ again: return ERR_PTR(ret); } + folio_start = folio_pos(folio); + folio_end = folio_pos(folio) + folio_size(folio) - 1; /* Wait for any existing ordered extent in the range */ while (1) { struct btrfs_ordered_extent *ordered; - lock_extent(&inode->io_tree, page_start, page_end, &cached_state); - ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); - unlock_extent(&inode->io_tree, page_start, page_end, - &cached_state); + btrfs_lock_extent(&inode->io_tree, folio_start, folio_end, &cached_state); + ordered = btrfs_lookup_ordered_range(inode, folio_start, folio_size(folio)); + btrfs_unlock_extent(&inode->io_tree, folio_start, folio_end, &cached_state); if (!ordered) break; @@ -1027,8 +1029,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode, * very likely resulting in a larger extent after writeback is * triggered (except in a case of free space fragmentation). */ - if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1, - EXTENT_DELALLOC)) + if (btrfs_test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1, + EXTENT_DELALLOC)) goto next; /* @@ -1066,8 +1068,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode, /* Empty target list, no way to merge with last entry */ if (list_empty(target_list)) goto next; - last = list_entry(target_list->prev, - struct defrag_target_range, list); + last = list_last_entry(target_list, + struct defrag_target_range, list); /* Not mergeable with last entry */ if (last->start + last->len != cur) goto next; @@ -1077,7 +1079,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode, add: last_is_target = true; - range_len = min(extent_map_end(em), start + len) - cur; + range_len = min(btrfs_extent_map_end(em), start + len) - cur; /* * This one is a good target, check if it can be merged into * last range of the target list. 
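The list_last_entry() check above implements the usual run-coalescing rule: a candidate range only extends the tail entry when it starts exactly where that entry ends, otherwise a new entry is appended. A self-contained sketch of the rule (the range type and helper name are invented for illustration):

    struct byte_range {
            u64 start;
            u64 len;
            struct list_head list;
    };

    /* Append [cur, cur + len), merging with the tail entry when contiguous. */
    static int add_target_range(struct list_head *targets, u64 cur, u64 len)
    {
            struct byte_range *last;

            if (!list_empty(targets)) {
                    last = list_last_entry(targets, struct byte_range, list);
                    if (last->start + last->len == cur) {
                            last->len += len;
                            return 0;
                    }
            }

            last = kmalloc(sizeof(*last), GFP_NOFS);
            if (!last)
                    return -ENOMEM;
            last->start = cur;
            last->len = len;
            list_add_tail(&last->list, targets);
            return 0;
    }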
@@ -1085,8 +1087,8 @@ add: if (!list_empty(target_list)) { struct defrag_target_range *last; - last = list_entry(target_list->prev, - struct defrag_target_range, list); + last = list_last_entry(target_list, + struct defrag_target_range, list); ASSERT(last->start + last->len <= cur); if (last->start + last->len == cur) { /* Mergeable, enlarge the last entry */ @@ -1099,7 +1101,7 @@ add: /* Allocate new defrag_target_range */ new = kmalloc(sizeof(*new), GFP_NOFS); if (!new) { - free_extent_map(em); + btrfs_free_extent_map(em); ret = -ENOMEM; break; } @@ -1108,8 +1110,8 @@ add: list_add_tail(&new->list, target_list); next: - cur = extent_map_end(em); - free_extent_map(em); + cur = btrfs_extent_map_end(em); + btrfs_free_extent_map(em); } if (ret < 0) { struct defrag_target_range *entry; @@ -1162,27 +1164,31 @@ static int defrag_one_locked_target(struct btrfs_inode *inode, struct extent_changeset *data_reserved = NULL; const u64 start = target->start; const u64 len = target->len; - unsigned long last_index = (start + len - 1) >> PAGE_SHIFT; - unsigned long start_index = start >> PAGE_SHIFT; - unsigned long first_index = folios[0]->index; int ret = 0; - int i; - - ASSERT(last_index - first_index + 1 <= nr_pages); ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len); if (ret < 0) return ret; - clear_extent_bit(&inode->io_tree, start, start + len - 1, - EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | - EXTENT_DEFRAG, cached_state); - set_extent_bit(&inode->io_tree, start, start + len - 1, - EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state); - - /* Update the page status */ - for (i = start_index - first_index; i <= last_index - first_index; i++) { - folio_clear_checked(folios[i]); - btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len); + btrfs_clear_extent_bit(&inode->io_tree, start, start + len - 1, + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | + EXTENT_DEFRAG, cached_state); + btrfs_set_extent_bit(&inode->io_tree, start, start + len - 1, + EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state); + + /* + * Update the page status. + * Due to possible large folios, we have to check all folios one by one. 
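The loop that follows skips any folio that does not intersect the defragged range [start, start + len). Written as a standalone predicate (helper name invented), the overlap test is:

    /* Illustrative only: does [start, start + len) intersect this folio? */
    static bool range_overlaps_folio(u64 start, u64 len, struct folio *folio)
    {
            const u64 folio_start = folio_pos(folio);
            const u64 folio_end = folio_start + folio_size(folio);

            return start < folio_end && start + len > folio_start;
    }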
+ */ + for (int i = 0; i < nr_pages && folios[i]; i++) { + struct folio *folio = folios[i]; + + if (!folio) + break; + if (start >= folio_pos(folio) + folio_size(folio) || + start + len <= folio_pos(folio)) + continue; + btrfs_folio_clamp_clear_checked(fs_info, folio, start, len); + btrfs_folio_clamp_set_dirty(fs_info, folio, start, len); } btrfs_delalloc_release_extents(inode, len); extent_changeset_free(data_reserved); @@ -1200,11 +1206,10 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len, LIST_HEAD(target_list); struct folio **folios; const u32 sectorsize = inode->root->fs_info->sectorsize; - u64 last_index = (start + len - 1) >> PAGE_SHIFT; - u64 start_index = start >> PAGE_SHIFT; - unsigned int nr_pages = last_index - start_index + 1; + u64 cur = start; + const unsigned int nr_pages = ((start + len - 1) >> PAGE_SHIFT) - + (start >> PAGE_SHIFT) + 1; int ret = 0; - int i; ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE); ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize)); @@ -1214,21 +1219,25 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len, return -ENOMEM; /* Prepare all pages */ - for (i = 0; i < nr_pages; i++) { - folios[i] = defrag_prepare_one_folio(inode, start_index + i); + for (int i = 0; cur < start + len && i < nr_pages; i++) { + folios[i] = defrag_prepare_one_folio(inode, cur >> PAGE_SHIFT); if (IS_ERR(folios[i])) { ret = PTR_ERR(folios[i]); - nr_pages = i; + folios[i] = NULL; goto free_folios; } + cur = folio_pos(folios[i]) + folio_size(folios[i]); } - for (i = 0; i < nr_pages; i++) + for (int i = 0; i < nr_pages; i++) { + if (!folios[i]) + break; folio_wait_writeback(folios[i]); + } + /* We should get at least one folio. */ + ASSERT(folios[0]); /* Lock the pages range */ - lock_extent(&inode->io_tree, start_index << PAGE_SHIFT, - (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, - &cached_state); + btrfs_lock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state); /* * Now we have a consistent view about the extent map, re-check * which range really needs to be defragged. @@ -1254,11 +1263,11 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len, kfree(entry); } unlock_extent: - unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT, - (last_index << PAGE_SHIFT) + PAGE_SIZE - 1, - &cached_state); + btrfs_unlock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state); free_folios: - for (i = 0; i < nr_pages; i++) { + for (int i = 0; i < nr_pages; i++) { + if (!folios[i]) + break; folio_unlock(folios[i]); folio_put(folios[i]); } diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index 88e900e5a43d..288e1776c02d 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -111,6 +111,18 @@ * making error handling and cleanup easier. 
*/ +static inline struct btrfs_space_info *data_sinfo_for_inode(const struct btrfs_inode *inode) +{ + struct btrfs_fs_info *fs_info = inode->root->fs_info; + + if (btrfs_is_zoned(fs_info) && btrfs_is_data_reloc_root(inode->root)) { + ASSERT(fs_info->data_sinfo->sub_group[0]->subgroup_id == + BTRFS_SUB_GROUP_DATA_RELOC); + return fs_info->data_sinfo->sub_group[0]; + } + return fs_info->data_sinfo; +} + int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes) { struct btrfs_root *root = inode->root; @@ -123,7 +135,7 @@ int btrfs_alloc_data_chunk_ondemand(const struct btrfs_inode *inode, u64 bytes) if (btrfs_is_free_space_inode(inode)) flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE; - return btrfs_reserve_data_bytes(fs_info, bytes, flush); + return btrfs_reserve_data_bytes(data_sinfo_for_inode(inode), bytes, flush); } int btrfs_check_data_free_space(struct btrfs_inode *inode, @@ -144,14 +156,14 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode, else if (btrfs_is_free_space_inode(inode)) flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE; - ret = btrfs_reserve_data_bytes(fs_info, len, flush); + ret = btrfs_reserve_data_bytes(data_sinfo_for_inode(inode), len, flush); if (ret < 0) return ret; /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */ ret = btrfs_qgroup_reserve_data(inode, reserved, start, len); if (ret < 0) { - btrfs_free_reserved_data_space_noquota(fs_info, len); + btrfs_free_reserved_data_space_noquota(inode, len); extent_changeset_free(*reserved); *reserved = NULL; } else { @@ -168,15 +180,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode, * which we can't sleep and is sure it won't affect qgroup reserved space. * Like clear_bit_hook(). */ -void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info, - u64 len) +void btrfs_free_reserved_data_space_noquota(struct btrfs_inode *inode, u64 len) { - struct btrfs_space_info *data_sinfo; + struct btrfs_fs_info *fs_info = inode->root->fs_info; ASSERT(IS_ALIGNED(len, fs_info->sectorsize)); - data_sinfo = fs_info->data_sinfo; - btrfs_space_info_free_bytes_may_use(data_sinfo, len); + btrfs_space_info_free_bytes_may_use(data_sinfo_for_inode(inode), len); } /* @@ -196,7 +206,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode, round_down(start, fs_info->sectorsize); start = round_down(start, fs_info->sectorsize); - btrfs_free_reserved_data_space_noquota(fs_info, len); + btrfs_free_reserved_data_space_noquota(inode, len); btrfs_qgroup_free_data(inode, reserved, start, len, NULL); } @@ -439,6 +449,29 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes) btrfs_inode_rsv_release(inode, true); } +/* Shrink a previously reserved extent to a new length. 
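The helper defined just below only ever shrinks the outstanding-extent accounting (new_len <= reserved_len). As a hedged usage sketch, a writer would reserve for the worst case up front and trim once the final length is known; the function name and constants here are illustrative:

    static int reserve_then_shrink_sketch(struct btrfs_inode *inode)
    {
            const u64 reserved_len = SZ_1M;   /* worst-case reservation */
            const u64 actual_len = SZ_256K;   /* what was really written */
            int ret;

            ret = btrfs_delalloc_reserve_metadata(inode, reserved_len,
                                                  reserved_len, false);
            if (ret)
                    return ret;

            /* Give back the accounting for the part that was never used. */
            btrfs_delalloc_shrink_extents(inode, reserved_len, actual_len);
            return 0;
    }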
*/ +void btrfs_delalloc_shrink_extents(struct btrfs_inode *inode, u64 reserved_len, u64 new_len) +{ + struct btrfs_fs_info *fs_info = inode->root->fs_info; + const u32 reserved_num_extents = count_max_extents(fs_info, reserved_len); + const u32 new_num_extents = count_max_extents(fs_info, new_len); + const int diff_num_extents = new_num_extents - reserved_num_extents; + + ASSERT(new_len <= reserved_len); + if (new_num_extents == reserved_num_extents) + return; + + spin_lock(&inode->lock); + btrfs_mod_outstanding_extents(inode, diff_num_extents); + btrfs_calculate_inode_block_rsv_size(fs_info, inode); + spin_unlock(&inode->lock); + + if (btrfs_is_testing(fs_info)) + return; + + btrfs_inode_rsv_release(inode, true); +} + /* * Reserve data and metadata space for delalloc * diff --git a/fs/btrfs/delalloc-space.h b/fs/btrfs/delalloc-space.h index 3f32953c0a80..6119c0d3f883 100644 --- a/fs/btrfs/delalloc-space.h +++ b/fs/btrfs/delalloc-space.h @@ -18,8 +18,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode, void btrfs_delalloc_release_space(struct btrfs_inode *inode, struct extent_changeset *reserved, u64 start, u64 len, bool qgroup_free); -void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info, - u64 len); +void btrfs_free_reserved_data_space_noquota(struct btrfs_inode *inode, u64 len); void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, bool qgroup_free); int btrfs_delalloc_reserve_space(struct btrfs_inode *inode, @@ -27,5 +26,6 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode, int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, u64 disk_num_bytes, bool noflush); void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes); +void btrfs_delalloc_shrink_extents(struct btrfs_inode *inode, u64 reserved_len, u64 new_len); #endif /* BTRFS_DELALLOC_SPACE_H */ diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 3f1551d8a5c6..c7cc24a5dd5e 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -119,7 +119,12 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( return NULL; } -/* Will return either the node or PTR_ERR(-ENOMEM) */ +/* + * Look up an existing delayed node associated with @btrfs_inode or create a new + * one and insert it to the delayed nodes of the root. + * + * Return the delayed node, or error pointer on failure. 
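A hedged sketch of how a caller uses the function documented below: look up or create the node, queue work against it, and drop the reference taken by the lookup (the caller name is invented):

    static int queue_delayed_items_sketch(struct btrfs_inode *inode)
    {
            struct btrfs_delayed_node *node;

            node = btrfs_get_or_create_delayed_node(inode);
            if (IS_ERR(node))
                    return PTR_ERR(node);

            /* ... add insertion/deletion items to @node ... */

            btrfs_release_delayed_node(node);
            return 0;
    }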
+ */ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( struct btrfs_inode *btrfs_inode) { @@ -211,17 +216,13 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, static struct btrfs_delayed_node *btrfs_first_delayed_node( struct btrfs_delayed_root *delayed_root) { - struct list_head *p; - struct btrfs_delayed_node *node = NULL; + struct btrfs_delayed_node *node; spin_lock(&delayed_root->lock); - if (list_empty(&delayed_root->node_list)) - goto out; - - p = delayed_root->node_list.next; - node = list_entry(p, struct btrfs_delayed_node, n_list); - refcount_inc(&node->refs); -out: + node = list_first_entry_or_null(&delayed_root->node_list, + struct btrfs_delayed_node, n_list); + if (node) + refcount_inc(&node->refs); spin_unlock(&delayed_root->lock); return node; @@ -293,18 +294,15 @@ static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node) static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( struct btrfs_delayed_root *delayed_root) { - struct list_head *p; - struct btrfs_delayed_node *node = NULL; + struct btrfs_delayed_node *node; spin_lock(&delayed_root->lock); - if (list_empty(&delayed_root->prepare_list)) - goto out; - - p = delayed_root->prepare_list.next; - list_del_init(p); - node = list_entry(p, struct btrfs_delayed_node, p_list); - refcount_inc(&node->refs); -out: + node = list_first_entry_or_null(&delayed_root->prepare_list, + struct btrfs_delayed_node, p_list); + if (node) { + list_del_init(&node->p_list); + refcount_inc(&node->refs); + } spin_unlock(&delayed_root->lock); return node; @@ -454,40 +452,25 @@ static void btrfs_release_delayed_item(struct btrfs_delayed_item *item) static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( struct btrfs_delayed_node *delayed_node) { - struct rb_node *p; - struct btrfs_delayed_item *item = NULL; + struct rb_node *p = rb_first_cached(&delayed_node->ins_root); - p = rb_first_cached(&delayed_node->ins_root); - if (p) - item = rb_entry(p, struct btrfs_delayed_item, rb_node); - - return item; + return rb_entry_safe(p, struct btrfs_delayed_item, rb_node); } static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( struct btrfs_delayed_node *delayed_node) { - struct rb_node *p; - struct btrfs_delayed_item *item = NULL; - - p = rb_first_cached(&delayed_node->del_root); - if (p) - item = rb_entry(p, struct btrfs_delayed_item, rb_node); + struct rb_node *p = rb_first_cached(&delayed_node->del_root); - return item; + return rb_entry_safe(p, struct btrfs_delayed_item, rb_node); } static struct btrfs_delayed_item *__btrfs_next_delayed_item( struct btrfs_delayed_item *item) { - struct rb_node *p; - struct btrfs_delayed_item *next = NULL; - - p = rb_next(&item->rb_node); - if (p) - next = rb_entry(p, struct btrfs_delayed_item, rb_node); + struct rb_node *p = rb_next(&item->rb_node); - return next; + return rb_entry_safe(p, struct btrfs_delayed_item, rb_node); } static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, @@ -1397,17 +1380,17 @@ void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info) WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root)); } -static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq) +static bool could_end_wait(struct btrfs_delayed_root *delayed_root, int seq) { int val = atomic_read(&delayed_root->items_seq); if (val < seq || val >= seq + BTRFS_DELAYED_BATCH) - return 1; + return true; if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) - return 1; + 
return true; - return 0; + return false; } void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info) diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 98c5b61dabe8..739c9e29aaa3 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -331,12 +331,9 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root, struct btrfs_delayed_ref_node *ins) { struct rb_node *node = &ins->ref_node; - struct rb_node *exist; + struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node); - exist = rb_find_add_cached(node, root, cmp_refs_node); - if (exist) - return rb_entry(exist, struct btrfs_delayed_ref_node, ref_node); - return NULL; + return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node); } static struct btrfs_delayed_ref_head *find_first_ref_head( @@ -1339,7 +1336,7 @@ int __init btrfs_delayed_ref_init(void) { btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0); if (!btrfs_delayed_ref_head_cachep) - goto fail; + return -ENOMEM; btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0); if (!btrfs_delayed_ref_node_cachep) diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index f5ae880308d3..78cc23837610 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -262,7 +262,6 @@ enum btrfs_ref_type { BTRFS_REF_NOT_SET, BTRFS_REF_DATA, BTRFS_REF_METADATA, - BTRFS_REF_LAST, } __packed; struct btrfs_ref { diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 53d7d85cb4be..2decb9fff445 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -637,7 +637,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, break; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: - ASSERT(0); + DEBUG_WARN("unexpected STARTED ot SUSPENDED dev-replace state"); ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED; up_write(&dev_replace->rwsem); goto leave; @@ -794,17 +794,17 @@ static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev, lockdep_assert_held(&srcdev->fs_info->chunk_mutex); - while (find_first_extent_bit(&srcdev->alloc_state, start, - &found_start, &found_end, - CHUNK_ALLOCATED, &cached_state)) { - ret = set_extent_bit(&tgtdev->alloc_state, found_start, - found_end, CHUNK_ALLOCATED, NULL); + while (btrfs_find_first_extent_bit(&srcdev->alloc_state, start, + &found_start, &found_end, + CHUNK_ALLOCATED, &cached_state)) { + ret = btrfs_set_extent_bit(&tgtdev->alloc_state, found_start, + found_end, CHUNK_ALLOCATED, NULL); if (ret) break; start = found_end + 1; } - free_extent_state(cached_state); + btrfs_free_extent_state(cached_state); return ret; } @@ -1265,16 +1265,16 @@ static int btrfs_dev_replace_kthread(void *data) return 0; } -int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) +bool __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) { if (!dev_replace->is_valid) - return 0; + return false; switch (dev_replace->replace_state) { case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED: case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED: - return 0; + return false; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: /* @@ -1289,7 +1289,7 @@ int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace) */ break; } - return 1; + return true; } void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount) diff --git a/fs/btrfs/dev-replace.h 
b/fs/btrfs/dev-replace.h index 23e480efe5e6..b35cecf388f2 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -25,7 +25,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info); void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info); int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info); -int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace); +bool __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace); bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev, struct btrfs_block_group *cache, u64 physical); diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c index a374ce7a1813..fe9a4bd7e6e6 100644 --- a/fs/btrfs/direct-io.c +++ b/fs/btrfs/direct-io.c @@ -42,21 +42,21 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, /* Direct lock must be taken before the extent lock. */ if (nowait) { - if (!try_lock_dio_extent(io_tree, lockstart, lockend, cached_state)) + if (!btrfs_try_lock_dio_extent(io_tree, lockstart, lockend, cached_state)) return -EAGAIN; } else { - lock_dio_extent(io_tree, lockstart, lockend, cached_state); + btrfs_lock_dio_extent(io_tree, lockstart, lockend, cached_state); } while (1) { if (nowait) { - if (!try_lock_extent(io_tree, lockstart, lockend, - cached_state)) { + if (!btrfs_try_lock_extent(io_tree, lockstart, lockend, + cached_state)) { ret = -EAGAIN; break; } } else { - lock_extent(io_tree, lockstart, lockend, cached_state); + btrfs_lock_extent(io_tree, lockstart, lockend, cached_state); } /* * We're concerned with the entire range that we're going to be @@ -78,7 +78,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, lockstart, lockend))) break; - unlock_extent(io_tree, lockstart, lockend, cached_state); + btrfs_unlock_extent(io_tree, lockstart, lockend, cached_state); if (ordered) { if (nowait) { @@ -131,7 +131,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, } if (ret) - unlock_dio_extent(io_tree, lockstart, lockend, cached_state); + btrfs_unlock_dio_extent(io_tree, lockstart, lockend, cached_state); return ret; } @@ -151,11 +151,11 @@ static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, } ordered = btrfs_alloc_ordered_extent(inode, start, file_extent, - (1 << type) | - (1 << BTRFS_ORDERED_DIRECT)); + (1U << type) | + (1U << BTRFS_ORDERED_DIRECT)); if (IS_ERR(ordered)) { if (em) { - free_extent_map(em); + btrfs_free_extent_map(em); btrfs_drop_extent_map_range(inode, start, start + file_extent->num_bytes - 1, false); } @@ -204,8 +204,7 @@ again: BTRFS_ORDERED_REGULAR); btrfs_dec_block_group_reservations(fs_info, ins.objectid); if (IS_ERR(em)) - btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, - 1); + btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true); return em; } @@ -246,7 +245,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, else type = BTRFS_ORDERED_NOCOW; len = min(len, em->len - (start - em->start)); - block_start = extent_map_block_start(em) + (start - em->start); + block_start = btrfs_extent_map_block_start(em) + (start - em->start); if (can_nocow_extent(BTRFS_I(inode), start, &len, &file_extent, false) == 1) { @@ -265,7 +264,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, nowait); if (ret < 0) { /* Our caller expects us to free the input extent map. 
*/ - free_extent_map(em); + btrfs_free_extent_map(em); *map = NULL; btrfs_dec_nocow_writers(bg); if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) @@ -278,7 +277,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, &file_extent, type); btrfs_dec_nocow_writers(bg); if (type == BTRFS_ORDERED_PREALLOC) { - free_extent_map(em); + btrfs_free_extent_map(em); *map = em2; em = em2; } @@ -291,7 +290,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, dio_data->nocow_done = true; } else { /* Our caller expects us to free the input extent map. */ - free_extent_map(em); + btrfs_free_extent_map(em); *map = NULL; if (nowait) { @@ -440,8 +439,8 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, start, data_alloc_len, false); if (!ret) dio_data->data_space_reserved = true; - else if (ret && !(BTRFS_I(inode)->flags & - (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) + else if (!(BTRFS_I(inode)->flags & + (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) goto err; } @@ -474,8 +473,8 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, * to buffered IO. Don't blame me, this is the price we pay for using * the generic code. */ - if (extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) { - free_extent_map(em); + if (btrfs_extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) { + btrfs_free_extent_map(em); /* * If we are in a NOWAIT context, return -EAGAIN in order to * fallback to buffered IO. This is not only because we can @@ -516,7 +515,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, * after we have submitted bios for all the extents in the range. */ if ((flags & IOMAP_NOWAIT) && len < length) { - free_extent_map(em); + btrfs_free_extent_map(em); ret = -EAGAIN; goto unlock_err; } @@ -558,13 +557,13 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, iomap->addr = IOMAP_NULL_ADDR; iomap->type = IOMAP_HOLE; } else { - iomap->addr = extent_map_block_start(em) + (start - em->start); + iomap->addr = btrfs_extent_map_block_start(em) + (start - em->start); iomap->type = IOMAP_MAPPED; } iomap->offset = start; iomap->bdev = fs_info->fs_devices->latest_dev->bdev; iomap->length = len; - free_extent_map(em); + btrfs_free_extent_map(em); /* * Reads will hold the EXTENT_DIO_LOCKED bit until the io is completed, @@ -575,13 +574,13 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, if (write) unlock_bits |= EXTENT_DIO_LOCKED; - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - unlock_bits, &cached_state); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, + unlock_bits, &cached_state); /* We didn't use everything, unlock the dio extent for the remainder. */ if (!write && (start + len) < lockend) - unlock_dio_extent(&BTRFS_I(inode)->io_tree, start + len, - lockend, NULL); + btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, start + len, + lockend, NULL); return 0; @@ -591,8 +590,8 @@ unlock_err: * to update this, be explicit that we expect EXTENT_LOCKED and * EXTENT_DIO_LOCKED to be set here, and so that's what we're clearing. 
*/ - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - EXTENT_LOCKED | EXTENT_DIO_LOCKED, &cached_state); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, + EXTENT_LOCKED | EXTENT_DIO_LOCKED, &cached_state); err: if (dio_data->data_space_reserved) { btrfs_free_reserved_data_space(BTRFS_I(inode), @@ -615,8 +614,8 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, if (!write && (iomap->type == IOMAP_HOLE)) { /* If reading from a hole, unlock and return */ - unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos, - pos + length - 1, NULL); + btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos, + pos + length - 1, NULL); return 0; } @@ -627,8 +626,8 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, btrfs_finish_ordered_extent(dio_data->ordered, NULL, pos, length, false); else - unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos, - pos + length - 1, NULL); + btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos, + pos + length - 1, NULL); ret = -ENOTBLK; } if (write) { @@ -660,8 +659,8 @@ static void btrfs_dio_end_io(struct btrfs_bio *bbio) dip->file_offset, dip->bytes, !bio->bi_status); } else { - unlock_dio_extent(&inode->io_tree, dip->file_offset, - dip->file_offset + dip->bytes - 1, NULL); + btrfs_unlock_dio_extent(&inode->io_tree, dip->file_offset, + dip->file_offset + dip->bytes - 1, NULL); } bbio->bio.bi_private = bbio->private; @@ -692,9 +691,9 @@ static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, * a pre-existing one. */ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { - ret = split_extent_map(bbio->inode, bbio->file_offset, - ordered->num_bytes, len, - ordered->disk_bytenr); + ret = btrfs_split_extent_map(bbio->inode, bbio->file_offset, + ordered->num_bytes, len, + ordered->disk_bytenr); if (ret) return ret; } diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index de23c4b3515e..89fe85778115 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -256,7 +256,9 @@ again: * the discard lists. */ ASSERT(block_group->discard_index != - BTRFS_DISCARD_INDEX_UNUSED); + BTRFS_DISCARD_INDEX_UNUSED, + "discard_index=%d", + block_group->discard_index); } else { list_del_init(&block_group->discard_list); btrfs_put_block_group(block_group); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index aa58e0663a5d..1beb9458f622 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -193,10 +193,11 @@ static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, u64 end = min_t(u64, eb->start + eb->len, folio_pos(folio) + eb->folio_size); u32 len = end - start; + phys_addr_t paddr = PFN_PHYS(folio_pfn(folio)) + + offset_in_folio(folio, start); - ret = btrfs_repair_io_failure(fs_info, 0, start, len, - start, folio, offset_in_folio(folio, start), - mirror_num); + ret = btrfs_repair_io_failure(fs_info, 0, start, len, start, + paddr, mirror_num); if (ret) break; } @@ -224,7 +225,6 @@ int btrfs_read_extent_buffer(struct extent_buffer *eb, ASSERT(check); while (1) { - clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); ret = read_extent_buffer_pages(eb, mirror_num, check); if (!ret) break; @@ -256,7 +256,7 @@ int btrfs_read_extent_buffer(struct extent_buffer *eb, /* * Checksum a dirty tree block before IO. 
*/ -blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio) +int btree_csum_one_bio(struct btrfs_bio *bbio) { struct extent_buffer *eb = bbio->private; struct btrfs_fs_info *fs_info = eb->fs_info; @@ -267,9 +267,9 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio) /* Btree blocks are always contiguous on disk. */ if (WARN_ON_ONCE(bbio->file_offset != eb->start)) - return BLK_STS_IOERR; + return -EIO; if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len)) - return BLK_STS_IOERR; + return -EIO; /* * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't @@ -278,13 +278,13 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio) */ if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) { memzero_extent_buffer(eb, 0, eb->len); - return BLK_STS_OK; + return 0; } if (WARN_ON_ONCE(found_start != eb->start)) - return BLK_STS_IOERR; + return -EIO; if (WARN_ON(!btrfs_meta_folio_test_uptodate(eb->folios[0], eb))) - return BLK_STS_IOERR; + return -EIO; ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid, offsetof(struct btrfs_header, fsid), @@ -312,7 +312,7 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio) goto error; } write_extent_buffer(eb, result, 0, fs_info->csum_size); - return BLK_STS_OK; + return 0; error: btrfs_print_tree(eb, 0); @@ -326,7 +326,7 @@ error: */ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) || btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID); - return errno_to_blk_status(ret); + return ret; } static bool check_tree_block_fsid(struct extent_buffer *eb) @@ -452,15 +452,9 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb, goto out; } - /* - * If this is a leaf block and it is corrupt, set the corrupt bit so - * that we don't try and read the other copies of this block, just - * return -EIO. - */ - if (found_level == 0 && btrfs_check_leaf(eb)) { - set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); + /* If this is a leaf block and it is corrupt, just return -EIO. 
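btree_csum_one_bio() above now returns a plain errno instead of blk_status_t, so presumably its caller on the submission side converts back at the block-layer boundary with errno_to_blk_status(). A minimal sketch with an invented caller name:

    static void submit_one_btree_bio_sketch(struct btrfs_bio *bbio)
    {
            int ret = btree_csum_one_bio(bbio);

            if (ret) {
                    btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
                    return;
            }
            /* ... hand the bio off to the block layer ... */
    }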
*/ + if (found_level == 0 && btrfs_check_leaf(eb)) ret = -EIO; - } if (found_level > 0 && btrfs_check_node(eb)) ret = -EIO; @@ -641,11 +635,16 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, } -static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, - u64 objectid) +static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, + u64 objectid, gfp_t flags) { + struct btrfs_root *root; bool dummy = btrfs_is_testing(fs_info); + root = kzalloc(sizeof(*root), flags); + if (!root) + return NULL; + memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); @@ -698,10 +697,10 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, btrfs_set_root_last_log_commit(root, 0); root->anon_dev = 0; if (!dummy) { - extent_io_tree_init(fs_info, &root->dirty_log_pages, - IO_TREE_ROOT_DIRTY_LOG_PAGES); - extent_io_tree_init(fs_info, &root->log_csum_range, - IO_TREE_LOG_CSUM_RANGE); + btrfs_extent_io_tree_init(fs_info, &root->dirty_log_pages, + IO_TREE_ROOT_DIRTY_LOG_PAGES); + btrfs_extent_io_tree_init(fs_info, &root->log_csum_range, + IO_TREE_LOG_CSUM_RANGE); } spin_lock_init(&root->root_item_lock); @@ -712,14 +711,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, list_add_tail(&root->leak_list, &fs_info->allocated_roots); spin_unlock(&fs_info->fs_roots_radix_lock); #endif -} -static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, - u64 objectid, gfp_t flags) -{ - struct btrfs_root *root = kzalloc(sizeof(*root), flags); - if (root) - __setup_root(root, fs_info, objectid); return root; } @@ -1863,8 +1855,8 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) int i; while (!list_empty(&fs_info->dead_roots)) { - gang[0] = list_entry(fs_info->dead_roots.next, - struct btrfs_root, root_list); + gang[0] = list_first_entry(&fs_info->dead_roots, + struct btrfs_root, root_list); list_del(&gang[0]->root_list); if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) @@ -1927,9 +1919,9 @@ static int btrfs_init_btree_inode(struct super_block *sb) inode->i_mapping->a_ops = &btree_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); - extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, - IO_TREE_BTREE_INODE_IO); - extent_map_tree_init(&BTRFS_I(inode)->extent_tree); + btrfs_extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, + IO_TREE_BTREE_INODE_IO); + btrfs_extent_map_tree_init(&BTRFS_I(inode)->extent_tree); BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root); set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); @@ -2002,7 +1994,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan", ordered_flags); fs_info->discard_ctl.discard_workers = - alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE); + alloc_ordered_workqueue("btrfs-discard", WQ_FREEZABLE); if (!(fs_info->workers && fs_info->delalloc_workers && fs_info->flush_workers && @@ -2769,10 +2761,21 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) return ret; } +/* + * Lockdep gets confused between our buffer_tree which requires IRQ locking because + * we modify marks in the IRQ context, and our delayed inode xarray which doesn't + * have these requirements. Use a class key so lockdep doesn't get them mixed up. 
+ */ +static struct lock_class_key buffer_xa_class; + void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) { INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); - INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); + + /* Use the same flags as mapping->i_pages. */ + xa_init_flags(&fs_info->buffer_tree, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT); + lockdep_set_class(&fs_info->buffer_tree.xa_lock, &buffer_xa_class); + INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->delayed_iputs); @@ -2784,7 +2787,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); spin_lock_init(&fs_info->super_lock); - spin_lock_init(&fs_info->buffer_lock); spin_lock_init(&fs_info->unused_bgs_lock); spin_lock_init(&fs_info->treelog_bg_lock); spin_lock_init(&fs_info->zone_active_bgs_lock); @@ -2829,6 +2831,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) BTRFS_BLOCK_RSV_GLOBAL); btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); + btrfs_init_block_rsv(&fs_info->treelog_rsv, BTRFS_BLOCK_RSV_TREELOG); btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); btrfs_init_block_rsv(&fs_info->delayed_block_rsv, BTRFS_BLOCK_RSV_DELOPS); @@ -2862,8 +2865,8 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) rwlock_init(&fs_info->block_group_cache_lock); fs_info->block_group_cache_tree = RB_ROOT_CACHED; - extent_io_tree_init(fs_info, &fs_info->excluded_extents, - IO_TREE_FS_EXCLUDED_EXTENTS); + btrfs_extent_io_tree_init(fs_info, &fs_info->excluded_extents, + IO_TREE_FS_EXCLUDED_EXTENTS); mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->tree_log_mutex); @@ -3315,7 +3318,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device /* * Read super block and check the signature bytes only */ - disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev); + disk_super = btrfs_read_disk_super(fs_devices->latest_dev->bdev, 0, false); if (IS_ERR(disk_super)) { ret = PTR_ERR(disk_super); goto fail_alloc; @@ -3710,85 +3713,6 @@ static void btrfs_end_super_write(struct bio *bio) bio_put(bio); } -struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, - int copy_num, bool drop_cache) -{ - struct btrfs_super_block *super; - struct page *page; - u64 bytenr, bytenr_orig; - struct address_space *mapping = bdev->bd_mapping; - int ret; - - bytenr_orig = btrfs_sb_offset(copy_num); - ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr); - if (ret == -ENOENT) - return ERR_PTR(-EINVAL); - else if (ret) - return ERR_PTR(ret); - - if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev)) - return ERR_PTR(-EINVAL); - - if (drop_cache) { - /* This should only be called with the primary sb. */ - ASSERT(copy_num == 0); - - /* - * Drop the page of the primary superblock, so later read will - * always read from the device. 
- */ - invalidate_inode_pages2_range(mapping, - bytenr >> PAGE_SHIFT, - (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT); - } - - page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); - if (IS_ERR(page)) - return ERR_CAST(page); - - super = page_address(page); - if (btrfs_super_magic(super) != BTRFS_MAGIC) { - btrfs_release_disk_super(super); - return ERR_PTR(-ENODATA); - } - - if (btrfs_super_bytenr(super) != bytenr_orig) { - btrfs_release_disk_super(super); - return ERR_PTR(-EINVAL); - } - - return super; -} - - -struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev) -{ - struct btrfs_super_block *super, *latest = NULL; - int i; - u64 transid = 0; - - /* we would like to check all the supers, but that would make - * a btrfs mount succeed after a mkfs from a different FS. - * So, we need to add a special mount option to scan for - * later supers, using BTRFS_SUPER_MIRROR_MAX instead - */ - for (i = 0; i < 1; i++) { - super = btrfs_read_dev_one_super(bdev, i, false); - if (IS_ERR(super)) - continue; - - if (!latest || btrfs_super_generation(super) > transid) { - if (latest) - btrfs_release_disk_super(super); - - latest = super; - transid = btrfs_super_generation(super); - } - } - - return super; -} - /* * Write superblock @sb to the @device. Do not wait for completion, all the * folios we use for writing are locked. @@ -3828,8 +3752,8 @@ static int write_dev_supers(struct btrfs_device *device, continue; } else if (ret < 0) { btrfs_err(device->fs_info, - "couldn't get super block location for mirror %d", - i); + "couldn't get super block location for mirror %d error %d", + i, ret); atomic_inc(&device->sb_write_errors); continue; } @@ -3848,8 +3772,8 @@ static int write_dev_supers(struct btrfs_device *device, GFP_NOFS); if (IS_ERR(folio)) { btrfs_err(device->fs_info, - "couldn't get super block page for bytenr %llu", - bytenr); + "couldn't get super block page for bytenr %llu error %ld", + bytenr, PTR_ERR(folio)); atomic_inc(&device->sb_write_errors); continue; } @@ -4244,8 +4168,9 @@ static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info) u64 found_end; found = true; - while (find_first_extent_bit(&trans->dirty_pages, cur, - &found_start, &found_end, EXTENT_DIRTY, &cached)) { + while (btrfs_find_first_extent_bit(&trans->dirty_pages, cur, + &found_start, &found_end, + EXTENT_DIRTY, &cached)) { dirty_bytes += found_end + 1 - found_start; cur = found_end + 1; } @@ -4441,7 +4366,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); if (btrfs_check_quota_leak(fs_info)) { - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN("qgroup reserved space leaked"); btrfs_err(fs_info, "qgroup reserved space leaked"); } @@ -4698,9 +4623,9 @@ static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, u64 start = 0; u64 end; - while (find_first_extent_bit(dirty_pages, start, &start, &end, - mark, NULL)) { - clear_extent_bits(dirty_pages, start, end, mark); + while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end, + mark, NULL)) { + btrfs_clear_extent_bits(dirty_pages, start, end, mark); while (start <= end) { eb = find_extent_buffer(fs_info, start); start += fs_info->nodesize; @@ -4733,14 +4658,14 @@ static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, * the same extent range. 
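The loops above keep the usual shape for walking an extent-io tree: find the first range with the requested bit set at or after the cursor, consume it, continue from one past its end, and free the cached state when done. A minimal sketch using the renamed helpers (the function name is invented):

    static u64 count_dirty_bytes_sketch(struct extent_io_tree *tree)
    {
            struct extent_state *cached = NULL;
            u64 found_start;
            u64 found_end;
            u64 cur = 0;
            u64 bytes = 0;

            while (btrfs_find_first_extent_bit(tree, cur, &found_start, &found_end,
                                               EXTENT_DIRTY, &cached)) {
                    bytes += found_end + 1 - found_start;
                    cur = found_end + 1;
            }
            btrfs_free_extent_state(cached);
            return bytes;
    }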
*/ mutex_lock(&fs_info->unused_bg_unpin_mutex); - if (!find_first_extent_bit(unpin, 0, &start, &end, - EXTENT_DIRTY, &cached_state)) { + if (!btrfs_find_first_extent_bit(unpin, 0, &start, &end, + EXTENT_DIRTY, &cached_state)) { mutex_unlock(&fs_info->unused_bg_unpin_mutex); break; } - clear_extent_dirty(unpin, start, end, &cached_state); - free_extent_state(cached_state); + btrfs_clear_extent_dirty(unpin, start, end, &cached_state); + btrfs_free_extent_state(cached_state); btrfs_error_unpin_extent_range(fs_info, start, end); mutex_unlock(&fs_info->unused_bg_unpin_mutex); cond_resched(); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 587842991b24..864a55a96226 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -58,9 +58,6 @@ int btrfs_validate_super(const struct btrfs_fs_info *fs_info, const struct btrfs_super_block *sb, int mirror_num); int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount); int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors); -struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev); -struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, - int copy_num, bool drop_cache); int btrfs_commit_super(struct btrfs_fs_info *fs_info); struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, const struct btrfs_key *key); @@ -114,7 +111,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, int btrfs_read_extent_buffer(struct extent_buffer *buf, const struct btrfs_tree_parent_check *check); -blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio); +int btree_csum_one_bio(struct btrfs_bio *bbio); int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c index 13de6af279e5..b1b96eb5f64e 100644 --- a/fs/btrfs/extent-io-tree.c +++ b/fs/btrfs/extent-io-tree.c @@ -42,7 +42,7 @@ static inline void btrfs_extent_state_leak_debug_check(void) struct extent_state *state; while (!list_empty(&states)) { - state = list_entry(states.next, struct extent_state, leak_list); + state = list_first_entry(&states, struct extent_state, leak_list); pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", state->start, state->end, state->state, extent_state_in_tree(state), @@ -59,13 +59,12 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller, struct extent_io_tree *tree, u64 start, u64 end) { - const struct btrfs_inode *inode; + const struct btrfs_inode *inode = tree->inode; u64 isize; if (tree->owner != IO_TREE_INODE_IO) return; - inode = extent_io_tree_to_inode_const(tree); isize = i_size_read(&inode->vfs_inode); if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { btrfs_debug_rl(inode->root->fs_info, @@ -80,25 +79,8 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller, #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) #endif - -/* - * The only tree allowed to set the inode is IO_TREE_INODE_IO. - */ -static bool is_inode_io_tree(const struct extent_io_tree *tree) -{ - return tree->owner == IO_TREE_INODE_IO; -} - -/* Return the inode if it's valid for the given tree, otherwise NULL. */ -struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree) -{ - if (tree->owner == IO_TREE_INODE_IO) - return tree->inode; - return NULL; -} - /* Read-only access to the inode. 
*/ -const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree) +const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree) { if (tree->owner == IO_TREE_INODE_IO) return tree->inode; @@ -106,15 +88,15 @@ const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_t } /* For read-only access to fs_info. */ -const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree) +const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree) { if (tree->owner == IO_TREE_INODE_IO) return tree->inode->root->fs_info; return tree->fs_info; } -void extent_io_tree_init(struct btrfs_fs_info *fs_info, - struct extent_io_tree *tree, unsigned int owner) +void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info, + struct extent_io_tree *tree, unsigned int owner) { tree->state = RB_ROOT; spin_lock_init(&tree->lock); @@ -129,7 +111,7 @@ void extent_io_tree_init(struct btrfs_fs_info *fs_info, * aren't any waiters on any extent state record (EXTENT_LOCK_BITS are never * set on any extent state when calling this function). */ -void extent_io_tree_release(struct extent_io_tree *tree) +void btrfs_extent_io_tree_release(struct extent_io_tree *tree) { struct rb_root root; struct extent_state *state; @@ -148,7 +130,7 @@ void extent_io_tree_release(struct extent_io_tree *tree) * (see wait_extent_bit()). */ ASSERT(!waitqueue_active(&state->wq)); - free_extent_state(state); + btrfs_free_extent_state(state); cond_resched_lock(&tree->lock); } /* @@ -176,7 +158,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask) btrfs_leak_debug_add_state(state); refcount_set(&state->refs, 1); init_waitqueue_head(&state->wq); - trace_alloc_extent_state(state, mask, _RET_IP_); + trace_btrfs_alloc_extent_state(state, mask, _RET_IP_); return state; } @@ -188,14 +170,14 @@ static struct extent_state *alloc_extent_state_atomic(struct extent_state *preal return prealloc; } -void free_extent_state(struct extent_state *state) +void btrfs_free_extent_state(struct extent_state *state) { if (!state) return; if (refcount_dec_and_test(&state->refs)) { WARN_ON(extent_state_in_tree(state)); btrfs_leak_debug_del_state(state); - trace_free_extent_state(state, _RET_IP_); + trace_btrfs_free_extent_state(state, _RET_IP_); kmem_cache_free(extent_state_cache, state); } } @@ -222,38 +204,34 @@ static inline struct extent_state *next_state(struct extent_state *state) { struct rb_node *next = rb_next(&state->rb_node); - if (next) - return rb_entry(next, struct extent_state, rb_node); - else - return NULL; + return rb_entry_safe(next, struct extent_state, rb_node); } static inline struct extent_state *prev_state(struct extent_state *state) { struct rb_node *next = rb_prev(&state->rb_node); - if (next) - return rb_entry(next, struct extent_state, rb_node); - else - return NULL; + return rb_entry_safe(next, struct extent_state, rb_node); } /* - * Search @tree for an entry that contains @offset. Such entry would have - * entry->start <= offset && entry->end >= offset. + * Search @tree for an entry that contains @offset or if none exists for the + * first entry that starts and ends after that offset. 
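To make the lookup contract concrete, a small worked example (the state ranges are invented):

    /*
     * With states [0, 4095] and [8192, 12287] in the tree:
     *
     *   offset   100 -> returns [0, 4095]      (an entry contains the offset)
     *   offset  6000 -> returns [8192, 12287]  (first entry starting after it)
     *   offset 20000 -> returns NULL
     */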
* * @tree: the tree to search - * @offset: offset that should fall within an entry in @tree + * @offset: search offset * @node_ret: pointer where new node should be anchored (used when inserting an * entry in the tree) * @parent_ret: points to entry which would have been the parent of the entry, * containing @offset * - * Return a pointer to the entry that contains @offset byte address and don't change - * @node_ret and @parent_ret. + * Return a pointer to the entry that contains @offset byte address. + * + * If no such entry exists, return the first entry that starts and ends after + * @offset if one exists, otherwise NULL. * - * If no such entry exists, return pointer to entry that ends before @offset - * and fill parameters @node_ret and @parent_ret, ie. does not return NULL. + * If the returned entry starts at @offset, then @node_ret and @parent_ret + * aren't changed. */ static inline struct extent_state *tree_search_for_insert(struct extent_io_tree *tree, u64 offset, @@ -282,7 +260,11 @@ static inline struct extent_state *tree_search_for_insert(struct extent_io_tree if (parent_ret) *parent_ret = prev; - /* Search neighbors until we find the first one past the end */ + /* + * Return either the current entry if it contains offset (it ends after + * or at offset) or the first entry that starts and ends after offset if + * one exists, or NULL. + */ while (entry && offset > entry->end) entry = next_state(entry); @@ -351,7 +333,7 @@ static void __cold extent_io_tree_panic(const struct extent_io_tree *tree, const char *opname, int err) { - btrfs_panic(extent_io_tree_to_fs_info(tree), err, + btrfs_panic(btrfs_extent_io_tree_to_fs_info(tree), err, "extent io tree error on %s state start %llu end %llu", opname, state->start, state->end); } @@ -362,13 +344,12 @@ static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *s prev = prev_state(state); if (prev && prev->end == state->start - 1 && prev->state == state->state) { - if (is_inode_io_tree(tree)) - btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree), - state, prev); + if (tree->owner == IO_TREE_INODE_IO) + btrfs_merge_delalloc_extent(tree->inode, state, prev); state->start = prev->start; rb_erase(&prev->rb_node, &tree->state); RB_CLEAR_NODE(&prev->rb_node); - free_extent_state(prev); + btrfs_free_extent_state(prev); } } @@ -378,13 +359,12 @@ static void merge_next_state(struct extent_io_tree *tree, struct extent_state *s next = next_state(state); if (next && next->start == state->end + 1 && next->state == state->state) { - if (is_inode_io_tree(tree)) - btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree), - state, next); + if (tree->owner == IO_TREE_INODE_IO) + btrfs_merge_delalloc_extent(tree->inode, state, next); state->end = next->end; rb_erase(&next->rb_node, &tree->state); RB_CLEAR_NODE(&next->rb_node); - free_extent_state(next); + btrfs_free_extent_state(next); } } @@ -413,8 +393,8 @@ static void set_state_bits(struct extent_io_tree *tree, u32 bits_to_set = bits & ~EXTENT_CTLBITS; int ret; - if (is_inode_io_tree(tree)) - btrfs_set_delalloc_extent(extent_io_tree_to_inode(tree), state, bits); + if (tree->owner == IO_TREE_INODE_IO) + btrfs_set_delalloc_extent(tree->inode, state, bits); ret = add_extent_changeset(state, bits_to_set, changeset, 1); BUG_ON(ret < 0); @@ -459,10 +439,9 @@ static struct extent_state *insert_state(struct extent_io_tree *tree, if (state->end < entry->start) { if (try_merge && end == entry->start && state->state == entry->state) { - if (is_inode_io_tree(tree)) - 
btrfs_merge_delalloc_extent( - extent_io_tree_to_inode(tree), - state, entry); + if (tree->owner == IO_TREE_INODE_IO) + btrfs_merge_delalloc_extent(tree->inode, + state, entry); entry->start = state->start; merge_prev_state(tree, entry); state->state = 0; @@ -472,10 +451,9 @@ static struct extent_state *insert_state(struct extent_io_tree *tree, } else if (state->end > entry->end) { if (try_merge && entry->end == start && state->state == entry->state) { - if (is_inode_io_tree(tree)) - btrfs_merge_delalloc_extent( - extent_io_tree_to_inode(tree), - state, entry); + if (tree->owner == IO_TREE_INODE_IO) + btrfs_merge_delalloc_extent(tree->inode, + state, entry); entry->end = state->end; merge_next_state(tree, entry); state->state = 0; @@ -527,9 +505,8 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, struct rb_node *parent = NULL; struct rb_node **node; - if (is_inode_io_tree(tree)) - btrfs_split_delalloc_extent(extent_io_tree_to_inode(tree), orig, - split); + if (tree->owner == IO_TREE_INODE_IO) + btrfs_split_delalloc_extent(tree->inode, orig, split); prealloc->start = orig->start; prealloc->end = split - 1; @@ -549,7 +526,7 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, } else if (prealloc->end > entry->end) { node = &(*node)->rb_right; } else { - free_extent_state(prealloc); + btrfs_free_extent_state(prealloc); return -EEXIST; } } @@ -561,6 +538,18 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, } /* + * Use this during tree iteration to avoid doing next node searches when it's + * not needed (the current record ends at or after the target range's end). + */ +static inline struct extent_state *next_search_state(struct extent_state *state, u64 end) +{ + if (state->end < end) + return next_state(state); + + return NULL; +} + +/* * Utility function to clear some bits in an extent state struct. It will * optionally wake up anyone waiting on this state (wake == 1). * @@ -569,16 +558,15 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, */ static struct extent_state *clear_state_bit(struct extent_io_tree *tree, struct extent_state *state, - u32 bits, int wake, + u32 bits, int wake, u64 end, struct extent_changeset *changeset) { struct extent_state *next; u32 bits_to_clear = bits & ~EXTENT_CTLBITS; int ret; - if (is_inode_io_tree(tree)) - btrfs_clear_delalloc_extent(extent_io_tree_to_inode(tree), state, - bits); + if (tree->owner == IO_TREE_INODE_IO) + btrfs_clear_delalloc_extent(tree->inode, state, bits); ret = add_extent_changeset(state, bits_to_clear, changeset, 0); BUG_ON(ret < 0); @@ -586,17 +574,17 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree, if (wake) wake_up(&state->wq); if (state->state == 0) { - next = next_state(state); + next = next_search_state(state, end); if (extent_state_in_tree(state)) { rb_erase(&state->rb_node, &tree->state); RB_CLEAR_NODE(&state->rb_node); - free_extent_state(state); + btrfs_free_extent_state(state); } else { WARN_ON(1); } } else { merge_state(tree, state); - next = next_state(state); + next = next_search_state(state, end); } return next; } @@ -620,18 +608,18 @@ static void set_gfp_mask_from_bits(u32 *bits, gfp_t *mask) * * This takes the tree lock, and returns 0 on success and < 0 on error. 
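The next_search_state() helper added here encodes a small but recurring optimization: when walking ordered, non-overlapping extent state records, there is no point looking up the successor once the current record already ends at or beyond the end of the range being processed. Below is a minimal user-space sketch of the same idea; the struct range list, its values and next_search_range() are invented for illustration, while the real code walks an rbtree of extent_state records under tree->lock.

/* Toy model of the "bounded successor" idea behind next_search_state(). */
#include <stdio.h>
#include <stdint.h>

struct range {
	uint64_t start;
	uint64_t end;		/* inclusive */
	struct range *next;	/* next record in ascending order */
};

/* Return the successor only if @cur does not already cover @search_end. */
static struct range *next_search_range(struct range *cur, uint64_t search_end)
{
	if (cur->end < search_end)
		return cur->next;
	return NULL;	/* nothing past @search_end is interesting */
}

int main(void)
{
	struct range c = { 200, 299, NULL };
	struct range b = { 100, 199, &c };
	struct range a = { 0, 99, &b };
	const uint64_t search_end = 150;

	/* Visits [0,99] and [100,199], never touches [200,299]. */
	for (struct range *r = &a; r; r = next_search_range(r, search_end))
		printf("visit [%llu, %llu]\n",
		       (unsigned long long)r->start,
		       (unsigned long long)r->end);
	return 0;
}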
*/ -int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_state **cached_state, - struct extent_changeset *changeset) +int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_state **cached_state, + struct extent_changeset *changeset) { struct extent_state *state; struct extent_state *cached; struct extent_state *prealloc = NULL; u64 last_end; - int err; - int clear = 0; - int wake; - int delete = (bits & EXTENT_CLEAR_ALL_BITS); + int ret = 0; + bool clear; + bool wake; + const bool delete = (bits & EXTENT_CLEAR_ALL_BITS); gfp_t mask; set_gfp_mask_from_bits(&bits, &mask); @@ -644,9 +632,8 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, if (bits & EXTENT_DELALLOC) bits |= EXTENT_NORESERVE; - wake = ((bits & EXTENT_LOCK_BITS) ? 1 : 0); - if (bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY)) - clear = 1; + wake = (bits & EXTENT_LOCK_BITS); + clear = (bits & (EXTENT_LOCK_BITS | EXTENT_BOUNDARY)); again: if (!prealloc) { /* @@ -676,7 +663,7 @@ again: goto hit_next; } if (clear) - free_extent_state(cached); + btrfs_free_extent_state(cached); } /* This search will find the extents that end after our range starts. */ @@ -691,7 +678,7 @@ hit_next: /* The state doesn't have the wanted bits, go ahead. */ if (!(state->state & bits)) { - state = next_state(state); + state = next_search_state(state, end); goto next; } @@ -714,18 +701,24 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; - err = split_state(tree, state, prealloc, start); - if (err) - extent_io_tree_panic(tree, state, "split", err); - + ret = split_state(tree, state, prealloc, start); prealloc = NULL; - if (err) + if (ret) { + extent_io_tree_panic(tree, state, "split", ret); goto out; + } if (state->end <= end) { - state = clear_state_bit(tree, state, bits, wake, changeset); + state = clear_state_bit(tree, state, bits, wake, end, + changeset); goto next; } - goto search_again; + if (need_resched()) + goto search_again; + /* + * Fallthrough and try atomic extent state allocation if needed. + * If it fails we'll jump to 'search_again' and retry the allocation + * in non-atomic mode and start the search again.
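The 'again:'/'search_again:' structure this hunk reworks follows a common pattern: preallocate a spare extent_state with a sleeping allocation before taking tree->lock, fall back to a non-sleeping allocation while the lock is held, and, if that fails, drop the lock and restart with the sleeping allocation. A compressed stand-alone sketch of that control flow follows; alloc_state_blocking()/alloc_state_atomic() and the pthread mutex are stand-ins for alloc_extent_state()/alloc_extent_state_atomic() and the tree spinlock, and the splits are only simulated.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

struct state { int dummy; };

/* Stand-in for alloc_extent_state_atomic(): must not sleep, may fail. */
static struct state *alloc_state_atomic(void)
{
	return malloc(sizeof(struct state));
}

/* Stand-in for alloc_extent_state(GFP_NOFS): may sleep, expected to succeed. */
static struct state *alloc_state_blocking(void)
{
	return malloc(sizeof(struct state));
}

static void clear_range(void)
{
	struct state *prealloc = NULL;

again:
	if (!prealloc)
		prealloc = alloc_state_blocking();	/* no lock held, sleeping is fine */

	pthread_mutex_lock(&tree_lock);

	/* First split on this pass consumes the preallocated spare. */
	free(prealloc);
	prealloc = NULL;

	/* A second split is needed: try a non-sleeping allocation in place. */
	prealloc = alloc_state_atomic();
	if (!prealloc) {
		/* Drop the lock, redo the sleeping allocation and restart. */
		pthread_mutex_unlock(&tree_lock);
		goto again;
	}
	free(prealloc);		/* placeholder for the real split/insert */
	pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
	clear_range();
	return 0;
}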
+ */ } /* * | ---- desired range ---- | @@ -736,30 +729,31 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; - err = split_state(tree, state, prealloc, end + 1); - if (err) - extent_io_tree_panic(tree, state, "split", err); + ret = split_state(tree, state, prealloc, end + 1); + if (ret) { + extent_io_tree_panic(tree, state, "split", ret); + prealloc = NULL; + goto out; + } if (wake) wake_up(&state->wq); - clear_state_bit(tree, prealloc, bits, wake, changeset); + clear_state_bit(tree, prealloc, bits, wake, end, changeset); prealloc = NULL; goto out; } - state = clear_state_bit(tree, state, bits, wake, changeset); + state = clear_state_bit(tree, state, bits, wake, end, changeset); next: - if (last_end == (u64)-1) + if (last_end >= end) goto out; start = last_end + 1; - if (start <= end && state && !need_resched()) + if (state && !need_resched()) goto hit_next; search_again: - if (start > end) - goto out; spin_unlock(&tree->lock); if (gfpflags_allow_blocking(mask)) cond_resched(); @@ -767,10 +761,9 @@ search_again: out: spin_unlock(&tree->lock); - if (prealloc) - free_extent_state(prealloc); + btrfs_free_extent_state(prealloc); - return 0; + return ret; } @@ -820,7 +813,7 @@ process_node: schedule(); spin_lock(&tree->lock); finish_wait(&state->wq, &wait); - free_extent_state(state); + btrfs_free_extent_state(state); goto again; } start = state->end + 1; @@ -838,7 +831,7 @@ out: if (cached_state && *cached_state) { state = *cached_state; *cached_state = NULL; - free_extent_state(state); + btrfs_free_extent_state(state); } spin_unlock(&tree->lock); } @@ -877,7 +870,7 @@ static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *t */ state = tree_search(tree, start); while (state) { - if (state->end >= start && (state->state & bits)) + if (state->state & bits) return state; state = next_state(state); } @@ -892,9 +885,9 @@ static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *t * Return true if we find something, and update @start_ret and @end_ret. * Return false if we found nothing. */ -bool find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, u32 bits, - struct extent_state **cached_state) +bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, u32 bits, + struct extent_state **cached_state) { struct extent_state *state; bool ret = false; @@ -914,13 +907,13 @@ bool find_first_extent_bit(struct extent_io_tree *tree, u64 start, * again. If we haven't found any, clear as well since * it's now useless. */ - free_extent_state(*cached_state); + btrfs_free_extent_state(*cached_state); *cached_state = NULL; if (state) goto got_it; goto out; } - free_extent_state(*cached_state); + btrfs_free_extent_state(*cached_state); *cached_state = NULL; } @@ -952,14 +945,17 @@ out: * contiguous area for given bits. We will search to the first bit we find, and * then walk down the tree until we find a non-contiguous area. The area * returned will be the full contiguous area with the bits set. + * + * Returns true if we found a range with the given bits set, in which case + * @start_ret and @end_ret are updated, or false if no range was found. 
*/ -int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, u32 bits) +bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, u32 bits) { struct extent_state *state; - int ret = 1; + bool ret = false; - ASSERT(!btrfs_fs_incompat(extent_io_tree_to_fs_info(tree), NO_HOLES)); + ASSERT(!btrfs_fs_incompat(btrfs_extent_io_tree_to_fs_info(tree), NO_HOLES)); spin_lock(&tree->lock); state = find_first_extent_bit_state(tree, start, bits); @@ -971,7 +967,7 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, break; *end_ret = state->end; } - ret = 0; + ret = true; } spin_unlock(&tree->lock); return ret; @@ -1046,11 +1042,11 @@ out: * * [start, end] is inclusive This takes the tree lock. */ -static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, u64 *failed_start, - struct extent_state **failed_state, - struct extent_state **cached_state, - struct extent_changeset *changeset) +static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, u64 *failed_start, + struct extent_state **failed_state, + struct extent_state **cached_state, + struct extent_changeset *changeset) { struct extent_state *state; struct extent_state *prealloc = NULL; @@ -1129,12 +1125,11 @@ hit_next: set_state_bits(tree, state, bits, changeset); cache_state(state, cached_state); merge_state(tree, state); - if (last_end == (u64)-1) + if (last_end >= end) goto out; start = last_end + 1; state = next_state(state); - if (start < end && state && state->start == start && - !need_resched()) + if (state && state->start == start && !need_resched()) goto hit_next; goto search_again; } @@ -1186,12 +1181,11 @@ hit_next: set_state_bits(tree, state, bits, changeset); cache_state(state, cached_state); merge_state(tree, state); - if (last_end == (u64)-1) + if (last_end >= end) goto out; start = last_end + 1; state = next_state(state); - if (start < end && state && state->start == start && - !need_resched()) + if (state && state->start == start && !need_resched()) goto hit_next; } goto search_again; @@ -1204,14 +1198,8 @@ hit_next: * extent we found. */ if (state->start > start) { - u64 this_end; struct extent_state *inserted_state; - if (end < last_start) - this_end = end; - else - this_end = last_start - 1; - prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; @@ -1221,17 +1209,38 @@ hit_next: * extent. */ prealloc->start = start; - prealloc->end = this_end; + if (end < last_start) + prealloc->end = end; + else + prealloc->end = last_start - 1; + inserted_state = insert_state(tree, prealloc, bits, changeset); if (IS_ERR(inserted_state)) { ret = PTR_ERR(inserted_state); extent_io_tree_panic(tree, prealloc, "insert", ret); + goto out; } cache_state(inserted_state, cached_state); if (inserted_state == prealloc) prealloc = NULL; - start = this_end + 1; + start = inserted_state->end + 1; + + /* Beyond target range, stop. */ + if (start > end) + goto out; + + if (need_resched()) + goto search_again; + + state = next_search_state(inserted_state, end); + /* + * If there's a next state, whether contiguous or not, we don't + * need to unlock and start search again. If it's not contiguous + * we will end up here and try to allocate a prealloc state and insert.
+ */ + if (state) + goto hit_next; goto search_again; } /* @@ -1252,8 +1261,11 @@ hit_next: if (!prealloc) goto search_again; ret = split_state(tree, state, prealloc, end + 1); - if (ret) + if (ret) { extent_io_tree_panic(tree, state, "split", ret); + prealloc = NULL; + goto out; + } set_state_bits(tree, prealloc, bits, changeset); cache_state(prealloc, cached_state); @@ -1272,18 +1284,16 @@ search_again: out: spin_unlock(&tree->lock); - if (prealloc) - free_extent_state(prealloc); + btrfs_free_extent_state(prealloc); return ret; } -int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_state **cached_state) +int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_state **cached_state) { - return __set_extent_bit(tree, start, end, bits, NULL, NULL, - cached_state, NULL); + return set_extent_bit(tree, start, end, bits, NULL, NULL, cached_state, NULL); } /* @@ -1304,9 +1314,9 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, * * All allocations are done with GFP_NOFS. */ -int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, u32 clear_bits, - struct extent_state **cached_state) +int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, u32 clear_bits, + struct extent_state **cached_state) { struct extent_state *state; struct extent_state *prealloc = NULL; @@ -1374,12 +1384,11 @@ hit_next: if (state->start == start && state->end <= end) { set_state_bits(tree, state, bits, NULL); cache_state(state, cached_state); - state = clear_state_bit(tree, state, clear_bits, 0, NULL); - if (last_end == (u64)-1) + state = clear_state_bit(tree, state, clear_bits, 0, end, NULL); + if (last_end >= end) goto out; start = last_end + 1; - if (start < end && state && state->start == start && - !need_resched()) + if (state && state->start == start && !need_resched()) goto hit_next; goto search_again; } @@ -1406,20 +1415,19 @@ hit_next: goto out; } ret = split_state(tree, state, prealloc, start); - if (ret) - extent_io_tree_panic(tree, state, "split", ret); prealloc = NULL; - if (ret) + if (ret) { + extent_io_tree_panic(tree, state, "split", ret); goto out; + } if (state->end <= end) { set_state_bits(tree, state, bits, NULL); cache_state(state, cached_state); - state = clear_state_bit(tree, state, clear_bits, 0, NULL); - if (last_end == (u64)-1) + state = clear_state_bit(tree, state, clear_bits, 0, end, NULL); + if (last_end >= end) goto out; start = last_end + 1; - if (start < end && state && state->start == start && - !need_resched()) + if (state && state->start == start && !need_resched()) goto hit_next; } goto search_again; @@ -1432,14 +1440,8 @@ hit_next: * extent we found. */ if (state->start > start) { - u64 this_end; struct extent_state *inserted_state; - if (end < last_start) - this_end = end; - else - this_end = last_start - 1; - prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) { ret = -ENOMEM; @@ -1451,16 +1453,37 @@ hit_next: * extent. 
*/ prealloc->start = start; - prealloc->end = this_end; + if (end < last_start) + prealloc->end = end; + else + prealloc->end = last_start - 1; + inserted_state = insert_state(tree, prealloc, bits, NULL); if (IS_ERR(inserted_state)) { ret = PTR_ERR(inserted_state); extent_io_tree_panic(tree, prealloc, "insert", ret); + goto out; } cache_state(inserted_state, cached_state); if (inserted_state == prealloc) prealloc = NULL; - start = this_end + 1; + start = inserted_state->end + 1; + + /* Beyond target range, stop. */ + if (start > end) + goto out; + + if (need_resched()) + goto search_again; + + state = next_search_state(inserted_state, end); + /* + * If there's a next state, whether contiguous or not, we don't + * need to unlock and start search again. If it's not contiguous + * we will end up here and try to allocate a prealloc state and insert. + */ + if (state) + goto hit_next; goto search_again; } /* @@ -1477,12 +1500,15 @@ hit_next: } ret = split_state(tree, state, prealloc, end + 1); - if (ret) + if (ret) { extent_io_tree_panic(tree, state, "split", ret); + prealloc = NULL; + goto out; + } set_state_bits(tree, prealloc, bits, NULL); cache_state(prealloc, cached_state); - clear_state_bit(tree, prealloc, clear_bits, 0, NULL); + clear_state_bit(tree, prealloc, clear_bits, 0, end, NULL); prealloc = NULL; goto out; } @@ -1497,8 +1523,7 @@ search_again: out: spin_unlock(&tree->lock); - if (prealloc) - free_extent_state(prealloc); + btrfs_free_extent_state(prealloc); return ret; } @@ -1518,8 +1543,8 @@ out: * spans (last_range_end, end of device]. In this case it's up to the caller to * trim @end_ret to the appropriate size. */ -void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, u32 bits) +void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, u32 bits) { struct extent_state *state; struct extent_state *prev = NULL, *next = NULL; @@ -1636,10 +1661,10 @@ out: * all given bits set. If the returned number of bytes is greater than zero * then @start is updated with the offset of the first byte with the bits set. */ -u64 count_range_bits(struct extent_io_tree *tree, - u64 *start, u64 search_end, u64 max_bytes, - u32 bits, int contig, - struct extent_state **cached_state) +u64 btrfs_count_range_bits(struct extent_io_tree *tree, + u64 *start, u64 search_end, u64 max_bytes, + u32 bits, int contig, + struct extent_state **cached_state) { struct extent_state *state = NULL; struct extent_state *cached; @@ -1710,7 +1735,7 @@ search: } if (cached_state) { - free_extent_state(*cached_state); + btrfs_free_extent_state(*cached_state); *cached_state = state; if (state) refcount_inc(&state->refs); @@ -1724,16 +1749,16 @@ search: /* * Check if the single @bit exists in the given range. 
*/ -bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit) +bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit) { - struct extent_state *state = NULL; + struct extent_state *state; bool bitset = false; ASSERT(is_power_of_2(bit)); spin_lock(&tree->lock); state = tree_search(tree, start); - while (state && start <= end) { + while (state) { if (state->start > end) break; @@ -1742,9 +1767,7 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 break; } - /* If state->end is (u64)-1, start will overflow to 0 */ - start = state->end + 1; - if (start > end || start == 0) + if (state->end >= end) break; state = next_state(state); } @@ -1752,16 +1775,51 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 return bitset; } +void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits, + struct extent_state **cached_state) +{ + struct extent_state *state; + + /* + * The cached state is currently mandatory and not used to start the + * search, only to cache the first state record found in the range. + */ + ASSERT(cached_state != NULL); + ASSERT(*cached_state == NULL); + + *bits = 0; + + spin_lock(&tree->lock); + state = tree_search(tree, start); + if (state && state->start < end) { + *cached_state = state; + refcount_inc(&state->refs); + } + while (state) { + if (state->start > end) + break; + + *bits |= state->state; + + if (state->end >= end) + break; + + state = next_state(state); + } + spin_unlock(&tree->lock); +} + /* * Check if the whole range [@start,@end) contains the single @bit set. */ -bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, - struct extent_state *cached) +bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, + struct extent_state *cached) { - struct extent_state *state = NULL; + struct extent_state *state; bool bitset = true; ASSERT(is_power_of_2(bit)); + ASSERT(start < end); spin_lock(&tree->lock); if (cached && extent_state_in_tree(cached) && cached->start <= start && @@ -1769,30 +1827,22 @@ bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, state = cached; else state = tree_search(tree, start); - while (state && start <= end) { + while (state) { if (state->start > start) { bitset = false; break; } - if (state->start > end) - break; - if ((state->state & bit) == 0) { bitset = false; break; } - if (state->end == (u64)-1) + if (state->end >= end) break; - /* - * Last entry (if state->end is (u64)-1 and overflow happens), - * or next entry starts after the range. - */ + /* Next state must start where this one ends. 
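btrfs_get_range_bits() is new in this series. Going only by the contract spelled out in its comment (the caller passes a cached_state pointer that must be NULL on entry, gets back the OR of all state bits found in the range, and owns a reference on the first cached record if one was found), a caller would look roughly like the hypothetical sketch below. This is illustrative kernel-style code, not a hunk from the patch, and EXTENT_DELALLOC is just an example bit.

	u32 range_bits = 0;
	struct extent_state *cached = NULL;

	btrfs_get_range_bits(&inode->io_tree, start, end, &range_bits, &cached);
	if (range_bits & EXTENT_DELALLOC) {
		/* at least part of [start, end] is marked as delalloc */
	}
	/* Drop the reference taken on the first cached record, if any. */
	btrfs_free_extent_state(cached);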
*/ start = state->end + 1; - if (start > end || start == 0) - break; state = next_state(state); } @@ -1804,8 +1854,8 @@ bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, } /* Wrappers around set/clear extent bit */ -int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_changeset *changeset) +int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_changeset *changeset) { /* * We don't support EXTENT_LOCK_BITS yet, as current changeset will @@ -1814,11 +1864,11 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, */ ASSERT(!(bits & EXTENT_LOCK_BITS)); - return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset); + return set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset); } -int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_changeset *changeset) +int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_changeset *changeset) { /* * Don't support EXTENT_LOCK_BITS case, same reason as @@ -1826,20 +1876,21 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, */ ASSERT(!(bits & EXTENT_LOCK_BITS)); - return __clear_extent_bit(tree, start, end, bits, NULL, changeset); + return btrfs_clear_extent_bit_changeset(tree, start, end, bits, NULL, changeset); } -bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, - struct extent_state **cached) +bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_state **cached) { int err; u64 failed_start; - err = __set_extent_bit(tree, start, end, bits, &failed_start, - NULL, cached, NULL); + err = set_extent_bit(tree, start, end, bits, &failed_start, NULL, + cached, NULL); if (err == -EEXIST) { if (failed_start > start) - clear_extent_bit(tree, start, failed_start - 1, bits, cached); + btrfs_clear_extent_bit(tree, start, failed_start - 1, + bits, cached); return 0; } return 1; @@ -1849,35 +1900,54 @@ bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits * Either insert or lock state struct between start and end use mask to tell * us if waiting is desired. */ -int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, - struct extent_state **cached_state) +int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, + struct extent_state **cached_state) { struct extent_state *failed_state = NULL; int err; u64 failed_start; - err = __set_extent_bit(tree, start, end, bits, &failed_start, - &failed_state, cached_state, NULL); + err = set_extent_bit(tree, start, end, bits, &failed_start, + &failed_state, cached_state, NULL); while (err == -EEXIST) { if (failed_start != start) - clear_extent_bit(tree, start, failed_start - 1, - bits, cached_state); + btrfs_clear_extent_bit(tree, start, failed_start - 1, + bits, cached_state); wait_extent_bit(tree, failed_start, end, bits, &failed_state); - err = __set_extent_bit(tree, start, end, bits, - &failed_start, &failed_state, - cached_state, NULL); + err = set_extent_bit(tree, start, end, bits, &failed_start, + &failed_state, cached_state, NULL); } return err; } -void __cold extent_state_free_cachep(void) +/* + * Get the extent state that follows the given extent state. 
+ * This is meant to be used in a context where we know no other tasks can + * concurrently modify the tree. + */ +struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree, + struct extent_state *state) +{ + struct extent_state *next; + + spin_lock(&tree->lock); + ASSERT(extent_state_in_tree(state)); + next = next_state(state); + if (next) + refcount_inc(&next->refs); + spin_unlock(&tree->lock); + + return next; +} + +void __cold btrfs_extent_state_free_cachep(void) { btrfs_extent_state_leak_debug_check(); kmem_cache_destroy(extent_state_cache); } -int __init extent_state_init_cachep(void) +int __init btrfs_extent_state_init_cachep(void) { extent_state_cache = kmem_cache_create("btrfs_extent_state", sizeof(struct extent_state), 0, 0, diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index 6ffef1cd37c1..0a18ca9c59c3 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -17,7 +17,6 @@ struct btrfs_inode; /* Bits for the extent state */ enum { ENUM_BIT(EXTENT_DIRTY), - ENUM_BIT(EXTENT_UPTODATE), ENUM_BIT(EXTENT_LOCKED), ENUM_BIT(EXTENT_DIO_LOCKED), ENUM_BIT(EXTENT_NEW), @@ -39,6 +38,11 @@ enum { */ ENUM_BIT(EXTENT_DELALLOC_NEW), /* + * Mark that a range is being locked for finishing an ordered extent. + * Used together with EXTENT_LOCKED. + */ + ENUM_BIT(EXTENT_FINISHING_ORDERED), + /* * When an ordered extent successfully completes for a region marked as * a new delalloc range, use this flag when clearing a new delalloc * range to indicate that the VFS' inode number of bytes should be @@ -130,117 +134,116 @@ struct extent_state { #endif }; -struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree); -const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree); -const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree); +const struct btrfs_inode *btrfs_extent_io_tree_to_inode(const struct extent_io_tree *tree); +const struct btrfs_fs_info *btrfs_extent_io_tree_to_fs_info(const struct extent_io_tree *tree); -void extent_io_tree_init(struct btrfs_fs_info *fs_info, - struct extent_io_tree *tree, unsigned int owner); -void extent_io_tree_release(struct extent_io_tree *tree); -int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, - struct extent_state **cached); -bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, - struct extent_state **cached); +void btrfs_extent_io_tree_init(struct btrfs_fs_info *fs_info, + struct extent_io_tree *tree, unsigned int owner); +void btrfs_extent_io_tree_release(struct extent_io_tree *tree); +int btrfs_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, + struct extent_state **cached); +bool btrfs_try_lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_state **cached); -static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, - struct extent_state **cached) +static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, + struct extent_state **cached) { - return __lock_extent(tree, start, end, EXTENT_LOCKED, cached); + return btrfs_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached); } -static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) +static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) { - return __try_lock_extent(tree, start, end, 
EXTENT_LOCKED, cached); + return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_LOCKED, cached); } -int __init extent_state_init_cachep(void); -void __cold extent_state_free_cachep(void); - -u64 count_range_bits(struct extent_io_tree *tree, - u64 *start, u64 search_end, - u64 max_bytes, u32 bits, int contig, - struct extent_state **cached_state); - -void free_extent_state(struct extent_state *state); -bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, - struct extent_state *cached_state); -bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit); -int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_changeset *changeset); -int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_state **cached, - struct extent_changeset *changeset); - -static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start, - u64 end, u32 bits, - struct extent_state **cached) -{ - return __clear_extent_bit(tree, start, end, bits, cached, NULL); -} +int __init btrfs_extent_state_init_cachep(void); +void __cold btrfs_extent_state_free_cachep(void); + +u64 btrfs_count_range_bits(struct extent_io_tree *tree, + u64 *start, u64 search_end, + u64 max_bytes, u32 bits, int contig, + struct extent_state **cached_state); -static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, - struct extent_state **cached) +void btrfs_free_extent_state(struct extent_state *state); +bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, + struct extent_state *cached_state); +bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit); +void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits, + struct extent_state **cached_state); +int btrfs_clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_changeset *changeset); +int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_state **cached, + struct extent_changeset *changeset); + +static inline int btrfs_clear_extent_bit(struct extent_io_tree *tree, u64 start, + u64 end, u32 bits, + struct extent_state **cached) { - return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL); + return btrfs_clear_extent_bit_changeset(tree, start, end, bits, cached, NULL); } -static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, - u64 end, u32 bits) +static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, + struct extent_state **cached) { - return clear_extent_bit(tree, start, end, bits, NULL); + return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_LOCKED, + cached, NULL); } -int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_changeset *changeset); -int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_state **cached_state); - -static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached_state) +static inline int btrfs_clear_extent_bits(struct extent_io_tree *tree, u64 start, + u64 end, u32 bits) { - return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, - cached_state, NULL); + return btrfs_clear_extent_bit(tree, start, end, bits, NULL); } -static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 
start, - u64 end, struct extent_state **cached) +int btrfs_set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_changeset *changeset); +int btrfs_set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, struct extent_state **cached_state); + +static inline int btrfs_clear_extent_dirty(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) { - return clear_extent_bit(tree, start, end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, cached); + return btrfs_clear_extent_bit(tree, start, end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, cached); } -int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, u32 clear_bits, - struct extent_state **cached_state); - -bool find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, u32 bits, - struct extent_state **cached_state); -void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, u32 bits); -int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, u32 bits); +int btrfs_convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + u32 bits, u32 clear_bits, + struct extent_state **cached_state); + +bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, u32 bits, + struct extent_state **cached_state); +void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, u32 bits); +bool btrfs_find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, u32 bits); bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, u64 *end, u64 max_bytes, struct extent_state **cached_state); -static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) +static inline int btrfs_lock_dio_extent(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) { - return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached); + return btrfs_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached); } -static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) +static inline bool btrfs_try_lock_dio_extent(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) { - return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached); + return btrfs_try_lock_extent_bits(tree, start, end, EXTENT_DIO_LOCKED, cached); } -static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start, - u64 end, struct extent_state **cached) +static inline int btrfs_unlock_dio_extent(struct extent_io_tree *tree, u64 start, + u64 end, struct extent_state **cached) { - return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL); + return btrfs_clear_extent_bit_changeset(tree, start, end, EXTENT_DIO_LOCKED, + cached, NULL); } +struct extent_state *btrfs_next_extent_state(struct extent_io_tree *tree, + struct extent_state *state); + #endif /* BTRFS_EXTENT_IO_TREE_H */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 957230abd827..cb6128778a83 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -409,15 +409,15 @@ static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, btrfs_extent_data_ref_offset(leaf, ref)); } -static int 
match_extent_data_ref(struct extent_buffer *leaf, - struct btrfs_extent_data_ref *ref, - u64 root_objectid, u64 owner, u64 offset) +static bool match_extent_data_ref(struct extent_buffer *leaf, + struct btrfs_extent_data_ref *ref, + u64 root_objectid, u64 owner, u64 offset) { if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || btrfs_extent_data_ref_objectid(leaf, ref) != owner || btrfs_extent_data_ref_offset(leaf, ref) != offset) - return 0; - return 1; + return false; + return true; } static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, @@ -2006,7 +2006,12 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, delayed_refs = &trans->transaction->delayed_refs; if (min_bytes == 0) { - max_count = delayed_refs->num_heads_ready; + /* + * We may be subject to a harmless race if some task is + * concurrently adding or removing a delayed ref, so silence + * KCSAN and similar tools. + */ + max_count = data_race(delayed_refs->num_heads_ready); min_bytes = U64_MAX; } @@ -2598,8 +2603,8 @@ static int pin_down_extent(struct btrfs_trans_handle *trans, spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - set_extent_bit(&trans->transaction->pinned_extents, bytenr, - bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); + btrfs_set_extent_bit(&trans->transaction->pinned_extents, bytenr, + bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); return 0; } @@ -2818,34 +2823,63 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *block_group, *tmp; struct list_head *deleted_bgs; - struct extent_io_tree *unpin; + struct extent_io_tree *unpin = &trans->transaction->pinned_extents; + struct extent_state *cached_state = NULL; u64 start; u64 end; + int unpin_error = 0; int ret; - unpin = &trans->transaction->pinned_extents; + mutex_lock(&fs_info->unused_bg_unpin_mutex); + btrfs_find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, &cached_state); - while (!TRANS_ABORTED(trans)) { - struct extent_state *cached_state = NULL; - - mutex_lock(&fs_info->unused_bg_unpin_mutex); - if (!find_first_extent_bit(unpin, 0, &start, &end, - EXTENT_DIRTY, &cached_state)) { - mutex_unlock(&fs_info->unused_bg_unpin_mutex); - break; - } + while (!TRANS_ABORTED(trans) && cached_state) { + struct extent_state *next_state; if (btrfs_test_opt(fs_info, DISCARD_SYNC)) ret = btrfs_discard_extent(fs_info, start, end + 1 - start, NULL); - clear_extent_dirty(unpin, start, end, &cached_state); + next_state = btrfs_next_extent_state(unpin, cached_state); + btrfs_clear_extent_dirty(unpin, start, end, &cached_state); ret = unpin_extent_range(fs_info, start, end, true); - BUG_ON(ret); - mutex_unlock(&fs_info->unused_bg_unpin_mutex); - free_extent_state(cached_state); - cond_resched(); + /* + * If we get an error unpinning an extent range, store the first + * error to return later after trying to unpin all ranges and do + * the sync discards. Our caller will abort the transaction + * (which already wrote new superblocks) and on the next mount + * the space will be available as it was pinned by in-memory + * only structures in this phase. 
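The reworked loop in btrfs_finish_extent_commit() replaces the old BUG_ON(ret) with "remember the first failure, keep unpinning the rest, and return that first error at the end". The stand-alone sketch below shows the shape of that error aggregation; unpin_one(), the range count and the simulated -EIO are invented for the example.

#include <stdio.h>

static int unpin_one(int i)
{
	return (i == 2) ? -5 : 0;	/* pretend the third range fails with -EIO */
}

int main(void)
{
	int first_error = 0;

	for (int i = 0; i < 5; i++) {
		int ret = unpin_one(i);

		if (ret) {
			fprintf(stderr, "failed to unpin range %d: %d\n", i, ret);
			if (!first_error)
				first_error = ret;
		}
		/* Keep going: the remaining ranges are still unpinned/discarded. */
	}
	return first_error ? 1 : 0;
}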
+ */ + if (ret) { + btrfs_err_rl(fs_info, +"failed to unpin extent range [%llu, %llu] when committing transaction %llu: %s (%d)", + start, end, trans->transid, + btrfs_decode_error(ret), ret); + if (!unpin_error) + unpin_error = ret; + } + + btrfs_free_extent_state(cached_state); + + if (need_resched()) { + btrfs_free_extent_state(next_state); + mutex_unlock(&fs_info->unused_bg_unpin_mutex); + cond_resched(); + cached_state = NULL; + mutex_lock(&fs_info->unused_bg_unpin_mutex); + btrfs_find_first_extent_bit(unpin, 0, &start, &end, + EXTENT_DIRTY, &cached_state); + } else { + cached_state = next_state; + if (cached_state) { + start = cached_state->start; + end = cached_state->end; + } + } } + mutex_unlock(&fs_info->unused_bg_unpin_mutex); + btrfs_free_extent_state(cached_state); if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { btrfs_discard_calc_delay(&fs_info->discard_ctl); @@ -2859,14 +2893,10 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) */ deleted_bgs = &trans->transaction->deleted_bgs; list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) { - u64 trimmed = 0; - ret = -EROFS; if (!TRANS_ABORTED(trans)) - ret = btrfs_discard_extent(fs_info, - block_group->start, - block_group->length, - &trimmed); + ret = btrfs_discard_extent(fs_info, block_group->start, + block_group->length, NULL); /* * Not strictly necessary to lock, as the block_group should be @@ -2888,7 +2918,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) } } - return 0; + return unpin_error; } /* @@ -3483,17 +3513,11 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans, WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); btrfs_add_free_space(bg, buf->start, buf->len); - btrfs_free_reserved_bytes(bg, buf->len, 0); + btrfs_free_reserved_bytes(bg, buf->len, false); btrfs_put_block_group(bg); trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); out: - - /* - * Deleting the buffer, clear the corrupt flag since it doesn't - * matter anymore. - */ - clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); return 0; } @@ -4111,6 +4135,7 @@ static int can_allocate_chunk(struct btrfs_fs_info *fs_info, static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, struct btrfs_key *ins, struct find_free_extent_ctl *ffe_ctl, + struct btrfs_space_info *space_info, bool full_search) { struct btrfs_root *root = fs_info->chunk_root; @@ -4165,7 +4190,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, return ret; } - ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, + ret = btrfs_chunk_alloc(trans, space_info, ffe_ctl->flags, CHUNK_ALLOC_FORCE_FOR_EXTENT); /* Do not bail out on ENOSPC since we can do more. */ @@ -4382,11 +4407,22 @@ static noinline int find_free_extent(struct btrfs_root *root, ins->objectid = 0; ins->offset = 0; - trace_find_free_extent(root, ffe_ctl); + trace_btrfs_find_free_extent(root, ffe_ctl); space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); + if (btrfs_is_zoned(fs_info) && space_info) { + /* Use dedicated sub-space_info for dedicated block group users. 
*/ + if (ffe_ctl->for_data_reloc) { + space_info = space_info->sub_group[0]; + ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC); + } else if (ffe_ctl->for_treelog) { + space_info = space_info->sub_group[0]; + ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_TREELOG); + } + } if (!space_info) { - btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); + btrfs_err(fs_info, "no space info for %llu, tree-log %d, relocation %d", + ffe_ctl->flags, ffe_ctl->for_treelog, ffe_ctl->for_data_reloc); return -ENOSPC; } @@ -4408,6 +4444,7 @@ static noinline int find_free_extent(struct btrfs_root *root, * picked out then we don't care that the block group is cached. */ if (block_group && block_group_bits(block_group, ffe_ctl->flags) && + block_group->space_info == space_info && block_group->cached != BTRFS_CACHE_NO) { down_read(&space_info->groups_sem); if (list_empty(&block_group->list) || @@ -4433,7 +4470,7 @@ static noinline int find_free_extent(struct btrfs_root *root, } } search: - trace_find_free_extent_search_loop(root, ffe_ctl); + trace_btrfs_find_free_extent_search_loop(root, ffe_ctl); ffe_ctl->have_caching_bg = false; if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || ffe_ctl->index == 0) @@ -4485,7 +4522,7 @@ search: } have_block_group: - trace_find_free_extent_have_block_group(root, ffe_ctl, block_group); + trace_btrfs_find_free_extent_have_block_group(root, ffe_ctl, block_group); ffe_ctl->cached = btrfs_block_group_done(block_group); if (unlikely(!ffe_ctl->cached)) { ffe_ctl->have_caching_bg = true; @@ -4578,7 +4615,8 @@ loop: } up_read(&space_info->groups_sem); - ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); + ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, space_info, + full_search); if (ret > 0) goto search; @@ -4700,8 +4738,8 @@ again: return ret; } -int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, - u64 start, u64 len, int delalloc) +int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, + bool is_delalloc) { struct btrfs_block_group *cache; @@ -4713,7 +4751,7 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, } btrfs_add_free_space(cache, start, len); - btrfs_free_reserved_bytes(cache, len, delalloc); + btrfs_free_reserved_bytes(cache, len, is_delalloc); trace_btrfs_reserved_extent_free(fs_info, start, len); btrfs_put_block_group(cache); @@ -5071,17 +5109,17 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, * EXTENT bit to differentiate dirty pages. 
*/ if (buf->log_index == 0) - set_extent_bit(&root->dirty_log_pages, buf->start, - buf->start + buf->len - 1, - EXTENT_DIRTY, NULL); + btrfs_set_extent_bit(&root->dirty_log_pages, buf->start, + buf->start + buf->len - 1, + EXTENT_DIRTY, NULL); else - set_extent_bit(&root->dirty_log_pages, buf->start, - buf->start + buf->len - 1, - EXTENT_NEW, NULL); + btrfs_set_extent_bit(&root->dirty_log_pages, buf->start, + buf->start + buf->len - 1, + EXTENT_NEW, NULL); } else { buf->log_index = -1; - set_extent_bit(&trans->transaction->dirty_pages, buf->start, - buf->start + buf->len - 1, EXTENT_DIRTY, NULL); + btrfs_set_extent_bit(&trans->transaction->dirty_pages, buf->start, + buf->start + buf->len - 1, EXTENT_DIRTY, NULL); } /* this returns a buffer locked for blocking */ return buf; @@ -5187,7 +5225,7 @@ out_free_buf: btrfs_tree_unlock(buf); free_extent_buffer(buf); out_free_reserved: - btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); + btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, false); out_unuse: btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); return ERR_PTR(ret); @@ -6397,13 +6435,13 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) if (ret) break; - find_first_clear_extent_bit(&device->alloc_state, start, - &start, &end, - CHUNK_TRIMMED | CHUNK_ALLOCATED); + btrfs_find_first_clear_extent_bit(&device->alloc_state, start, + &start, &end, + CHUNK_TRIMMED | CHUNK_ALLOCATED); /* Check if there are any CHUNK_* bits left */ if (start > device->total_bytes) { - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN(); btrfs_warn_in_rcu(fs_info, "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu", start, end - start + 1, @@ -6436,8 +6474,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) ret = btrfs_issue_discard(device->bdev, start, len, &bytes); if (!ret) - set_extent_bit(&device->alloc_state, start, - start + bytes - 1, CHUNK_TRIMMED, NULL); + btrfs_set_extent_bit(&device->alloc_state, start, + start + bytes - 1, CHUNK_TRIMMED, NULL); mutex_unlock(&fs_info->chunk_mutex); if (ret) diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h index 0ed682d9ed7b..72914074c304 100644 --- a/fs/btrfs/extent-tree.h +++ b/fs/btrfs/extent-tree.h @@ -149,8 +149,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref); u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf, int slot); -int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, - u64 start, u64 len, int delalloc); +int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, + bool is_delalloc); int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, const struct extent_buffer *eb); int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 13bdd60da3c7..e43f6280f954 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -96,6 +96,8 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info) */ struct btrfs_bio_ctrl { struct btrfs_bio *bbio; + /* Last byte contained in bbio + 1 . 
*/ + loff_t next_file_offset; enum btrfs_compression_type compress_type; u32 len_to_oe_boundary; blk_opf_t opf; @@ -221,22 +223,17 @@ static void __process_folios_contig(struct address_space *mapping, } static noinline void unlock_delalloc_folio(const struct inode *inode, - const struct folio *locked_folio, + struct folio *locked_folio, u64 start, u64 end) { - unsigned long index = start >> PAGE_SHIFT; - unsigned long end_index = end >> PAGE_SHIFT; - ASSERT(locked_folio); - if (index == locked_folio->index && end_index == index) - return; __process_folios_contig(inode->i_mapping, locked_folio, start, end, PAGE_UNLOCK); } static noinline int lock_delalloc_folios(struct inode *inode, - const struct folio *locked_folio, + struct folio *locked_folio, u64 start, u64 end) { struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); @@ -246,9 +243,6 @@ static noinline int lock_delalloc_folios(struct inode *inode, u64 processed_end = start; struct folio_batch fbatch; - if (index == locked_folio->index && index == end_index) - return 0; - folio_batch_init(&fbatch); while (index <= end_index) { unsigned int found_folios, i; @@ -340,7 +334,7 @@ again: /* @delalloc_end can be -1, never go beyond @orig_end */ *end = min(delalloc_end, orig_end); - free_extent_state(cached_state); + btrfs_free_extent_state(cached_state); return false; } @@ -366,7 +360,7 @@ again: /* some of the folios are gone, lets avoid looping by * shortening the size of the delalloc range we're searching */ - free_extent_state(cached_state); + btrfs_free_extent_state(cached_state); cached_state = NULL; if (!loops) { max_bytes = PAGE_SIZE; @@ -379,13 +373,13 @@ again: } /* step three, lock the state bits for the whole range */ - lock_extent(tree, delalloc_start, delalloc_end, &cached_state); + btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state); /* then test to make sure it is all still delalloc */ - ret = test_range_bit(tree, delalloc_start, delalloc_end, - EXTENT_DELALLOC, cached_state); + ret = btrfs_test_range_bit(tree, delalloc_start, delalloc_end, + EXTENT_DELALLOC, cached_state); - unlock_extent(tree, delalloc_start, delalloc_end, &cached_state); + btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state); if (!ret) { unlock_delalloc_folio(inode, locked_folio, delalloc_start, delalloc_end); @@ -403,7 +397,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, struct extent_state **cached, u32 clear_bits, unsigned long page_ops) { - clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached); + btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached); __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start, end, page_ops); @@ -462,9 +456,6 @@ static void end_bbio_data_write(struct btrfs_bio *bbio) u64 start = folio_pos(folio) + fi.offset; u32 len = fi.length; - /* Only order 0 (single page) folios are allowed for data. */ - ASSERT(folio_order(folio) == 0); - /* Our read/write should always be sector aligned. 
*/ if (!IS_ALIGNED(fi.offset, sectorsize)) btrfs_err(fs_info, @@ -512,43 +503,22 @@ static void end_bbio_data_read(struct btrfs_bio *bbio) struct btrfs_fs_info *fs_info = bbio->fs_info; struct bio *bio = &bbio->bio; struct folio_iter fi; - const u32 sectorsize = fs_info->sectorsize; ASSERT(!bio_flagged(bio, BIO_CLONED)); bio_for_each_folio_all(fi, &bbio->bio) { bool uptodate = !bio->bi_status; struct folio *folio = fi.folio; struct inode *inode = folio->mapping->host; - u64 start; - u64 end; - u32 len; + u64 start = folio_pos(folio) + fi.offset; btrfs_debug(fs_info, "%s: bi_sector=%llu, err=%d, mirror=%u", __func__, bio->bi_iter.bi_sector, bio->bi_status, bbio->mirror_num); - /* - * We always issue full-sector reads, but if some block in a - * folio fails to read, blk_update_request() will advance - * bv_offset and adjust bv_len to compensate. Print a warning - * for unaligned offsets, and an error if they don't add up to - * a full sector. - */ - if (!IS_ALIGNED(fi.offset, sectorsize)) - btrfs_err(fs_info, - "partial page read in btrfs with offset %zu and length %zu", - fi.offset, fi.length); - else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize)) - btrfs_info(fs_info, - "incomplete page read with offset %zu and length %zu", - fi.offset, fi.length); - - start = folio_pos(folio) + fi.offset; - end = start + fi.length - 1; - len = fi.length; if (likely(uptodate)) { + u64 end = start + fi.length - 1; loff_t i_size = i_size_read(inode); /* @@ -573,7 +543,7 @@ static void end_bbio_data_read(struct btrfs_bio *bbio) } /* Update page status and unlock. */ - end_folio_read(folio, uptodate, start, len); + end_folio_read(folio, uptodate, start, fi.length); } bio_put(bio); } @@ -664,13 +634,10 @@ static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail) } static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl, - struct folio *folio, u64 disk_bytenr, - unsigned int pg_offset) + u64 disk_bytenr, loff_t file_offset) { struct bio *bio = &bio_ctrl->bbio->bio; - struct bio_vec *bvec = bio_last_bvec_all(bio); const sector_t sector = disk_bytenr >> SECTOR_SHIFT; - struct folio *bv_folio = page_folio(bvec->bv_page); if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) { /* @@ -681,19 +648,11 @@ static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl, } /* - * The contig check requires the following conditions to be met: - * - * 1) The folios are belonging to the same inode - * This is implied by the call chain. - * - * 2) The range has adjacent logical bytenr - * - * 3) The range has adjacent file offset - * This is required for the usage of btrfs_bio->file_offset. + * To merge into a bio both the disk sector and the logical offset in + * the file need to be contiguous. */ - return bio_end_sector(bio) == sector && - folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len == - folio_pos(folio) + pg_offset; + return bio_ctrl->next_file_offset == file_offset && + bio_end_sector(bio) == sector; } static void alloc_new_bio(struct btrfs_inode *inode, @@ -711,6 +670,7 @@ static void alloc_new_bio(struct btrfs_inode *inode, bbio->file_offset = file_offset; bio_ctrl->bbio = bbio; bio_ctrl->len_to_oe_boundary = U32_MAX; + bio_ctrl->next_file_offset = file_offset; /* Limit data write bios to the ordered boundary. 
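The simplified btrfs_bio_is_contig() now keys the merge decision off bio_ctrl->next_file_offset: a new chunk may join the current bio only if it continues both the on-disk position and the file position. A toy stand-alone version of that double contiguity check follows; the real code compares the bio's end sector rather than a stored byte counter, so next_disk_byte here is a simplification.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bio_ctrl {
	uint64_t next_disk_byte;	/* disk byte right after the last one queued */
	uint64_t next_file_offset;	/* file byte right after the last one queued */
};

/* Append only if the chunk continues both the disk and the file sequence. */
static bool is_contig(const struct bio_ctrl *ctrl, uint64_t disk_bytenr,
		      uint64_t file_offset)
{
	return ctrl->next_disk_byte == disk_bytenr &&
	       ctrl->next_file_offset == file_offset;
}

int main(void)
{
	struct bio_ctrl ctrl = { .next_disk_byte = 8192, .next_file_offset = 4096 };

	printf("%d\n", is_contig(&ctrl, 8192, 4096));	/* 1: both contiguous */
	printf("%d\n", is_contig(&ctrl, 8192, 8192));	/* 0: file offset jumps */
	return 0;
}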
*/ if (bio_ctrl->wbc) { @@ -752,22 +712,21 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl, size_t size, unsigned long pg_offset) { struct btrfs_inode *inode = folio_to_inode(folio); + loff_t file_offset = folio_pos(folio) + pg_offset; ASSERT(pg_offset + size <= folio_size(folio)); ASSERT(bio_ctrl->end_io_func); if (bio_ctrl->bbio && - !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset)) + !btrfs_bio_is_contig(bio_ctrl, disk_bytenr, file_offset)) submit_one_bio(bio_ctrl); do { u32 len = size; /* Allocate new bio if needed */ - if (!bio_ctrl->bbio) { - alloc_new_bio(inode, bio_ctrl, disk_bytenr, - folio_pos(folio) + pg_offset); - } + if (!bio_ctrl->bbio) + alloc_new_bio(inode, bio_ctrl, disk_bytenr, file_offset); /* Cap to the current ordered extent boundary if there is one. */ if (len > bio_ctrl->len_to_oe_boundary) { @@ -781,14 +740,15 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl, submit_one_bio(bio_ctrl); continue; } + bio_ctrl->next_file_offset += len; if (bio_ctrl->wbc) - wbc_account_cgroup_owner(bio_ctrl->wbc, folio, - len); + wbc_account_cgroup_owner(bio_ctrl->wbc, folio, len); size -= len; pg_offset += len; disk_bytenr += len; + file_offset += len; /* * len_to_oe_boundary defaults to U32_MAX, which isn't folio or @@ -903,13 +863,13 @@ static struct extent_map *get_extent_map(struct btrfs_inode *inode, if (*em_cached) { em = *em_cached; - if (extent_map_in_tree(em) && start >= em->start && - start < extent_map_end(em)) { + if (btrfs_extent_map_in_tree(em) && start >= em->start && + start < btrfs_extent_map_end(em)) { refcount_inc(&em->refs); return em; } - free_extent_map(em); + btrfs_free_extent_map(em); *em_cached = NULL; } @@ -980,20 +940,20 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached, return PTR_ERR(em); } extent_offset = cur - em->start; - BUG_ON(extent_map_end(em) <= cur); + BUG_ON(btrfs_extent_map_end(em) <= cur); BUG_ON(end < cur); - compress_type = extent_map_compression(em); + compress_type = btrfs_extent_map_compression(em); if (compress_type != BTRFS_COMPRESS_NONE) disk_bytenr = em->disk_bytenr; else - disk_bytenr = extent_map_block_start(em) + extent_offset; + disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset; if (em->flags & EXTENT_FLAG_PREALLOC) block_start = EXTENT_MAP_HOLE; else - block_start = extent_map_block_start(em); + block_start = btrfs_extent_map_block_start(em); /* * If we have a file range that points to a compressed extent @@ -1037,7 +997,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached, if (prev_em_start) *prev_em_start = em->start; - free_extent_map(em); + btrfs_free_extent_map(em); em = NULL; /* we've found a hole, just zero and go on */ @@ -1212,7 +1172,7 @@ static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end, ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE)); again: - lock_extent(&inode->io_tree, start, end, cached_state); + btrfs_lock_extent(&inode->io_tree, start, end, cached_state); cur_pos = start; while (cur_pos < end) { struct btrfs_ordered_extent *ordered; @@ -1235,7 +1195,7 @@ again: } /* Now wait for the OE to finish. */ - unlock_extent(&inode->io_tree, start, end, cached_state); + btrfs_unlock_extent(&inode->io_tree, start, end, cached_state); btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start); btrfs_put_ordered_extent(ordered); /* We have unlocked the whole range, restart from the beginning. 
*/ @@ -1255,9 +1215,9 @@ int btrfs_read_folio(struct file *file, struct folio *folio) lock_extents_for_read(inode, start, end, &cached_state); ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL); - unlock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); - free_extent_map(em_cached); + btrfs_free_extent_map(em_cached); /* * If btrfs_do_readpage() failed we will want to submit the assembled @@ -1443,8 +1403,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, * We've hit an error during previous delalloc range, * have to cleanup the remaining locked ranges. */ - unlock_extent(&inode->io_tree, found_start, - found_start + found_len - 1, NULL); + btrfs_unlock_extent(&inode->io_tree, found_start, + found_start + found_len - 1, NULL); unlock_delalloc_folio(&inode->vfs_inode, folio, found_start, found_start + found_len - 1); @@ -1550,19 +1510,19 @@ static int submit_one_sector(struct btrfs_inode *inode, return PTR_ERR(em); extent_offset = filepos - em->start; - em_end = extent_map_end(em); + em_end = btrfs_extent_map_end(em); ASSERT(filepos <= em_end); ASSERT(IS_ALIGNED(em->start, sectorsize)); ASSERT(IS_ALIGNED(em->len, sectorsize)); - block_start = extent_map_block_start(em); - disk_bytenr = extent_map_block_start(em) + extent_offset; + block_start = btrfs_extent_map_block_start(em); + disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset; - ASSERT(!extent_map_is_compressed(em)); + ASSERT(!btrfs_extent_map_is_compressed(em)); ASSERT(block_start != EXTENT_MAP_HOLE); ASSERT(block_start != EXTENT_MAP_INLINE); - free_extent_map(em); + btrfs_free_extent_map(em); em = NULL; /* @@ -1718,7 +1678,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl return 0; } - if (folio->index == end_index) + if (folio_contains(folio, end_index)) folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset); /* @@ -1814,8 +1774,18 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e */ spin_lock(&eb->refs_lock); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { + XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits); + unsigned long flags; + set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); spin_unlock(&eb->refs_lock); + + xas_lock_irqsave(&xas, flags); + xas_load(&xas); + xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK); + xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); + xas_unlock_irqrestore(&xas, flags); + btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, @@ -1901,24 +1871,151 @@ static void set_btree_ioerr(struct extent_buffer *eb) } } +static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark) +{ + struct btrfs_fs_info *fs_info = eb->fs_info; + XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits); + unsigned long flags; + + xas_lock_irqsave(&xas, flags); + xas_load(&xas); + xas_set_mark(&xas, mark); + xas_unlock_irqrestore(&xas, flags); +} + +static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark) +{ + struct btrfs_fs_info *fs_info = eb->fs_info; + XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits); + unsigned long flags; + + xas_lock_irqsave(&xas, flags); + xas_load(&xas); + xas_clear_mark(&xas, mark); + xas_unlock_irqrestore(&xas, flags); +} + +static void buffer_tree_tag_for_writeback(struct btrfs_fs_info *fs_info, + unsigned long start, unsigned 
long end) +{ + XA_STATE(xas, &fs_info->buffer_tree, start); + unsigned int tagged = 0; + void *eb; + + xas_lock_irq(&xas); + xas_for_each_marked(&xas, eb, end, PAGECACHE_TAG_DIRTY) { + xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE); + if (++tagged % XA_CHECK_SCHED) + continue; + xas_pause(&xas); + xas_unlock_irq(&xas); + cond_resched(); + xas_lock_irq(&xas); + } + xas_unlock_irq(&xas); +} + +struct eb_batch { + unsigned int nr; + unsigned int cur; + struct extent_buffer *ebs[PAGEVEC_SIZE]; +}; + +static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb) +{ + batch->ebs[batch->nr++] = eb; + return (batch->nr < PAGEVEC_SIZE); +} + +static inline void eb_batch_init(struct eb_batch *batch) +{ + batch->nr = 0; + batch->cur = 0; +} + +static inline struct extent_buffer *eb_batch_next(struct eb_batch *batch) +{ + if (batch->cur >= batch->nr) + return NULL; + return batch->ebs[batch->cur++]; +} + +static inline void eb_batch_release(struct eb_batch *batch) +{ + for (unsigned int i = 0; i < batch->nr; i++) + free_extent_buffer(batch->ebs[i]); + eb_batch_init(batch); +} + +static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned long max, + xa_mark_t mark) +{ + struct extent_buffer *eb; + +retry: + eb = xas_find_marked(xas, max, mark); + + if (xas_retry(xas, eb)) + goto retry; + + if (!eb) + return NULL; + + if (!atomic_inc_not_zero(&eb->refs)) { + xas_reset(xas); + goto retry; + } + + if (unlikely(eb != xas_reload(xas))) { + free_extent_buffer(eb); + xas_reset(xas); + goto retry; + } + + return eb; +} + +static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info, + unsigned long *start, + unsigned long end, xa_mark_t tag, + struct eb_batch *batch) +{ + XA_STATE(xas, &fs_info->buffer_tree, *start); + struct extent_buffer *eb; + + rcu_read_lock(); + while ((eb = find_get_eb(&xas, end, tag)) != NULL) { + if (!eb_batch_add(batch, eb)) { + *start = ((eb->start + eb->len) >> fs_info->sectorsize_bits); + goto out; + } + } + if (end == ULONG_MAX) + *start = ULONG_MAX; + else + *start = end + 1; +out: + rcu_read_unlock(); + + return batch->nr; +} + /* * The endio specific version which won't touch any unsafe spinlock in endio * context. */ static struct extent_buffer *find_extent_buffer_nolock( - const struct btrfs_fs_info *fs_info, u64 start) + struct btrfs_fs_info *fs_info, u64 start) { struct extent_buffer *eb; + unsigned long index = (start >> fs_info->sectorsize_bits); rcu_read_lock(); - eb = radix_tree_lookup(&fs_info->buffer_radix, - start >> fs_info->sectorsize_bits); - if (eb && atomic_inc_not_zero(&eb->refs)) { - rcu_read_unlock(); - return eb; - } + eb = xa_load(&fs_info->buffer_tree, index); + if (eb && !atomic_inc_not_zero(&eb->refs)) + eb = NULL; rcu_read_unlock(); - return NULL; + return eb; } static void end_bbio_meta_write(struct btrfs_bio *bbio) @@ -1933,6 +2030,7 @@ static void end_bbio_meta_write(struct btrfs_bio *bbio) btrfs_meta_folio_clear_writeback(fi.folio, eb); } + buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK); clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); smp_mb__after_atomic(); wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); @@ -2004,163 +2102,36 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, } /* - * Submit one subpage btree page. - * - * The main difference to submit_eb_page() is: - * - Page locking - * For subpage, we don't rely on page locking at all. - * - * - Flush write bio - * We only flush bio if we may be unable to fit current extent buffers into - * current bio. 
+ * Wait for all eb writeback in the given range to finish. * - * Return >=0 for the number of submitted extent buffers. - * Return <0 for fatal error. + * @fs_info: The fs_info for this file system. + * @start: The offset of the range to start waiting on writeback. + * @end: The end of the range, inclusive. This is meant to be used in + * conjuction with wait_marked_extents, so this will usually be + * the_next_eb->start - 1. */ -static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc) +void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, + u64 end) { - struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); - int submitted = 0; - u64 folio_start = folio_pos(folio); - int bit_start = 0; - int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits; - const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio); + struct eb_batch batch; + unsigned long start_index = (start >> fs_info->sectorsize_bits); + unsigned long end_index = (end >> fs_info->sectorsize_bits); - /* Lock and write each dirty extent buffers in the range */ - while (bit_start < blocks_per_folio) { - struct btrfs_subpage *subpage = folio_get_private(folio); + eb_batch_init(&batch); + while (start_index <= end_index) { struct extent_buffer *eb; - unsigned long flags; - u64 start; + unsigned int nr_ebs; - /* - * Take private lock to ensure the subpage won't be detached - * in the meantime. - */ - spin_lock(&folio->mapping->i_private_lock); - if (!folio_test_private(folio)) { - spin_unlock(&folio->mapping->i_private_lock); + nr_ebs = buffer_tree_get_ebs_tag(fs_info, &start_index, end_index, + PAGECACHE_TAG_WRITEBACK, &batch); + if (!nr_ebs) break; - } - spin_lock_irqsave(&subpage->lock, flags); - if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * blocks_per_folio, - subpage->bitmaps)) { - spin_unlock_irqrestore(&subpage->lock, flags); - spin_unlock(&folio->mapping->i_private_lock); - bit_start += sectors_per_node; - continue; - } - - start = folio_start + bit_start * fs_info->sectorsize; - bit_start += sectors_per_node; - - /* - * Here we just want to grab the eb without touching extra - * spin locks, so call find_extent_buffer_nolock(). - */ - eb = find_extent_buffer_nolock(fs_info, start); - spin_unlock_irqrestore(&subpage->lock, flags); - spin_unlock(&folio->mapping->i_private_lock); - - /* - * The eb has already reached 0 refs thus find_extent_buffer() - * doesn't return it. We don't need to write back such eb - * anyway. - */ - if (!eb) - continue; - - if (lock_extent_buffer_for_io(eb, wbc)) { - write_one_eb(eb, wbc); - submitted++; - } - free_extent_buffer(eb); - } - return submitted; -} - -/* - * Submit all page(s) of one extent buffer. - * - * @page: the page of one extent buffer - * @eb_context: to determine if we need to submit this page, if current page - * belongs to this eb, we don't need to submit - * - * The caller should pass each page in their bytenr order, and here we use - * @eb_context to determine if we have submitted pages of one extent buffer. - * - * If we have, we just skip until we hit a new page that doesn't belong to - * current @eb_context. - * - * If not, we submit all the page(s) of the extent buffer. - * - * Return >0 if we have submitted the extent buffer successfully. - * Return 0 if we don't need to submit the page, as it's already submitted by - * previous call. - * Return <0 for fatal error. 
- */ -static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx) -{ - struct writeback_control *wbc = ctx->wbc; - struct address_space *mapping = folio->mapping; - struct extent_buffer *eb; - int ret; - - if (!folio_test_private(folio)) - return 0; - - if (btrfs_meta_is_subpage(folio_to_fs_info(folio))) - return submit_eb_subpage(folio, wbc); - - spin_lock(&mapping->i_private_lock); - if (!folio_test_private(folio)) { - spin_unlock(&mapping->i_private_lock); - return 0; - } - - eb = folio_get_private(folio); - - /* - * Shouldn't happen and normally this would be a BUG_ON but no point - * crashing the machine for something we can survive anyway. - */ - if (WARN_ON(!eb)) { - spin_unlock(&mapping->i_private_lock); - return 0; - } - - if (eb == ctx->eb) { - spin_unlock(&mapping->i_private_lock); - return 0; - } - ret = atomic_inc_not_zero(&eb->refs); - spin_unlock(&mapping->i_private_lock); - if (!ret) - return 0; - - ctx->eb = eb; - - ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx); - if (ret) { - if (ret == -EBUSY) - ret = 0; - free_extent_buffer(eb); - return ret; - } - if (!lock_extent_buffer_for_io(eb, wbc)) { - free_extent_buffer(eb); - return 0; - } - /* Implies write in zoned mode. */ - if (ctx->zoned_bg) { - /* Mark the last eb in the block group. */ - btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb); - ctx->zoned_bg->meta_write_pointer += eb->len; + while ((eb = eb_batch_next(&batch)) != NULL) + wait_on_extent_buffer_writeback(eb); + eb_batch_release(&batch); + cond_resched(); } - write_one_eb(eb, wbc); - free_extent_buffer(eb); - return 1; } int btree_write_cache_pages(struct address_space *mapping, @@ -2171,25 +2142,27 @@ int btree_write_cache_pages(struct address_space *mapping, int ret = 0; int done = 0; int nr_to_write_done = 0; - struct folio_batch fbatch; - unsigned int nr_folios; - pgoff_t index; - pgoff_t end; /* Inclusive */ + struct eb_batch batch; + unsigned int nr_ebs; + unsigned long index; + unsigned long end; int scanned = 0; xa_mark_t tag; - folio_batch_init(&fbatch); + eb_batch_init(&batch); if (wbc->range_cyclic) { - index = mapping->writeback_index; /* Start from prev offset */ + index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->sectorsize_bits); end = -1; + /* * Start from the beginning does not need to cycle over the * range, mark it as scanned. 
*/ scanned = (index == 0); } else { - index = wbc->range_start >> PAGE_SHIFT; - end = wbc->range_end >> PAGE_SHIFT; + index = (wbc->range_start >> fs_info->sectorsize_bits); + end = (wbc->range_end >> fs_info->sectorsize_bits); + scanned = 1; } if (wbc->sync_mode == WB_SYNC_ALL) @@ -2199,31 +2172,40 @@ int btree_write_cache_pages(struct address_space *mapping, btrfs_zoned_meta_io_lock(fs_info); retry: if (wbc->sync_mode == WB_SYNC_ALL) - tag_pages_for_writeback(mapping, index, end); + buffer_tree_tag_for_writeback(fs_info, index, end); while (!done && !nr_to_write_done && (index <= end) && - (nr_folios = filemap_get_folios_tag(mapping, &index, end, - tag, &fbatch))) { - unsigned i; + (nr_ebs = buffer_tree_get_ebs_tag(fs_info, &index, end, tag, &batch))) { + struct extent_buffer *eb; - for (i = 0; i < nr_folios; i++) { - struct folio *folio = fbatch.folios[i]; + while ((eb = eb_batch_next(&batch)) != NULL) { + ctx.eb = eb; + + ret = btrfs_check_meta_write_pointer(eb->fs_info, &ctx); + if (ret) { + if (ret == -EBUSY) + ret = 0; - ret = submit_eb_page(folio, &ctx); - if (ret == 0) + if (ret) { + done = 1; + break; + } + free_extent_buffer(eb); continue; - if (ret < 0) { - done = 1; - break; } - /* - * the filesystem may choose to bump up nr_to_write. - * We have to make sure to honor the new nr_to_write - * at any time - */ - nr_to_write_done = wbc->nr_to_write <= 0; + if (!lock_extent_buffer_for_io(eb, wbc)) + continue; + + /* Implies write in zoned mode. */ + if (ctx.zoned_bg) { + /* Mark the last eb in the block group. */ + btrfs_schedule_zone_finish_bg(ctx.zoned_bg, eb); + ctx.zoned_bg->meta_write_pointer += eb->len; + } + write_one_eb(eb, wbc); } - folio_batch_release(&fbatch); + nr_to_write_done = (wbc->nr_to_write <= 0); + eb_batch_release(&batch); cond_resched(); } if (!scanned && !done) { @@ -2574,10 +2556,10 @@ void btrfs_readahead(struct readahead_control *rac) while ((folio = readahead_folio(rac)) != NULL) btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start); - unlock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); if (em_cached) - free_extent_map(em_cached); + btrfs_free_extent_map(em_cached); submit_one_bio(&bio_ctrl); } @@ -2601,7 +2583,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree, if (start > end) return 0; - lock_extent(tree, start, end, &cached_state); + btrfs_lock_extent(tree, start, end, &cached_state); folio_wait_writeback(folio); /* @@ -2609,46 +2591,54 @@ int extent_invalidate_folio(struct extent_io_tree *tree, * so here we only need to unlock the extent range to free any * existing extent state. */ - unlock_extent(tree, start, end, &cached_state); + btrfs_unlock_extent(tree, start, end, &cached_state); return 0; } /* - * a helper for release_folio, this tests for areas of the page that - * are locked or under IO and drops the related state bits if it is safe - * to drop the page. + * A helper for struct address_space_operations::release_folio, this tests for + * areas of the folio that are locked or under IO and drops the related state + * bits if it is safe to drop the folio. 
*/ static bool try_release_extent_state(struct extent_io_tree *tree, struct folio *folio) { + struct extent_state *cached_state = NULL; u64 start = folio_pos(folio); u64 end = start + folio_size(folio) - 1; - bool ret; + u32 range_bits; + u32 clear_bits; + bool ret = false; + int ret2; - if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) { - ret = false; - } else { - u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | - EXTENT_DELALLOC_NEW | EXTENT_CTLBITS | - EXTENT_QGROUP_RESERVED); - int ret2; + btrfs_get_range_bits(tree, start, end, &range_bits, &cached_state); - /* - * At this point we can safely clear everything except the - * locked bit, the nodatasum bit and the delalloc new bit. - * The delalloc new bit will be cleared by ordered extent - * completion. - */ - ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL); + /* + * We can release the folio if it's locked only for ordered extent + * completion, since that doesn't require using the folio. + */ + if ((range_bits & EXTENT_LOCKED) && + !(range_bits & EXTENT_FINISHING_ORDERED)) + goto out; + + clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW | + EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED | + EXTENT_FINISHING_ORDERED); + /* + * At this point we can safely clear everything except the locked, + * nodatasum, delalloc new and finishing ordered bits. The delalloc new + * bit will be cleared by ordered extent completion. + */ + ret2 = btrfs_clear_extent_bit(tree, start, end, clear_bits, &cached_state); + /* + * If clear_extent_bit failed for enomem reasons, we can't allow the + * release to continue. + */ + if (ret2 == 0) + ret = true; +out: + btrfs_free_extent_state(cached_state); - /* if clear_extent_bit failed for enomem reasons, - * we can't allow the release to continue. - */ - if (ret2 < 0) - ret = false; - else - ret = true; - } return ret; } @@ -2671,18 +2661,19 @@ bool try_release_extent_mapping(struct folio *folio, gfp_t mask) struct extent_map *em; write_lock(&extent_tree->lock); - em = lookup_extent_mapping(extent_tree, start, len); + em = btrfs_lookup_extent_mapping(extent_tree, start, len); if (!em) { write_unlock(&extent_tree->lock); break; } if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) { write_unlock(&extent_tree->lock); - free_extent_map(em); + btrfs_free_extent_map(em); break; } - if (test_range_bit_exists(io_tree, em->start, - extent_map_end(em) - 1, EXTENT_LOCKED)) + if (btrfs_test_range_bit_exists(io_tree, em->start, + btrfs_extent_map_end(em) - 1, + EXTENT_LOCKED)) goto next; /* * If it's not in the list of modified extents, used by a fast @@ -2709,15 +2700,15 @@ remove_em: * fsync performance for workloads with a data size that exceeds * or is close to the system's memory). */ - remove_extent_mapping(inode, em); + btrfs_remove_extent_mapping(inode, em); /* Once for the inode's extent map tree. */ - free_extent_map(em); + btrfs_free_extent_map(em); next: - start = extent_map_end(em); + start = btrfs_extent_map_end(em); write_unlock(&extent_tree->lock); /* Once for us, for the lookup_extent_mapping() reference. 
*/ - free_extent_map(em); + btrfs_free_extent_map(em); if (need_resched()) { /* @@ -2756,6 +2747,7 @@ static bool folio_range_has_eb(struct folio *folio) static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio) { struct btrfs_fs_info *fs_info = eb->fs_info; + struct address_space *mapping = folio->mapping; const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); /* @@ -2763,21 +2755,20 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo * be done under the i_private_lock. */ if (mapped) - spin_lock(&folio->mapping->i_private_lock); + spin_lock(&mapping->i_private_lock); if (!folio_test_private(folio)) { if (mapped) - spin_unlock(&folio->mapping->i_private_lock); + spin_unlock(&mapping->i_private_lock); return; } if (!btrfs_meta_is_subpage(fs_info)) { /* - * We do this since we'll remove the pages after we've - * removed the eb from the radix tree, so we could race - * and have this page now attached to the new eb. So - * only clear folio if it's still connected to - * this eb. + * We do this since we'll remove the pages after we've removed + * the eb from the xarray, so we could race and have this page + * now attached to the new eb. So only clear folio if it's + * still connected to this eb. */ if (folio_test_private(folio) && folio_get_private(folio) == eb) { BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); @@ -2787,7 +2778,7 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo folio_detach_private(folio); } if (mapped) - spin_unlock(&folio->mapping->i_private_lock); + spin_unlock(&mapping->i_private_lock); return; } @@ -2810,7 +2801,7 @@ static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct fo if (!folio_range_has_eb(folio)) btrfs_detach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA); - spin_unlock(&folio->mapping->i_private_lock); + spin_unlock(&mapping->i_private_lock); } /* Release all folios attached to the extent buffer */ @@ -2825,9 +2816,6 @@ static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb) continue; detach_extent_buffer_folio(eb, folio); - - /* One for when we allocated the folio. */ - folio_put(folio); } } @@ -2862,9 +2850,28 @@ static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info return eb; } +/* + * For use in eb allocation error cleanup paths, as btrfs_release_extent_buffer() + * does not call folio_put(), and we need to set the folios to NULL so that + * btrfs_release_extent_buffer() will not detach them a second time. + */ +static void cleanup_extent_buffer_folios(struct extent_buffer *eb) +{ + const int num_folios = num_extent_folios(eb); + + /* We canont use num_extent_folios() as loop bound as eb->folios changes. 
*/ + for (int i = 0; i < num_folios; i++) { + ASSERT(eb->folios[i]); + detach_extent_buffer_folio(eb, eb->folios[i]); + folio_put(eb->folios[i]); + eb->folios[i] = NULL; + } +} + struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src) { struct extent_buffer *new; + int num_folios; int ret; new = __alloc_extent_buffer(src->fs_info, src->start); @@ -2879,25 +2886,34 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src) set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags); ret = alloc_eb_folio_array(new, false); - if (ret) { - btrfs_release_extent_buffer(new); - return NULL; - } + if (ret) + goto release_eb; - for (int i = 0; i < num_extent_folios(src); i++) { + ASSERT(num_extent_folios(src) == num_extent_folios(new), + "%d != %d", num_extent_folios(src), num_extent_folios(new)); + /* Explicitly use the cached num_extent value from now on. */ + num_folios = num_extent_folios(src); + for (int i = 0; i < num_folios; i++) { struct folio *folio = new->folios[i]; ret = attach_extent_buffer_folio(new, folio, NULL); - if (ret < 0) { - btrfs_release_extent_buffer(new); - return NULL; - } + if (ret < 0) + goto cleanup_folios; WARN_ON(folio_test_dirty(folio)); } + for (int i = 0; i < num_folios; i++) + folio_put(new->folios[i]); + copy_extent_buffer_full(new, src); set_extent_buffer_uptodate(new); return new; + +cleanup_folios: + cleanup_extent_buffer_folios(new); +release_eb: + btrfs_release_extent_buffer(new); + return NULL; } struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, @@ -2912,13 +2928,15 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, ret = alloc_eb_folio_array(eb, false); if (ret) - goto out; + goto release_eb; for (int i = 0; i < num_extent_folios(eb); i++) { ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL); if (ret < 0) - goto out_detach; + goto cleanup_folios; } + for (int i = 0; i < num_extent_folios(eb); i++) + folio_put(eb->folios[i]); set_extent_buffer_uptodate(eb); btrfs_set_header_nritems(eb, 0); @@ -2926,15 +2944,10 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, return eb; -out_detach: - for (int i = 0; i < num_extent_folios(eb); i++) { - if (eb->folios[i]) { - detach_extent_buffer_folio(eb, eb->folios[i]); - folio_put(eb->folios[i]); - } - } -out: - kmem_cache_free(extent_buffer_cache, eb); +cleanup_folios: + cleanup_extent_buffer_folios(eb); +release_eb: + btrfs_release_extent_buffer(eb); return NULL; } @@ -2942,9 +2955,9 @@ static void check_buffer_tree_ref(struct extent_buffer *eb) { int refs; /* - * The TREE_REF bit is first set when the extent_buffer is added - * to the radix tree. It is also reset, if unset, when a new reference - * is created by find_extent_buffer. + * The TREE_REF bit is first set when the extent_buffer is added to the + * xarray. It is also reset, if unset, when a new reference is created + * by find_extent_buffer. * * It is only cleared in two cases: freeing the last non-tree * reference to the extent_buffer when its STALE bit is set or @@ -2956,13 +2969,12 @@ static void check_buffer_tree_ref(struct extent_buffer *eb) * conditions between the calls to check_buffer_tree_ref in those * codepaths and clearing TREE_REF in try_release_extent_buffer. * - * The actual lifetime of the extent_buffer in the radix tree is - * adequately protected by the refcount, but the TREE_REF bit and - * its corresponding reference are not. 
To protect against this - * class of races, we call check_buffer_tree_ref from the codepaths - * which trigger io. Note that once io is initiated, TREE_REF can no - * longer be cleared, so that is the moment at which any such race is - * best fixed. + * The actual lifetime of the extent_buffer in the xarray is adequately + * protected by the refcount, but the TREE_REF bit and its corresponding + * reference are not. To protect against this class of races, we call + * check_buffer_tree_ref() from the code paths which trigger io. Note that + * once io is initiated, TREE_REF can no longer be cleared, so that is + * the moment at which any such race is best fixed. */ refs = atomic_read(&eb->refs); if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) @@ -3026,30 +3038,29 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, return ERR_PTR(-ENOMEM); eb->fs_info = fs_info; again: - ret = radix_tree_preload(GFP_NOFS); - if (ret) { - exists = ERR_PTR(ret); - goto free_eb; + xa_lock_irq(&fs_info->buffer_tree); + exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->sectorsize_bits, + NULL, eb, GFP_NOFS); + if (xa_is_err(exists)) { + ret = xa_err(exists); + xa_unlock_irq(&fs_info->buffer_tree); + btrfs_release_extent_buffer(eb); + return ERR_PTR(ret); } - spin_lock(&fs_info->buffer_lock); - ret = radix_tree_insert(&fs_info->buffer_radix, - start >> fs_info->sectorsize_bits, eb); - spin_unlock(&fs_info->buffer_lock); - radix_tree_preload_end(); - if (ret == -EEXIST) { - exists = find_extent_buffer(fs_info, start); - if (exists) - goto free_eb; - else + if (exists) { + if (!atomic_inc_not_zero(&exists->refs)) { + /* The extent buffer is being freed, retry. */ + xa_unlock_irq(&fs_info->buffer_tree); goto again; + } + xa_unlock_irq(&fs_info->buffer_tree); + btrfs_release_extent_buffer(eb); + return exists; } + xa_unlock_irq(&fs_info->buffer_tree); check_buffer_tree_ref(eb); - set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); return eb; -free_eb: - btrfs_release_extent_buffer(eb); - return exists; #else /* Stub to avoid linker error when compiled with optimizations turned off. */ return NULL; @@ -3064,9 +3075,9 @@ static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info, lockdep_assert_held(&folio->mapping->i_private_lock); /* - * For subpage case, we completely rely on radix tree to ensure we - * don't try to insert two ebs for the same bytenr. So here we always - * return NULL and just continue. + * For subpage case, we completely rely on xarray to ensure we don't try + * to insert two ebs for the same bytenr. So here we always return NULL + * and just continue. 
*/ if (btrfs_meta_is_subpage(fs_info)) return NULL; @@ -3100,10 +3111,9 @@ static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start) return true; } - if (fs_info->nodesize < PAGE_SIZE && - offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) { + if (fs_info->nodesize < PAGE_SIZE && !IS_ALIGNED(start, fs_info->nodesize)) { btrfs_err(fs_info, - "tree block crosses page boundary, start %llu nodesize %u", + "tree block is not nodesize aligned, start %llu nodesize %u", start, fs_info->nodesize); return true; } @@ -3139,7 +3149,7 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, struct btrfs_fs_info *fs_info = eb->fs_info; struct address_space *mapping = fs_info->btree_inode->i_mapping; const unsigned long index = eb->start >> PAGE_SHIFT; - struct folio *existing_folio = NULL; + struct folio *existing_folio; int ret; ASSERT(found_eb_ret); @@ -3148,6 +3158,7 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, ASSERT(eb->folios[i]); retry: + existing_folio = NULL; ret = filemap_add_folio(mapping, eb->folios[i], index + i, GFP_NOFS | __GFP_NOFAIL); if (!ret) @@ -3155,10 +3166,8 @@ retry: existing_folio = filemap_lock_folio(mapping, index + i); /* The page cache only exists for a very short time, just retry. */ - if (IS_ERR(existing_folio)) { - existing_folio = NULL; + if (IS_ERR(existing_folio)) goto retry; - } /* For now, we should only have single-page folios for btree inode. */ ASSERT(folio_nr_pages(existing_folio) == 1); @@ -3199,7 +3208,7 @@ finish: /* * To inform we have an extra eb under allocation, so that * detach_extent_buffer_page() won't release the folio private when the - * eb hasn't been inserted into radix tree yet. + * eb hasn't been inserted into the xarray yet. * * The ref will be decreased when the eb releases the page, in * detach_extent_buffer_page(). Thus needs no special handling in the @@ -3306,7 +3315,7 @@ reallocate: * using 0-order folios. */ if (unlikely(ret == -EAGAIN)) { - ASSERT(0); + DEBUG_WARN("folio order mismatch between new eb and filemap"); goto reallocate; } attached++; @@ -3333,10 +3342,9 @@ reallocate: /* * We can't unlock the pages just yet since the extent buffer - * hasn't been properly inserted in the radix tree, this - * opens a race with btree_release_folio which can free a page - * while we are still filling in all pages for the buffer and - * we could crash. + * hasn't been properly inserted into the xarray, this opens a + * race with btree_release_folio() which can free a page while we + * are still filling in all pages for the buffer and we could crash. 
*/ } if (uptodate) @@ -3345,34 +3353,42 @@ reallocate: if (page_contig) eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start); again: - ret = radix_tree_preload(GFP_NOFS); - if (ret) + xa_lock_irq(&fs_info->buffer_tree); + existing_eb = __xa_cmpxchg(&fs_info->buffer_tree, + start >> fs_info->sectorsize_bits, NULL, eb, + GFP_NOFS); + if (xa_is_err(existing_eb)) { + ret = xa_err(existing_eb); + xa_unlock_irq(&fs_info->buffer_tree); goto out; - - spin_lock(&fs_info->buffer_lock); - ret = radix_tree_insert(&fs_info->buffer_radix, - start >> fs_info->sectorsize_bits, eb); - spin_unlock(&fs_info->buffer_lock); - radix_tree_preload_end(); - if (ret == -EEXIST) { - ret = 0; - existing_eb = find_extent_buffer(fs_info, start); - if (existing_eb) - goto out; - else + } + if (existing_eb) { + if (!atomic_inc_not_zero(&existing_eb->refs)) { + xa_unlock_irq(&fs_info->buffer_tree); goto again; + } + xa_unlock_irq(&fs_info->buffer_tree); + goto out; } + xa_unlock_irq(&fs_info->buffer_tree); + /* add one reference for the tree */ check_buffer_tree_ref(eb); - set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); /* * Now it's safe to unlock the pages because any calls to * btree_release_folio will correctly detect that a page belongs to a * live buffer and won't free them prematurely. */ - for (int i = 0; i < num_extent_folios(eb); i++) + for (int i = 0; i < num_extent_folios(eb); i++) { folio_unlock(eb->folios[i]); + /* + * A folio that has been added to an address_space mapping + * should not continue holding the refcount from its original + * allocation indefinitely. + */ + folio_put(eb->folios[i]); + } return eb; out: @@ -3386,26 +3402,22 @@ out: * want that to grab this eb, as we're getting ready to free it. So we * have to detach it first and then unlock it. * - * We have to drop our reference and NULL it out here because in the - * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb. - * Below when we call btrfs_release_extent_buffer() we will call - * detach_extent_buffer_folio() on our remaining pages in the !subpage - * case. If we left eb->folios[i] populated in the subpage case we'd - * double put our reference and be super sad. + * Note: the bounds is num_extent_pages() as we need to go through all slots. */ - for (int i = 0; i < attached; i++) { - ASSERT(eb->folios[i]); - detach_extent_buffer_folio(eb, eb->folios[i]); - folio_unlock(eb->folios[i]); - folio_put(eb->folios[i]); + for (int i = 0; i < num_extent_pages(eb); i++) { + struct folio *folio = eb->folios[i]; + + if (i < attached) { + ASSERT(folio); + detach_extent_buffer_folio(eb, folio); + folio_unlock(folio); + } else if (!folio) { + continue; + } + + folio_put(folio); eb->folios[i] = NULL; } - /* - * Now all pages of that extent buffer is unmapped, set UNMAPPED flag, - * so it can be cleaned up without utilizing folio->mapping. 
- */ - set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); - btrfs_release_extent_buffer(eb); if (ret < 0) return ERR_PTR(ret); @@ -3428,18 +3440,27 @@ static int release_extent_buffer(struct extent_buffer *eb) WARN_ON(atomic_read(&eb->refs) == 0); if (atomic_dec_and_test(&eb->refs)) { - if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { - struct btrfs_fs_info *fs_info = eb->fs_info; + struct btrfs_fs_info *fs_info = eb->fs_info; - spin_unlock(&eb->refs_lock); + spin_unlock(&eb->refs_lock); - spin_lock(&fs_info->buffer_lock); - radix_tree_delete(&fs_info->buffer_radix, - eb->start >> fs_info->sectorsize_bits); - spin_unlock(&fs_info->buffer_lock); - } else { - spin_unlock(&eb->refs_lock); - } + /* + * We're erasing, theoretically there will be no allocations, so + * just use GFP_ATOMIC. + * + * We use cmpxchg instead of erase because we do not know if + * this eb is actually in the tree or not, we could be cleaning + * up an eb that we allocated but never inserted into the tree. + * Thus use cmpxchg to remove it from the tree if it is there, + * or leave the other entry if this isn't in the tree. + * + * The documentation says that putting a NULL value is the same + * as erase as long as XA_FLAGS_ALLOC is not set, which it isn't + * in this case. + */ + xa_cmpxchg_irq(&fs_info->buffer_tree, + eb->start >> fs_info->sectorsize_bits, eb, NULL, + GFP_ATOMIC); btrfs_leak_debug_del_eb(eb); /* Should be safe to release folios at this point. */ @@ -3540,6 +3561,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans, if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) return; + buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY); percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, fs_info->dirty_metadata_batch); @@ -3588,6 +3610,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb) folio_lock(eb->folios[0]); for (int i = 0; i < num_extent_folios(eb); i++) btrfs_meta_folio_set_dirty(eb->folios[i], eb); + buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY); if (subpage) folio_unlock(eb->folios[0]); percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, @@ -3647,12 +3670,10 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio) btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) uptodate = false; - if (uptodate) { + if (uptodate) set_extent_buffer_uptodate(eb); - } else { + else clear_extent_buffer_uptodate(eb); - set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); - } clear_extent_buffer_reading(eb); free_extent_buffer(eb); @@ -3691,7 +3712,6 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num, return 0; } - clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = 0; check_buffer_tree_ref(eb); atomic_inc(&eb->refs); @@ -3737,7 +3757,7 @@ static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, btrfs_warn(eb->fs_info, "access to eb bytenr %llu len %u out of range start %lu len %lu", eb->start, eb->len, start, len); - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN(); return true; } @@ -4273,71 +4293,17 @@ void memmove_extent_buffer(const struct extent_buffer *dst, } } -#define GANG_LOOKUP_SIZE 16 -static struct extent_buffer *get_next_extent_buffer( - const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr) -{ - struct extent_buffer *gang[GANG_LOOKUP_SIZE]; - struct extent_buffer *found = NULL; - u64 folio_start = folio_pos(folio); - u64 cur = folio_start; - - ASSERT(in_range(bytenr, folio_start, PAGE_SIZE)); - lockdep_assert_held(&fs_info->buffer_lock); - - while (cur < 
folio_start + PAGE_SIZE) { - int ret; - int i; - - ret = radix_tree_gang_lookup(&fs_info->buffer_radix, - (void **)gang, cur >> fs_info->sectorsize_bits, - min_t(unsigned int, GANG_LOOKUP_SIZE, - PAGE_SIZE / fs_info->nodesize)); - if (ret == 0) - goto out; - for (i = 0; i < ret; i++) { - /* Already beyond page end */ - if (gang[i]->start >= folio_start + PAGE_SIZE) - goto out; - /* Found one */ - if (gang[i]->start >= bytenr) { - found = gang[i]; - goto out; - } - } - cur = gang[ret - 1]->start + gang[ret - 1]->len; - } -out: - return found; -} - static int try_release_subpage_extent_buffer(struct folio *folio) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); - u64 cur = folio_pos(folio); - const u64 end = cur + PAGE_SIZE; + struct extent_buffer *eb; + unsigned long start = (folio_pos(folio) >> fs_info->sectorsize_bits); + unsigned long index = start; + unsigned long end = index + (PAGE_SIZE >> fs_info->sectorsize_bits) - 1; int ret; - while (cur < end) { - struct extent_buffer *eb = NULL; - - /* - * Unlike try_release_extent_buffer() which uses folio private - * to grab buffer, for subpage case we rely on radix tree, thus - * we need to ensure radix tree consistency. - * - * We also want an atomic snapshot of the radix tree, thus go - * with spinlock rather than RCU. - */ - spin_lock(&fs_info->buffer_lock); - eb = get_next_extent_buffer(fs_info, folio, cur); - if (!eb) { - /* No more eb in the page range after or at cur */ - spin_unlock(&fs_info->buffer_lock); - break; - } - cur = eb->start + eb->len; - + xa_lock_irq(&fs_info->buffer_tree); + xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) { /* * The same as try_release_extent_buffer(), to ensure the eb * won't disappear out from under us. @@ -4345,10 +4311,9 @@ static int try_release_subpage_extent_buffer(struct folio *folio) spin_lock(&eb->refs_lock); if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { spin_unlock(&eb->refs_lock); - spin_unlock(&fs_info->buffer_lock); - break; + continue; } - spin_unlock(&fs_info->buffer_lock); + xa_unlock_irq(&fs_info->buffer_tree); /* * If tree ref isn't set then we know the ref on this eb is a @@ -4366,7 +4331,10 @@ static int try_release_subpage_extent_buffer(struct folio *folio) * release_extent_buffer() will release the refs_lock. */ release_extent_buffer(eb); + xa_lock_irq(&fs_info->buffer_tree); } + xa_unlock_irq(&fs_info->buffer_tree); + /* * Finally to check if we have cleared folio private, as if we have * released all ebs in the page, the folio private should be cleared now. diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f5b28b5c4908..e36e8d6a00bc 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -38,16 +38,10 @@ struct btrfs_tree_parent_check; enum { EXTENT_BUFFER_UPTODATE, EXTENT_BUFFER_DIRTY, - EXTENT_BUFFER_CORRUPT, - /* this got triggered by readahead */ - EXTENT_BUFFER_READAHEAD, EXTENT_BUFFER_TREE_REF, EXTENT_BUFFER_STALE, EXTENT_BUFFER_WRITEBACK, - /* read IO error */ - EXTENT_BUFFER_READ_ERR, EXTENT_BUFFER_UNMAPPED, - EXTENT_BUFFER_IN_TREE, /* write IO error */ EXTENT_BUFFER_WRITE_ERR, /* Indicate the extent buffer is written zeroed out (for zoned) */ @@ -79,7 +73,7 @@ enum { * single word in a bitmap may straddle two pages in the extent buffer. 
*/ #define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE) -#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1) +#define BYTE_MASK ((1U << BITS_PER_BYTE) - 1) #define BITMAP_FIRST_BYTE_MASK(start) \ ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK) #define BITMAP_LAST_BYTE_MASK(nbits) \ @@ -246,6 +240,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc); int btree_write_cache_pages(struct address_space *mapping, struct writeback_control *wbc); +void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, u64 end); void btrfs_readahead(struct readahead_control *rac); int set_folio_extent_mapped(struct folio *folio); void clear_folio_extent_mapped(struct folio *folio); diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 7f46abbd6311..02bfdb976e40 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -13,7 +13,7 @@ static struct kmem_cache *extent_map_cache; -int __init extent_map_init(void) +int __init btrfs_extent_map_init(void) { extent_map_cache = kmem_cache_create("btrfs_extent_map", sizeof(struct extent_map), 0, 0, NULL); @@ -22,7 +22,7 @@ int __init extent_map_init(void) return 0; } -void __cold extent_map_exit(void) +void __cold btrfs_extent_map_exit(void) { kmem_cache_destroy(extent_map_cache); } @@ -31,7 +31,7 @@ void __cold extent_map_exit(void) * Initialize the extent tree @tree. Should be called for each new inode or * other user of the extent_map interface. */ -void extent_map_tree_init(struct extent_map_tree *tree) +void btrfs_extent_map_tree_init(struct extent_map_tree *tree) { tree->root = RB_ROOT; INIT_LIST_HEAD(&tree->modified_extents); @@ -42,7 +42,7 @@ void extent_map_tree_init(struct extent_map_tree *tree) * Allocate a new extent_map structure. The new structure is returned with a * reference count of one and needs to be freed using free_extent_map() */ -struct extent_map *alloc_extent_map(void) +struct extent_map *btrfs_alloc_extent_map(void) { struct extent_map *em; em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS); @@ -58,12 +58,12 @@ struct extent_map *alloc_extent_map(void) * Drop the reference out on @em by one and free the structure if the reference * count hits zero. 
*/ -void free_extent_map(struct extent_map *em) +void btrfs_free_extent_map(struct extent_map *em) { if (!em) return; if (refcount_dec_and_test(&em->refs)) { - WARN_ON(extent_map_in_tree(em)); + WARN_ON(btrfs_extent_map_in_tree(em)); WARN_ON(!list_empty(&em->list)); kmem_cache_free(extent_map_cache, em); } @@ -102,19 +102,19 @@ static int tree_insert(struct rb_root *root, struct extent_map *em) if (em->start < entry->start) p = &(*p)->rb_left; - else if (em->start >= extent_map_end(entry)) + else if (em->start >= btrfs_extent_map_end(entry)) p = &(*p)->rb_right; else return -EEXIST; } orig_parent = parent; - while (parent && em->start >= extent_map_end(entry)) { + while (parent && em->start >= btrfs_extent_map_end(entry)) { parent = rb_next(parent); entry = rb_entry(parent, struct extent_map, rb_node); } if (parent) - if (end > entry->start && em->start < extent_map_end(entry)) + if (end > entry->start && em->start < btrfs_extent_map_end(entry)) return -EEXIST; parent = orig_parent; @@ -124,7 +124,7 @@ static int tree_insert(struct rb_root *root, struct extent_map *em) entry = rb_entry(parent, struct extent_map, rb_node); } if (parent) - if (end > entry->start && em->start < extent_map_end(entry)) + if (end > entry->start && em->start < btrfs_extent_map_end(entry)) return -EEXIST; rb_link_node(&em->rb_node, orig_parent, p); @@ -136,8 +136,8 @@ static int tree_insert(struct rb_root *root, struct extent_map *em) * Search through the tree for an extent_map with a given offset. If it can't * be found, try to find some neighboring extents */ -static struct rb_node *__tree_search(struct rb_root *root, u64 offset, - struct rb_node **prev_or_next_ret) +static struct rb_node *tree_search(struct rb_root *root, u64 offset, + struct rb_node **prev_or_next_ret) { struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; @@ -154,14 +154,14 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, if (offset < entry->start) n = n->rb_left; - else if (offset >= extent_map_end(entry)) + else if (offset >= btrfs_extent_map_end(entry)) n = n->rb_right; else return n; } orig_prev = prev; - while (prev && offset >= extent_map_end(prev_entry)) { + while (prev && offset >= btrfs_extent_map_end(prev_entry)) { prev = rb_next(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); } @@ -188,14 +188,14 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, static inline u64 extent_map_block_len(const struct extent_map *em) { - if (extent_map_is_compressed(em)) + if (btrfs_extent_map_is_compressed(em)) return em->disk_num_bytes; return em->len; } static inline u64 extent_map_block_end(const struct extent_map *em) { - const u64 block_start = extent_map_block_start(em); + const u64 block_start = btrfs_extent_map_block_start(em); const u64 block_end = block_start + extent_map_block_len(em); if (block_end < block_start) @@ -210,7 +210,7 @@ static bool can_merge_extent_map(const struct extent_map *em) return false; /* Don't merge compressed extents, we need to know their actual size. */ - if (extent_map_is_compressed(em)) + if (btrfs_extent_map_is_compressed(em)) return false; if (em->flags & EXTENT_FLAG_LOGGING) @@ -230,7 +230,7 @@ static bool can_merge_extent_map(const struct extent_map *em) /* Check to see if two extent_map structs are adjacent and safe to merge. 
*/ static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next) { - if (extent_map_end(prev) != next->start) + if (btrfs_extent_map_end(prev) != next->start) return false; /* @@ -242,7 +242,7 @@ static bool mergeable_maps(const struct extent_map *prev, const struct extent_ma return false; if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1) - return extent_map_block_start(next) == extent_map_block_end(prev); + return btrfs_extent_map_block_start(next) == extent_map_block_end(prev); /* HOLES and INLINE extents. */ return next->disk_bytenr == prev->disk_bytenr; @@ -270,8 +270,8 @@ static void merge_ondisk_extents(const struct extent_map *prev, const struct ext u64 new_offset; /* @prev and @next should not be compressed. */ - ASSERT(!extent_map_is_compressed(prev)); - ASSERT(!extent_map_is_compressed(next)); + ASSERT(!btrfs_extent_map_is_compressed(prev)); + ASSERT(!btrfs_extent_map_is_compressed(next)); /* * There are two different cases where @prev and @next can be merged. @@ -327,9 +327,9 @@ static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map if (em->offset + em->len > em->ram_bytes) dump_extent_map(fs_info, "ram_bytes too small", em); if (em->offset + em->len > em->disk_num_bytes && - !extent_map_is_compressed(em)) + !btrfs_extent_map_is_compressed(em)) dump_extent_map(fs_info, "disk_num_bytes too small", em); - if (!extent_map_is_compressed(em) && + if (!btrfs_extent_map_is_compressed(em) && em->ram_bytes != em->disk_num_bytes) dump_extent_map(fs_info, "ram_bytes mismatch with disk_num_bytes for non-compressed em", @@ -361,8 +361,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em) if (em->start != 0) { rb = rb_prev(&em->rb_node); - if (rb) - merge = rb_entry(rb, struct extent_map, rb_node); + merge = rb_entry_safe(rb, struct extent_map, rb_node); + if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) { em->start = merge->start; em->len += merge->len; @@ -374,13 +374,13 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em) validate_extent_map(fs_info, em); remove_em(inode, merge); - free_extent_map(merge); + btrfs_free_extent_map(merge); } } rb = rb_next(&em->rb_node); - if (rb) - merge = rb_entry(rb, struct extent_map, rb_node); + merge = rb_entry_safe(rb, struct extent_map, rb_node); + if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) { em->len += merge->len; if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) @@ -389,7 +389,7 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em) em->generation = max(em->generation, merge->generation); em->flags |= EXTENT_FLAG_MERGED; remove_em(inode, merge); - free_extent_map(merge); + btrfs_free_extent_map(merge); } } @@ -409,7 +409,7 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em) * -ENOENT when the extent is not found in the tree * -EUCLEAN if the found extent does not match the expected start */ -int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen) +int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *tree = &inode->extent_tree; @@ -417,7 +417,7 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen) struct extent_map *em; write_lock(&tree->lock); - em = lookup_extent_mapping(tree, start, len); + em = btrfs_lookup_extent_mapping(tree, start, len); if (WARN_ON(!em)) { btrfs_warn(fs_info, @@ -444,17 
+444,17 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen) out: write_unlock(&tree->lock); - free_extent_map(em); + btrfs_free_extent_map(em); return ret; } -void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em) +void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em) { lockdep_assert_held_write(&inode->extent_tree.lock); em->flags &= ~EXTENT_FLAG_LOGGING; - if (extent_map_in_tree(em)) + if (btrfs_extent_map_in_tree(em)) try_merge_map(inode, em); } @@ -508,16 +508,15 @@ static int add_extent_mapping(struct btrfs_inode *inode, return 0; } -static struct extent_map * -__lookup_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len, int strict) +static struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len, int strict) { struct extent_map *em; struct rb_node *rb_node; struct rb_node *prev_or_next = NULL; u64 end = range_end(start, len); - rb_node = __tree_search(&tree->root, start, &prev_or_next); + rb_node = tree_search(&tree->root, start, &prev_or_next); if (!rb_node) { if (prev_or_next) rb_node = prev_or_next; @@ -527,7 +526,7 @@ __lookup_extent_mapping(struct extent_map_tree *tree, em = rb_entry(rb_node, struct extent_map, rb_node); - if (strict && !(end > em->start && start < extent_map_end(em))) + if (strict && !(end > em->start && start < btrfs_extent_map_end(em))) return NULL; refcount_inc(&em->refs); @@ -546,10 +545,10 @@ __lookup_extent_mapping(struct extent_map_tree *tree, * intersect, so check the object returned carefully to make sure that no * additional lookups are needed. */ -struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len) +struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len) { - return __lookup_extent_mapping(tree, start, len, 1); + return lookup_extent_mapping(tree, start, len, 1); } /* @@ -564,10 +563,10 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, * * If one can't be found, any nearby extent may be returned */ -struct extent_map *search_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len) +struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len) { - return __lookup_extent_mapping(tree, start, len, 0); + return lookup_extent_mapping(tree, start, len, 0); } /* @@ -579,7 +578,7 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree, * Remove @em from the extent tree of @inode. No reference counts are dropped, * and no checks are done to see if the range is in use. 
*/ -void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em) +void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em) { struct extent_map_tree *tree = &inode->extent_tree; @@ -605,7 +604,7 @@ static void replace_extent_mapping(struct btrfs_inode *inode, validate_extent_map(fs_info, new); WARN_ON(cur->flags & EXTENT_FLAG_PINNED); - ASSERT(extent_map_in_tree(cur)); + ASSERT(btrfs_extent_map_in_tree(cur)); if (!(cur->flags & EXTENT_FLAG_LOGGING)) list_del_init(&cur->list); rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root); @@ -651,7 +650,7 @@ static noinline int merge_extent_mapping(struct btrfs_inode *inode, u64 end; u64 start_diff; - if (map_start < em->start || map_start >= extent_map_end(em)) + if (map_start < em->start || map_start >= btrfs_extent_map_end(em)) return -EINVAL; if (existing->start > map_start) { @@ -662,10 +661,10 @@ static noinline int merge_extent_mapping(struct btrfs_inode *inode, next = next_extent_map(prev); } - start = prev ? extent_map_end(prev) : em->start; + start = prev ? btrfs_extent_map_end(prev) : em->start; start = max_t(u64, start, em->start); - end = next ? next->start : extent_map_end(em); - end = min_t(u64, end, extent_map_end(em)); + end = next ? next->start : btrfs_extent_map_end(em); + end = min_t(u64, end, btrfs_extent_map_end(em)); start_diff = start - em->start; em->start = start; em->len = end - start; @@ -716,7 +715,7 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode, if (ret == -EEXIST) { struct extent_map *existing; - existing = search_extent_mapping(&inode->extent_tree, start, len); + existing = btrfs_search_extent_mapping(&inode->extent_tree, start, len); trace_btrfs_handle_em_exist(fs_info, existing, em, start, len); @@ -725,8 +724,8 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode, * extent causing the -EEXIST. */ if (start >= existing->start && - start < extent_map_end(existing)) { - free_extent_map(em); + start < btrfs_extent_map_end(existing)) { + btrfs_free_extent_map(em); *em_in = existing; ret = 0; } else { @@ -739,14 +738,14 @@ int btrfs_add_extent_mapping(struct btrfs_inode *inode, */ ret = merge_extent_mapping(inode, existing, em, start); if (WARN_ON(ret)) { - free_extent_map(em); + btrfs_free_extent_map(em); *em_in = NULL; btrfs_warn(fs_info, "extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu", - existing->start, extent_map_end(existing), + existing->start, btrfs_extent_map_end(existing), orig_start, orig_start + orig_len, start); } - free_extent_map(existing); + btrfs_free_extent_map(existing); } } @@ -772,8 +771,8 @@ static void drop_all_extent_maps_fast(struct btrfs_inode *inode) em = rb_entry(node, struct extent_map, rb_node); em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING); - remove_extent_mapping(inode, em); - free_extent_map(em); + btrfs_remove_extent_mapping(inode, em); + btrfs_free_extent_map(em); if (cond_resched_rwlock_write(&tree->lock)) node = rb_first(&tree->root); @@ -826,15 +825,15 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, * range ends after our range (and they might be the same extent map), * because we need to split those two extent maps at the boundaries. 
*/ - split = alloc_extent_map(); - split2 = alloc_extent_map(); + split = btrfs_alloc_extent_map(); + split2 = btrfs_alloc_extent_map(); write_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, start, len); + em = btrfs_lookup_extent_mapping(em_tree, start, len); while (em) { /* extent_map_end() returns exclusive value (last byte + 1). */ - const u64 em_end = extent_map_end(em); + const u64 em_end = btrfs_extent_map_end(em); struct extent_map *next_em = NULL; u64 gen; unsigned long flags; @@ -898,7 +897,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, split->generation = gen; split->flags = flags; replace_extent_mapping(inode, em, split, modified); - free_extent_map(split); + btrfs_free_extent_map(split); split = split2; split2 = NULL; } @@ -925,7 +924,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, split->ram_bytes = split->len; } - if (extent_map_in_tree(em)) { + if (btrfs_extent_map_in_tree(em)) { replace_extent_mapping(inode, em, split, modified); } else { int ret; @@ -936,11 +935,11 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, if (WARN_ON(ret != 0) && modified) btrfs_set_inode_full_sync(inode); } - free_extent_map(split); + btrfs_free_extent_map(split); split = NULL; } remove_em: - if (extent_map_in_tree(em)) { + if (btrfs_extent_map_in_tree(em)) { /* * If the extent map is still in the tree it means that * either of the following is true: @@ -965,25 +964,25 @@ remove_em: ASSERT(!split); btrfs_set_inode_full_sync(inode); } - remove_extent_mapping(inode, em); + btrfs_remove_extent_mapping(inode, em); } /* * Once for the tree reference (we replaced or removed the * extent map from the tree). */ - free_extent_map(em); + btrfs_free_extent_map(em); next: /* Once for us (for our lookup reference). */ - free_extent_map(em); + btrfs_free_extent_map(em); em = next_em; } write_unlock(&em_tree->lock); - free_extent_map(split); - free_extent_map(split2); + btrfs_free_extent_map(split); + btrfs_free_extent_map(split2); } /* @@ -1007,7 +1006,7 @@ int btrfs_replace_extent_map_range(struct btrfs_inode *inode, struct extent_map_tree *tree = &inode->extent_tree; int ret; - ASSERT(!extent_map_in_tree(new_em)); + ASSERT(!btrfs_extent_map_in_tree(new_em)); /* * The caller has locked an appropriate file range in the inode's io @@ -1033,8 +1032,8 @@ int btrfs_replace_extent_map_range(struct btrfs_inode *inode, * * This function is used when an ordered_extent needs to be split. 
*/ -int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, - u64 new_logical) +int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, + u64 new_logical) { struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; @@ -1046,25 +1045,25 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, ASSERT(pre != 0); ASSERT(pre < len); - split_pre = alloc_extent_map(); + split_pre = btrfs_alloc_extent_map(); if (!split_pre) return -ENOMEM; - split_mid = alloc_extent_map(); + split_mid = btrfs_alloc_extent_map(); if (!split_mid) { ret = -ENOMEM; goto out_free_pre; } - lock_extent(&inode->io_tree, start, start + len - 1, NULL); + btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL); write_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, start, len); + em = btrfs_lookup_extent_mapping(em_tree, start, len); if (!em) { ret = -EIO; goto out_unlock; } ASSERT(em->len == len); - ASSERT(!extent_map_is_compressed(em)); + ASSERT(!btrfs_extent_map_is_compressed(em)); ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE); ASSERT(em->flags & EXTENT_FLAG_PINNED); ASSERT(!(em->flags & EXTENT_FLAG_LOGGING)); @@ -1093,7 +1092,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, /* Insert the middle extent_map. */ split_mid->start = em->start + pre; split_mid->len = em->len - pre; - split_mid->disk_bytenr = extent_map_block_start(em) + pre; + split_mid->disk_bytenr = btrfs_extent_map_block_start(em) + pre; split_mid->disk_num_bytes = split_mid->len; split_mid->offset = 0; split_mid->ram_bytes = split_mid->len; @@ -1102,16 +1101,16 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, add_extent_mapping(inode, split_mid, 1); /* Once for us */ - free_extent_map(em); + btrfs_free_extent_map(em); /* Once for the tree */ - free_extent_map(em); + btrfs_free_extent_map(em); out_unlock: write_unlock(&em_tree->lock); - unlock_extent(&inode->io_tree, start, start + len - 1, NULL); - free_extent_map(split_mid); + btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL); + btrfs_free_extent_map(split_mid); out_free_pre: - free_extent_map(split_pre); + btrfs_free_extent_map(split_pre); return ret; } @@ -1168,10 +1167,10 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c if (!list_empty(&em->list) && em->generation >= cur_fs_gen) btrfs_set_inode_full_sync(inode); - remove_extent_mapping(inode, em); + btrfs_remove_extent_mapping(inode, em); trace_btrfs_extent_map_shrinker_remove_em(inode, em); /* Drop the reference for the tree. 
*/ - free_extent_map(em); + btrfs_free_extent_map(em); nr_dropped++; next: if (ctx->scanned >= ctx->nr_to_scan) diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index cd123b266b64..d4b81ee4d97b 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -108,8 +108,8 @@ struct extent_map_tree { struct btrfs_inode; -static inline void extent_map_set_compression(struct extent_map *em, - enum btrfs_compression_type type) +static inline void btrfs_extent_map_set_compression(struct extent_map *em, + enum btrfs_compression_type type) { if (type == BTRFS_COMPRESS_ZLIB) em->flags |= EXTENT_FLAG_COMPRESS_ZLIB; @@ -119,7 +119,8 @@ static inline void extent_map_set_compression(struct extent_map *em, em->flags |= EXTENT_FLAG_COMPRESS_ZSTD; } -static inline enum btrfs_compression_type extent_map_compression(const struct extent_map *em) +static inline enum btrfs_compression_type btrfs_extent_map_compression( + const struct extent_map *em) { if (em->flags & EXTENT_FLAG_COMPRESS_ZLIB) return BTRFS_COMPRESS_ZLIB; @@ -137,50 +138,50 @@ static inline enum btrfs_compression_type extent_map_compression(const struct ex * More efficient way to determine if extent is compressed, instead of using * 'extent_map_compression() != BTRFS_COMPRESS_NONE'. */ -static inline bool extent_map_is_compressed(const struct extent_map *em) +static inline bool btrfs_extent_map_is_compressed(const struct extent_map *em) { return (em->flags & (EXTENT_FLAG_COMPRESS_ZLIB | EXTENT_FLAG_COMPRESS_LZO | EXTENT_FLAG_COMPRESS_ZSTD)) != 0; } -static inline int extent_map_in_tree(const struct extent_map *em) +static inline int btrfs_extent_map_in_tree(const struct extent_map *em) { return !RB_EMPTY_NODE(&em->rb_node); } -static inline u64 extent_map_block_start(const struct extent_map *em) +static inline u64 btrfs_extent_map_block_start(const struct extent_map *em) { if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { - if (extent_map_is_compressed(em)) + if (btrfs_extent_map_is_compressed(em)) return em->disk_bytenr; return em->disk_bytenr + em->offset; } return em->disk_bytenr; } -static inline u64 extent_map_end(const struct extent_map *em) +static inline u64 btrfs_extent_map_end(const struct extent_map *em) { if (em->start + em->len < em->start) return (u64)-1; return em->start + em->len; } -void extent_map_tree_init(struct extent_map_tree *tree); -struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len); -void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em); -int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, - u64 new_logical); - -struct extent_map *alloc_extent_map(void); -void free_extent_map(struct extent_map *em); -int __init extent_map_init(void); -void __cold extent_map_exit(void); -int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen); -void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em); -struct extent_map *search_extent_mapping(struct extent_map_tree *tree, - u64 start, u64 len); +void btrfs_extent_map_tree_init(struct extent_map_tree *tree); +struct extent_map *btrfs_lookup_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len); +void btrfs_remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em); +int btrfs_split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, + u64 new_logical); + +struct extent_map *btrfs_alloc_extent_map(void); +void btrfs_free_extent_map(struct extent_map *em); +int __init btrfs_extent_map_init(void); +void __cold 
btrfs_extent_map_exit(void); +int btrfs_unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen); +void btrfs_clear_em_logging(struct btrfs_inode *inode, struct extent_map *em); +struct extent_map *btrfs_search_extent_mapping(struct extent_map_tree *tree, + u64 start, u64 len); int btrfs_add_extent_mapping(struct btrfs_inode *inode, struct extent_map **em_in, u64 start, u64 len); void btrfs_drop_extent_map_range(struct btrfs_inode *inode, diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c index b80c07ad8c5e..43bf0979fd53 100644 --- a/fs/btrfs/fiemap.c +++ b/fs/btrfs/fiemap.c @@ -634,7 +634,7 @@ static int extent_fiemap(struct btrfs_inode *inode, const u64 ino = btrfs_ino(inode); struct extent_state *cached_state = NULL; struct extent_state *delalloc_cached_state = NULL; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct fiemap_cache cache = { 0 }; struct btrfs_backref_share_check_ctx *backref_ctx; u64 last_extent_end = 0; @@ -661,7 +661,7 @@ restart: range_end = round_up(start + len, sectorsize); prev_extent_end = range_start; - lock_extent(&inode->io_tree, range_start, range_end, &cached_state); + btrfs_lock_extent(&inode->io_tree, range_start, range_end, &cached_state); ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end); if (ret < 0) @@ -841,7 +841,7 @@ check_eof_delalloc: } out_unlock: - unlock_extent(&inode->io_tree, range_start, range_end, &cached_state); + btrfs_unlock_extent(&inode->io_tree, range_start, range_end, &cached_state); if (ret == BTRFS_FIEMAP_FLUSH_CACHE) { btrfs_release_path(path); @@ -871,10 +871,9 @@ out_unlock: ret = emit_last_fiemap_cache(fieinfo, &cache); out: - free_extent_state(delalloc_cached_state); + btrfs_free_extent_state(delalloc_cached_state); kfree(cache.entries); btrfs_free_backref_share_ctx(backref_ctx); - btrfs_free_path(path); return ret; } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 344b4db487a0..54d523d4f421 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -46,7 +46,7 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size) { u64 start, end, i_size; - int ret; + bool found; spin_lock(&inode->lock); i_size = new_i_size ?: i_size_read(&inode->vfs_inode); @@ -55,9 +55,9 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz goto out_unlock; } - ret = find_contiguous_extent_bit(inode->file_extent_tree, 0, &start, - &end, EXTENT_DIRTY); - if (!ret && start == 0) + found = btrfs_find_contiguous_extent_bit(inode->file_extent_tree, 0, &start, + &end, EXTENT_DIRTY); + if (found && start == 0) i_size = min(i_size, end + 1); else i_size = 0; @@ -91,8 +91,8 @@ int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start, ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize)); - return set_extent_bit(inode->file_extent_tree, start, start + len - 1, - EXTENT_DIRTY, NULL); + return btrfs_set_extent_bit(inode->file_extent_tree, start, start + len - 1, + EXTENT_DIRTY, NULL); } /* @@ -121,8 +121,8 @@ int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start, ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) || len == (u64)-1); - return clear_extent_bit(inode->file_extent_tree, start, - start + len - 1, EXTENT_DIRTY, NULL); + return btrfs_clear_extent_bit(inode->file_extent_tree, start, + start + len - 1, EXTENT_DIRTY, NULL); } static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes) @@ -336,7 +336,7 @@ out: * * Return: BLK_STS_RESOURCE if 
allocating memory fails, BLK_STS_OK otherwise. */ -blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio) +int btrfs_lookup_bio_sums(struct btrfs_bio *bbio) { struct btrfs_inode *inode = bbio->inode; struct btrfs_fs_info *fs_info = inode->root->fs_info; @@ -347,12 +347,12 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio) u32 orig_len = bio->bi_iter.bi_size; u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT; const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits; - blk_status_t ret = BLK_STS_OK; + int ret = 0; u32 bio_offset = 0; if ((inode->flags & BTRFS_INODE_NODATASUM) || test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state)) - return BLK_STS_OK; + return 0; /* * This function is only called for read bio. @@ -369,12 +369,12 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio) ASSERT(bio_op(bio) == REQ_OP_READ); path = btrfs_alloc_path(); if (!path) - return BLK_STS_RESOURCE; + return -ENOMEM; if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS); if (!bbio->csum) - return BLK_STS_RESOURCE; + return -ENOMEM; } else { bbio->csum = bbio->csum_inline; } @@ -406,7 +406,7 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio) count = search_csum_tree(fs_info, path, cur_disk_bytenr, orig_len - bio_offset, csum_dst); if (count < 0) { - ret = errno_to_blk_status(count); + ret = count; if (bbio->csum != bbio->csum_inline) kfree(bbio->csum); bbio->csum = NULL; @@ -430,9 +430,9 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio) if (btrfs_root_id(inode->root) == BTRFS_DATA_RELOC_TREE_OBJECTID) { u64 file_offset = bbio->file_offset + bio_offset; - set_extent_bit(&inode->io_tree, file_offset, - file_offset + sectorsize - 1, - EXTENT_NODATASUM, NULL); + btrfs_set_extent_bit(&inode->io_tree, file_offset, + file_offset + sectorsize - 1, + EXTENT_NODATASUM, NULL); } else { btrfs_warn_rl(fs_info, "csum hole found for disk bytenr range [%llu, %llu)", @@ -735,7 +735,7 @@ fail: /* * Calculate checksums of the data contained inside a bio. */ -blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) +int btrfs_csum_one_bio(struct btrfs_bio *bbio) { struct btrfs_ordered_extent *ordered = bbio->ordered; struct btrfs_inode *inode = bbio->inode; @@ -757,7 +757,7 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) memalloc_nofs_restore(nofs_flag); if (!sums) - return BLK_STS_RESOURCE; + return -ENOMEM; sums->len = bio->bi_iter.bi_size; INIT_LIST_HEAD(&sums->list); @@ -794,11 +794,11 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) * record the updated logical address on Zone Append completion. * Allocate just the structure with an empty sums array here for that case. 
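 *
 * Sketch only, not taken from this patch (example_csum_status() is a
 * hypothetical helper): with the checksum helpers now returning a plain
 * errno instead of a blk_status_t, a bio-layer caller would typically
 * translate back at the block-layer boundary:
 */
static blk_status_t example_csum_status(struct btrfs_bio *bbio)
{
	int ret = btrfs_csum_one_bio(bbio);	/* 0 or a negative errno */

	return errno_to_blk_status(ret);
}
/*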
*/ -blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio) +int btrfs_alloc_dummy_sum(struct btrfs_bio *bbio) { bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS); if (!bbio->sums) - return BLK_STS_RESOURCE; + return -ENOMEM; bbio->sums->len = bbio->bio.bi_iter.bi_size; bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; btrfs_add_ordered_sum(bbio->ordered, bbio->sums); @@ -1048,7 +1048,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_key file_key; struct btrfs_key found_key; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_csum_item *item; struct btrfs_csum_item *item_end; struct extent_buffer *leaf = NULL; @@ -1259,7 +1259,6 @@ found: goto again; } out: - btrfs_free_path(path); return ret; } @@ -1297,7 +1296,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, em->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); em->offset = btrfs_file_extent_offset(leaf, fi); if (compress_type != BTRFS_COMPRESS_NONE) { - extent_map_set_compression(em, compress_type); + btrfs_extent_map_set_compression(em, compress_type); } else { /* * Older kernels can create regular non-hole data @@ -1317,7 +1316,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, em->start = 0; em->len = fs_info->sectorsize; em->offset = 0; - extent_map_set_compression(em, compress_type); + btrfs_extent_map_set_compression(em, compress_type); } else { btrfs_err(fs_info, "unknown file extent item type %d, inode %llu, offset %llu, " diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h index 6181a70ec3ef..63216c43676d 100644 --- a/fs/btrfs/file-item.h +++ b/fs/btrfs/file-item.h @@ -53,7 +53,7 @@ static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) int btrfs_del_csums(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 len); -blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio); +int btrfs_lookup_bio_sums(struct btrfs_bio *bbio); int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, u64 num_bytes); @@ -64,8 +64,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_ordered_sum *sums); -blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio); -blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio); +int btrfs_csum_one_bio(struct btrfs_bio *bbio); +int btrfs_alloc_dummy_sum(struct btrfs_bio *bbio); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit, bool nowait); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 71b8a825c447..8ce6f45f45e0 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -98,9 +98,9 @@ int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos * The pages may have already been dirty, clear out old accounting so * we can set things up properly */ - clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block, - EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, - cached); + btrfs_clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block, + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, + cached); ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, extra_bits, cached); @@ -508,20 +508,19 @@ out: return ret; } -static int extent_mergeable(struct extent_buffer *leaf, int slot, - u64 objectid, u64 bytenr, u64 orig_offset, - 
u64 *start, u64 *end) +static bool extent_mergeable(struct extent_buffer *leaf, int slot, u64 objectid, + u64 bytenr, u64 orig_offset, u64 *start, u64 *end) { struct btrfs_file_extent_item *fi; struct btrfs_key key; u64 extent_end; if (slot < 0 || slot >= btrfs_header_nritems(leaf)) - return 0; + return false; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY) - return 0; + return false; fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG || @@ -530,15 +529,15 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot, btrfs_file_extent_compression(leaf, fi) || btrfs_file_extent_encryption(leaf, fi) || btrfs_file_extent_other_encoding(leaf, fi)) - return 0; + return false; extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); if ((*start && *start != key.offset) || (*end && *end != extent_end)) - return 0; + return false; *start = key.offset; *end = extent_end; - return 1; + return true; } /* @@ -553,7 +552,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, { struct btrfs_root *root = inode->root; struct extent_buffer *leaf; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_file_extent_item *fi; struct btrfs_ref ref = { 0 }; struct btrfs_key key; @@ -791,7 +790,6 @@ again: } } out: - btrfs_free_path(path); return ret; } @@ -800,7 +798,7 @@ out: * On success return a locked folio and 0 */ static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 pos, - u64 len, bool force_uptodate) + u64 len) { u64 clamp_start = max_t(u64, pos, folio_pos(folio)); u64 clamp_end = min_t(u64, pos + len, folio_pos(folio) + folio_size(folio)); @@ -810,8 +808,7 @@ static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 if (folio_test_uptodate(folio)) return 0; - if (!force_uptodate && - IS_ALIGNED(clamp_start, blocksize) && + if (IS_ALIGNED(clamp_start, blocksize) && IS_ALIGNED(clamp_end, blocksize)) return 0; @@ -858,32 +855,27 @@ static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait) */ static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ret, loff_t pos, size_t write_bytes, - bool force_uptodate, bool nowait) + bool nowait) { unsigned long index = pos >> PAGE_SHIFT; gfp_t mask = get_prepare_gfp_flags(inode, nowait); - fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN); + fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN) | + fgf_set_order(write_bytes); struct folio *folio; int ret = 0; again: folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags, mask); - if (IS_ERR(folio)) { - if (nowait) - ret = -EAGAIN; - else - ret = PTR_ERR(folio); - return ret; - } - /* Only support page sized folio yet. */ - ASSERT(folio_order(folio) == 0); + if (IS_ERR(folio)) + return PTR_ERR(folio); + ret = set_folio_extent_mapped(folio); if (ret < 0) { folio_unlock(folio); folio_put(folio); return ret; } - ret = prepare_uptodate_folio(inode, folio, pos, write_bytes, force_uptodate); + ret = prepare_uptodate_folio(inode, folio, pos, write_bytes); if (ret) { /* The folio is already unlocked. 
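 *
 * Sketch only, not part of this patch (example_get_write_folio() is a
 * hypothetical helper): the FGP_WRITEBEGIN | fgf_set_order() combination
 * used above asks the page cache for a folio large enough to cover the
 * write, so folio_order() can now be non-zero:
 */
static struct folio *example_get_write_folio(struct address_space *mapping,
					     pgoff_t index, size_t write_bytes)
{
	fgf_t fgp_flags = FGP_WRITEBEGIN | fgf_set_order(write_bytes);

	/* Returns a (possibly large) locked folio or an ERR_PTR() on failure. */
	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}
/*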
*/ folio_put(folio); @@ -924,14 +916,15 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio, struct btrfs_ordered_extent *ordered; if (nowait) { - if (!try_lock_extent(&inode->io_tree, start_pos, last_pos, - cached_state)) { + if (!btrfs_try_lock_extent(&inode->io_tree, start_pos, + last_pos, cached_state)) { folio_unlock(folio); folio_put(folio); return -EAGAIN; } } else { - lock_extent(&inode->io_tree, start_pos, last_pos, cached_state); + btrfs_lock_extent(&inode->io_tree, start_pos, last_pos, + cached_state); } ordered = btrfs_lookup_ordered_range(inode, start_pos, @@ -939,8 +932,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio, if (ordered && ordered->file_offset + ordered->num_bytes > start_pos && ordered->file_offset <= last_pos) { - unlock_extent(&inode->io_tree, start_pos, last_pos, - cached_state); + btrfs_unlock_extent(&inode->io_tree, start_pos, last_pos, + cached_state); folio_unlock(folio); folio_put(folio); btrfs_start_ordered_extent(ordered); @@ -1020,7 +1013,7 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, else *write_bytes = min_t(size_t, *write_bytes , num_bytes - pos + lockstart); - unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state); + btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state); return ret; } @@ -1077,241 +1070,306 @@ int btrfs_write_check(struct kiocb *iocb, size_t count) return 0; } -ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i) +static void release_space(struct btrfs_inode *inode, struct extent_changeset *data_reserved, + u64 start, u64 len, bool only_release_metadata) { - struct file *file = iocb->ki_filp; - loff_t pos; - struct inode *inode = file_inode(file); - struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); - struct extent_changeset *data_reserved = NULL; - u64 release_bytes = 0; - u64 lockstart; - u64 lockend; - size_t num_written = 0; - ssize_t ret; - loff_t old_isize; - unsigned int ilock_flags = 0; - const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); - unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0); - bool only_release_metadata = false; - - if (nowait) - ilock_flags |= BTRFS_ILOCK_TRY; + if (len == 0) + return; - ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags); - if (ret < 0) - return ret; + if (only_release_metadata) { + btrfs_check_nocow_unlock(inode); + btrfs_delalloc_release_metadata(inode, len, true); + } else { + const struct btrfs_fs_info *fs_info = inode->root->fs_info; - /* - * We can only trust the isize with inode lock held, or it can race with - * other buffered writes and cause incorrect call of - * pagecache_isize_extended() to overwrite existing data. - */ - old_isize = i_size_read(inode); + btrfs_delalloc_release_space(inode, data_reserved, + round_down(start, fs_info->sectorsize), + len, true); + } +} - ret = generic_write_checks(iocb, i); - if (ret <= 0) - goto out; +/* + * Reserve data and metadata space for this buffered write range. + * + * Return >0 for the number of bytes reserved, which is always block aligned. + * Return <0 for error. 
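 *
 * Worked example with illustrative values only: with a 4K sector size, a
 * write of 5000 bytes at file offset 100 has block_offset = 100, so
 * reserve_bytes = round_up(5000 + 100, 4096) = 8192 and the function
 * returns 8192, covering the two blocks the write touches.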
+ */ +static ssize_t reserve_space(struct btrfs_inode *inode, + struct extent_changeset **data_reserved, + u64 start, size_t *len, bool nowait, + bool *only_release_metadata) +{ + const struct btrfs_fs_info *fs_info = inode->root->fs_info; + const unsigned int block_offset = (start & (fs_info->sectorsize - 1)); + size_t reserve_bytes; + int ret; - ret = btrfs_write_check(iocb, ret); - if (ret < 0) - goto out; + ret = btrfs_check_data_free_space(inode, data_reserved, start, *len, nowait); + if (ret < 0) { + int can_nocow; - pos = iocb->ki_pos; - while (iov_iter_count(i) > 0) { - struct extent_state *cached_state = NULL; - size_t offset = offset_in_page(pos); - size_t sector_offset; - size_t write_bytes = min(iov_iter_count(i), PAGE_SIZE - offset); - size_t reserve_bytes; - size_t copied; - size_t dirty_sectors; - size_t num_sectors; - struct folio *folio = NULL; - int extents_locked; - bool force_page_uptodate = false; + if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) + return -EAGAIN; /* - * Fault pages before locking them in prepare_one_folio() - * to avoid recursive lock + * If we don't have to COW at the offset, reserve metadata only. + * write_bytes may get smaller than requested here. */ - if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) { - ret = -EFAULT; - break; - } + can_nocow = btrfs_check_nocow_lock(inode, start, len, nowait); + if (can_nocow < 0) + ret = can_nocow; + if (can_nocow > 0) + ret = 0; + if (ret) + return ret; + *only_release_metadata = true; + } - only_release_metadata = false; - sector_offset = pos & (fs_info->sectorsize - 1); + reserve_bytes = round_up(*len + block_offset, fs_info->sectorsize); + WARN_ON(reserve_bytes == 0); + ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes, + reserve_bytes, nowait); + if (ret) { + if (!*only_release_metadata) + btrfs_free_reserved_data_space(inode, *data_reserved, + start, *len); + else + btrfs_check_nocow_unlock(inode); - extent_changeset_release(data_reserved); - ret = btrfs_check_data_free_space(BTRFS_I(inode), - &data_reserved, pos, - write_bytes, nowait); - if (ret < 0) { - int can_nocow; + if (nowait && ret == -ENOSPC) + ret = -EAGAIN; + return ret; + } + return reserve_bytes; +} - if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) { - ret = -EAGAIN; - break; - } +/* Shrink the reserved data and metadata space from @reserved_len to @new_len. */ +static void shrink_reserved_space(struct btrfs_inode *inode, + struct extent_changeset *data_reserved, + u64 reserved_start, u64 reserved_len, + u64 new_len, bool only_release_metadata) +{ + const u64 diff = reserved_len - new_len; - /* - * If we don't have to COW at the offset, reserve - * metadata only. write_bytes may get smaller than - * requested here. 
- */ - can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos, - &write_bytes, nowait); - if (can_nocow < 0) - ret = can_nocow; - if (can_nocow > 0) - ret = 0; - if (ret) - break; - only_release_metadata = true; - } + ASSERT(new_len <= reserved_len); + btrfs_delalloc_shrink_extents(inode, reserved_len, new_len); + if (only_release_metadata) + btrfs_delalloc_release_metadata(inode, diff, true); + else + btrfs_delalloc_release_space(inode, data_reserved, + reserved_start + new_len, diff, true); +} - reserve_bytes = round_up(write_bytes + sector_offset, - fs_info->sectorsize); - WARN_ON(reserve_bytes == 0); - ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), - reserve_bytes, - reserve_bytes, nowait); - if (ret) { - if (!only_release_metadata) - btrfs_free_reserved_data_space(BTRFS_I(inode), - data_reserved, pos, - write_bytes); - else - btrfs_check_nocow_unlock(BTRFS_I(inode)); +/* Calculate the maximum amount of bytes we can write into one folio. */ +static size_t calc_write_bytes(const struct btrfs_inode *inode, + const struct iov_iter *iter, u64 start) +{ + const size_t max_folio_size = mapping_max_folio_size(inode->vfs_inode.i_mapping); - if (nowait && ret == -ENOSPC) - ret = -EAGAIN; - break; - } + return min(max_folio_size - (start & (max_folio_size - 1)), + iov_iter_count(iter)); +} + +/* + * Do the heavy-lifting work to copy one range into one folio of the page cache. + * + * Return > 0 in case we copied all bytes or just some of them. + * Return 0 if no bytes were copied, in which case the caller should retry. + * Return <0 on error. + */ +static int copy_one_range(struct btrfs_inode *inode, struct iov_iter *iter, + struct extent_changeset **data_reserved, u64 start, + bool nowait) +{ + struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct extent_state *cached_state = NULL; + size_t write_bytes = calc_write_bytes(inode, iter, start); + size_t copied; + const u64 reserved_start = round_down(start, fs_info->sectorsize); + u64 reserved_len; + struct folio *folio = NULL; + int extents_locked; + u64 lockstart; + u64 lockend; + bool only_release_metadata = false; + const unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0); + int ret; + + /* + * Fault all pages before locking them in prepare_one_folio() to avoid + * recursive lock. + */ + if (unlikely(fault_in_iov_iter_readable(iter, write_bytes))) + return -EFAULT; + extent_changeset_release(*data_reserved); + ret = reserve_space(inode, data_reserved, start, &write_bytes, nowait, + &only_release_metadata); + if (ret < 0) + return ret; + reserved_len = ret; + /* Write range must be inside the reserved range. 
*/ + ASSERT(reserved_start <= start); + ASSERT(start + write_bytes <= reserved_start + reserved_len); - release_bytes = reserve_bytes; again: - ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags); - if (ret) { - btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes); - break; - } + ret = balance_dirty_pages_ratelimited_flags(inode->vfs_inode.i_mapping, + bdp_flags); + if (ret) { + btrfs_delalloc_release_extents(inode, reserved_len); + release_space(inode, *data_reserved, reserved_start, reserved_len, + only_release_metadata); + return ret; + } - ret = prepare_one_folio(inode, &folio, pos, write_bytes, - force_page_uptodate, false); - if (ret) { - btrfs_delalloc_release_extents(BTRFS_I(inode), - reserve_bytes); - break; - } + ret = prepare_one_folio(&inode->vfs_inode, &folio, start, write_bytes, false); + if (ret) { + btrfs_delalloc_release_extents(inode, reserved_len); + release_space(inode, *data_reserved, reserved_start, reserved_len, + only_release_metadata); + return ret; + } + + /* + * The reserved range goes beyond the current folio, shrink the reserved + * space to the folio boundary. + */ + if (reserved_start + reserved_len > folio_pos(folio) + folio_size(folio)) { + const u64 last_block = folio_pos(folio) + folio_size(folio); + + shrink_reserved_space(inode, *data_reserved, reserved_start, + reserved_len, last_block - reserved_start, + only_release_metadata); + write_bytes = last_block - start; + reserved_len = last_block - reserved_start; + } + + extents_locked = lock_and_cleanup_extent_if_need(inode, folio, start, + write_bytes, &lockstart, + &lockend, nowait, + &cached_state); + if (extents_locked < 0) { + if (!nowait && extents_locked == -EAGAIN) + goto again; - extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode), - folio, pos, write_bytes, &lockstart, - &lockend, nowait, &cached_state); - if (extents_locked < 0) { - if (!nowait && extents_locked == -EAGAIN) - goto again; + btrfs_delalloc_release_extents(inode, reserved_len); + release_space(inode, *data_reserved, reserved_start, reserved_len, + only_release_metadata); + ret = extents_locked; + return ret; + } - btrfs_delalloc_release_extents(BTRFS_I(inode), - reserve_bytes); - ret = extents_locked; - break; - } + copied = copy_folio_from_iter_atomic(folio, offset_in_folio(folio, start), + write_bytes, iter); + flush_dcache_folio(folio); - copied = copy_folio_from_iter_atomic(folio, - offset_in_folio(folio, pos), write_bytes, i); - flush_dcache_folio(folio); + if (unlikely(copied < write_bytes)) { + u64 last_block; /* - * If we get a partial write, we can end up with partially - * uptodate page. Although if sector size < page size we can - * handle it, but if it's not sector aligned it can cause - * a lot of complexity, so make sure they don't happen by - * forcing retry this copy. + * The original write range doesn't need an uptodate folio as + * the range is block aligned. But now a short copy happened. + * We cannot handle it without an uptodate folio. + * + * So just revert the range and we will retry. */ - if (unlikely(copied < write_bytes)) { - if (!folio_test_uptodate(folio)) { - iov_iter_revert(i, copied); - copied = 0; - } + if (!folio_test_uptodate(folio)) { + iov_iter_revert(iter, copied); + copied = 0; } - num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes); - dirty_sectors = round_up(copied + sector_offset, - fs_info->sectorsize); - dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors); - + /* No copied bytes, unlock, release reserved space and exit. 
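 *
 * Illustrative numbers only for the partial-copy shrink a bit further
 * below: with 4K blocks, reserved_start = 4096 and reserved_len = 8192, a
 * short copy of 1000 bytes gives last_block = round_up(4096 + 1000, 4096)
 * = 8192, so the reservation shrinks to 4096 bytes and only the block
 * that was actually dirtied stays reserved.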
*/ if (copied == 0) { - force_page_uptodate = true; - dirty_sectors = 0; - } else { - force_page_uptodate = false; + if (extents_locked) + btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, + &cached_state); + else + btrfs_free_extent_state(cached_state); + btrfs_delalloc_release_extents(inode, reserved_len); + release_space(inode, *data_reserved, reserved_start, reserved_len, + only_release_metadata); + btrfs_drop_folio(fs_info, folio, start, copied); + return 0; } - if (num_sectors > dirty_sectors) { - /* release everything except the sectors we dirtied */ - release_bytes -= dirty_sectors << fs_info->sectorsize_bits; - if (only_release_metadata) { - btrfs_delalloc_release_metadata(BTRFS_I(inode), - release_bytes, true); - } else { - u64 release_start = round_up(pos + copied, - fs_info->sectorsize); - btrfs_delalloc_release_space(BTRFS_I(inode), - data_reserved, release_start, - release_bytes, true); - } - } + /* Release the reserved space beyond the last block. */ + last_block = round_up(start + copied, fs_info->sectorsize); + + shrink_reserved_space(inode, *data_reserved, reserved_start, + reserved_len, last_block - reserved_start, + only_release_metadata); + reserved_len = last_block - reserved_start; + } - release_bytes = round_up(copied + sector_offset, - fs_info->sectorsize); + ret = btrfs_dirty_folio(inode, folio, start, copied, &cached_state, + only_release_metadata); + /* + * If we have not locked the extent range, because the range's start + * offset is >= i_size, we might still have a non-NULL cached extent + * state, acquired while marking the extent range as delalloc through + * btrfs_dirty_page(). Therefore free any possible cached extent state + * to avoid a memory leak. + */ + if (extents_locked) + btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state); + else + btrfs_free_extent_state(cached_state); - ret = btrfs_dirty_folio(BTRFS_I(inode), folio, pos, copied, - &cached_state, only_release_metadata); + btrfs_delalloc_release_extents(inode, reserved_len); + if (ret) { + btrfs_drop_folio(fs_info, folio, start, copied); + release_space(inode, *data_reserved, reserved_start, reserved_len, + only_release_metadata); + return ret; + } + if (only_release_metadata) + btrfs_check_nocow_unlock(inode); - /* - * If we have not locked the extent range, because the range's - * start offset is >= i_size, we might still have a non-NULL - * cached extent state, acquired while marking the extent range - * as delalloc through btrfs_dirty_page(). Therefore free any - * possible cached extent state to avoid a memory leak. 
- */ - if (extents_locked) - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, - lockend, &cached_state); - else - free_extent_state(cached_state); + btrfs_drop_folio(fs_info, folio, start, copied); + return copied; +} - btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes); - if (ret) { - btrfs_drop_folio(fs_info, folio, pos, copied); - break; - } +ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *iter) +{ + struct file *file = iocb->ki_filp; + loff_t pos; + struct inode *inode = file_inode(file); + struct extent_changeset *data_reserved = NULL; + size_t num_written = 0; + ssize_t ret; + loff_t old_isize; + unsigned int ilock_flags = 0; + const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); - release_bytes = 0; - if (only_release_metadata) - btrfs_check_nocow_unlock(BTRFS_I(inode)); + if (nowait) + ilock_flags |= BTRFS_ILOCK_TRY; - btrfs_drop_folio(fs_info, folio, pos, copied); + ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags); + if (ret < 0) + return ret; - cond_resched(); + /* + * We can only trust the isize with inode lock held, or it can race with + * other buffered writes and cause incorrect call of + * pagecache_isize_extended() to overwrite existing data. + */ + old_isize = i_size_read(inode); - pos += copied; - num_written += copied; - } + ret = generic_write_checks(iocb, iter); + if (ret <= 0) + goto out; - if (release_bytes) { - if (only_release_metadata) { - btrfs_check_nocow_unlock(BTRFS_I(inode)); - btrfs_delalloc_release_metadata(BTRFS_I(inode), - release_bytes, true); - } else { - btrfs_delalloc_release_space(BTRFS_I(inode), - data_reserved, - round_down(pos, fs_info->sectorsize), - release_bytes, true); - } + ret = btrfs_write_check(iocb, ret); + if (ret < 0) + goto out; + + pos = iocb->ki_pos; + while (iov_iter_count(iter) > 0) { + ret = copy_one_range(BTRFS_I(inode), iter, &data_reserved, pos, nowait); + if (ret < 0) + break; + pos += ret; + num_written += ret; + cond_resched(); } extent_changeset_free(data_reserved); @@ -1406,7 +1464,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp) if (private) { kfree(private->filldir_buf); - free_extent_state(private->llseek_cached_state); + btrfs_free_extent_state(private->llseek_cached_state); kfree(private); filp->private_data = NULL; } @@ -1783,16 +1841,12 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) unsigned long zero_start; loff_t size; size_t fsize = folio_size(folio); - vm_fault_t ret; - int ret2; - int reserved = 0; + int ret; u64 reserved_space; u64 page_start; u64 page_end; u64 end; - ASSERT(folio_order(folio) == 0); - reserved_space = fsize; sb_start_pagefault(inode->i_sb); @@ -1808,21 +1862,14 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) * end up waiting indefinitely to get a lock on the page currently * being processed by btrfs_page_mkwrite() function. */ - ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, - page_start, reserved_space); - if (!ret2) { - ret2 = file_update_time(vmf->vma->vm_file); - reserved = 1; - } - if (ret2) { - ret = vmf_error(ret2); - if (reserved) - goto out; + ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, + page_start, reserved_space); + if (ret < 0) goto out_noreserve; - } - /* Make the VM retry the fault. 
*/ - ret = VM_FAULT_NOPAGE; + ret = file_update_time(vmf->vma->vm_file); + if (ret < 0) + goto out; again: down_read(&BTRFS_I(inode)->i_mmap_lock); folio_lock(folio); @@ -1835,11 +1882,10 @@ again: } folio_wait_writeback(folio); - lock_extent(io_tree, page_start, page_end, &cached_state); - ret2 = set_folio_extent_mapped(folio); - if (ret2 < 0) { - ret = vmf_error(ret2); - unlock_extent(io_tree, page_start, page_end, &cached_state); + btrfs_lock_extent(io_tree, page_start, page_end, &cached_state); + ret = set_folio_extent_mapped(folio); + if (ret < 0) { + btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state); goto out_unlock; } @@ -1849,7 +1895,7 @@ again: */ ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize); if (ordered) { - unlock_extent(io_tree, page_start, page_end, &cached_state); + btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state); folio_unlock(folio); up_read(&BTRFS_I(inode)->i_mmap_lock); btrfs_start_ordered_extent(ordered); @@ -1857,12 +1903,12 @@ again: goto again; } - if (folio->index == ((size - 1) >> PAGE_SHIFT)) { + if (folio_contains(folio, (size - 1) >> PAGE_SHIFT)) { reserved_space = round_up(size - page_start, fs_info->sectorsize); if (reserved_space < fsize) { end = page_start + reserved_space - 1; btrfs_delalloc_release_space(BTRFS_I(inode), - data_reserved, page_start, + data_reserved, end + 1, fsize - reserved_space, true); } } @@ -1874,15 +1920,14 @@ again: * clear any delalloc bits within this page range since we have to * reserve data&meta space before lock_page() (see above comments). */ - clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, - EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | - EXTENT_DEFRAG, &cached_state); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | + EXTENT_DEFRAG, &cached_state); - ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, + ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0, &cached_state); - if (ret2) { - unlock_extent(io_tree, page_start, page_end, &cached_state); - ret = VM_FAULT_SIGBUS; + if (ret < 0) { + btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state); goto out_unlock; } @@ -1901,7 +1946,7 @@ again: btrfs_set_inode_last_sub_trans(BTRFS_I(inode)); - unlock_extent(io_tree, page_start, page_end, &cached_state); + btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state); up_read(&BTRFS_I(inode)->i_mmap_lock); btrfs_delalloc_release_extents(BTRFS_I(inode), fsize); @@ -1915,11 +1960,16 @@ out_unlock: out: btrfs_delalloc_release_extents(BTRFS_I(inode), fsize); btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start, - reserved_space, (ret != 0)); + reserved_space, true); + extent_changeset_free(data_reserved); out_noreserve: sb_end_pagefault(inode->i_sb); - extent_changeset_free(data_reserved); - return ret; + + if (ret < 0) + return vmf_error(ret); + + /* Make the VM retry the fault. 
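 *
 * Note, not part of this patch: on the error paths above the negative
 * errno in ret is mapped to a vm_fault_t by vmf_error() (for instance
 * -ENOMEM becomes VM_FAULT_OOM), while success falls through to
 * VM_FAULT_NOPAGE below so the VM retries the fault against the
 * now-prepared page.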
*/ + return VM_FAULT_NOPAGE; } static const struct vm_operations_struct btrfs_file_vm_ops = { @@ -1941,33 +1991,33 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) return 0; } -static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, - int slot, u64 start, u64 end) +static bool hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, + int slot, u64 start, u64 end) { struct btrfs_file_extent_item *fi; struct btrfs_key key; if (slot < 0 || slot >= btrfs_header_nritems(leaf)) - return 0; + return false; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) - return 0; + return false; fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) - return 0; + return false; if (btrfs_file_extent_disk_bytenr(leaf, fi)) - return 0; + return false; if (key.offset == end) - return 1; + return true; if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start) - return 1; - return 0; + return true; + return false; } static int fill_holes(struct btrfs_trans_handle *trans, @@ -2041,7 +2091,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, out: btrfs_release_path(path); - hole_em = alloc_extent_map(); + hole_em = btrfs_alloc_extent_map(); if (!hole_em) { btrfs_drop_extent_map_range(inode, offset, end - 1, false); btrfs_set_inode_full_sync(inode); @@ -2055,7 +2105,7 @@ out: hole_em->generation = trans->transid; ret = btrfs_replace_extent_map_range(inode, hole_em, true); - free_extent_map(hole_em); + btrfs_free_extent_map(hole_em); if (ret) btrfs_set_inode_full_sync(inode); } @@ -2088,15 +2138,33 @@ static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len) 0 : *start + *len - em->start - em->len; *start = em->start + em->len; } - free_extent_map(em); + btrfs_free_extent_map(em); return ret; } -static void btrfs_punch_hole_lock_range(struct inode *inode, - const u64 lockstart, - const u64 lockend, - struct extent_state **cached_state) +/* + * Check if there is no folio in the range. + * + * We cannot utilize filemap_range_has_page() in a filemap with large folios + * as we can hit the following false positive: + * + * start end + * | | + * |//|//|//|//| | | | | | | | |//|//| + * \ / \ / + * Folio A Folio B + * + * That large folio A and B cover the start and end indexes. + * In that case filemap_range_has_page() will always return true, but the above + * case is fine for btrfs_punch_hole_lock_range() usage. + * + * So here we only ensure that no other folios is in the range, excluding the + * head/tail large folio. + */ +static bool check_range_has_page(struct inode *inode, u64 start, u64 end) { + struct folio_batch fbatch; + bool ret = false; /* * For subpage case, if the range is not at page boundary, we could * have pages at the leading/tailing part of the range. @@ -2107,17 +2175,45 @@ static void btrfs_punch_hole_lock_range(struct inode *inode, * * And do not decrease page_lockend right now, as it can be 0. */ - const u64 page_lockstart = round_up(lockstart, PAGE_SIZE); - const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE); + const u64 page_lockstart = round_up(start, PAGE_SIZE); + const u64 page_lockend = round_down(end + 1, PAGE_SIZE); + const pgoff_t start_index = page_lockstart >> PAGE_SHIFT; + const pgoff_t end_index = (page_lockend - 1) >> PAGE_SHIFT; + pgoff_t tmp = start_index; + int found_folios; + + /* The same page or adjacent pages. 
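 *
 * Illustrative numbers only: with 4K pages, a punch-hole range of
 * [1000, 5000] gives page_lockstart = 4096 and page_lockend =
 * round_down(5001, 4096) = 4096, so page_lockend <= page_lockstart and
 * there is no fully covered page left to check.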
*/ + if (page_lockend <= page_lockstart) + return false; + folio_batch_init(&fbatch); + found_folios = filemap_get_folios(inode->i_mapping, &tmp, end_index, &fbatch); + for (int i = 0; i < found_folios; i++) { + struct folio *folio = fbatch.folios[i]; + + /* A large folio begins before the start. Not a target. */ + if (folio->index < start_index) + continue; + /* A large folio extends beyond the end. Not a target. */ + if (folio->index + folio_nr_pages(folio) > end_index) + continue; + /* A folio doesn't cover the head/tail index. Found a target. */ + ret = true; + break; + } + folio_batch_release(&fbatch); + return ret; +} + +static void btrfs_punch_hole_lock_range(struct inode *inode, + const u64 lockstart, const u64 lockend, + struct extent_state **cached_state) +{ while (1) { truncate_pagecache_range(inode, lockstart, lockend); - lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, - cached_state); - /* The same page or adjacent pages. */ - if (page_lockend <= page_lockstart) - break; + btrfs_lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, + cached_state); /* * We can't have ordered extents in the range, nor dirty/writeback * pages, because we have locked the inode's VFS lock in exclusive @@ -2128,12 +2224,11 @@ static void btrfs_punch_hole_lock_range(struct inode *inode, * locking the range check if we have pages in the range, and if * we do, unlock the range and retry. */ - if (!filemap_range_has_page(inode->i_mapping, page_lockstart, - page_lockend - 1)) + if (!check_range_has_page(inode, lockstart, lockend)) break; - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, - cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, + cached_state); } btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend); @@ -2506,7 +2601,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) u64 lockend; u64 tail_start; u64 tail_len; - u64 orig_start = offset; + const u64 orig_start = offset; + const u64 orig_end = offset + len - 1; int ret = 0; bool same_block; u64 ino_size; @@ -2538,18 +2634,14 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset)) == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)); /* - * We needn't truncate any block which is beyond the end of the file - * because we are sure there is no data there. - */ - /* * Only do this if we are in the same block and we aren't doing the * entire block. 
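 *
 * Illustrative numbers only: with a 4K sector size, punching [1000, 1099]
 * (offset 1000, len 100) stays within block 0, so same_block is true and
 * the hole is handled by zeroing through btrfs_truncate_block() alone,
 * while punching [1000, 5095] spans two blocks and takes the full path
 * below.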
*/ if (same_block && len < fs_info->sectorsize) { if (offset < ino_size) { truncated_block = true; - ret = btrfs_truncate_block(BTRFS_I(inode), offset, len, - 0); + ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1, + orig_start, orig_end); } else { ret = 0; } @@ -2559,7 +2651,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) /* zero back part of the first block */ if (offset < ino_size) { truncated_block = true; - ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0); + ret = btrfs_truncate_block(BTRFS_I(inode), offset, orig_start, orig_end); if (ret) { btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); return ret; @@ -2596,8 +2688,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) if (tail_start + tail_len < ino_size) { truncated_block = true; ret = btrfs_truncate_block(BTRFS_I(inode), - tail_start + tail_len, - 0, 1); + tail_start + tail_len - 1, + orig_start, orig_end); if (ret) goto out_only_mutex; } @@ -2631,8 +2723,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); out: - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state); out_only_mutex: if (!updated_inode && truncated_block && !ret) { /* @@ -2750,7 +2842,7 @@ static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode, else ret = RANGE_BOUNDARY_WRITTEN_EXTENT; - free_extent_map(em); + btrfs_free_extent_map(em); return ret; } @@ -2765,6 +2857,8 @@ static int btrfs_zero_range(struct inode *inode, int ret; u64 alloc_hint = 0; const u64 sectorsize = fs_info->sectorsize; + const u64 orig_start = offset; + const u64 orig_end = offset + len - 1; u64 alloc_start = round_down(offset, sectorsize); u64 alloc_end = round_up(offset + len, sectorsize); u64 bytes_to_reserve = 0; @@ -2794,7 +2888,7 @@ static int btrfs_zero_range(struct inode *inode, * do nothing except updating the inode's i_size if * needed. 
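 *
 * Illustrative numbers only: with a 4K block size and i_size = 6000, the
 * call below zeroes bytes [6000, 8191] of the block containing i_size so
 * that extending the file cannot expose stale data from that block.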
*/ - free_extent_map(em); + btrfs_free_extent_map(em); ret = btrfs_fallocate_update_isize(inode, offset + len, mode); goto out; @@ -2807,9 +2901,9 @@ static int btrfs_zero_range(struct inode *inode, ASSERT(IS_ALIGNED(alloc_start, sectorsize)); len = offset + len - alloc_start; offset = alloc_start; - alloc_hint = extent_map_block_start(em) + em->len; + alloc_hint = btrfs_extent_map_block_start(em) + em->len; } - free_extent_map(em); + btrfs_free_extent_map(em); if (BTRFS_BYTES_TO_BLKS(fs_info, offset) == BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) { @@ -2820,22 +2914,22 @@ static int btrfs_zero_range(struct inode *inode, } if (em->flags & EXTENT_FLAG_PREALLOC) { - free_extent_map(em); + btrfs_free_extent_map(em); ret = btrfs_fallocate_update_isize(inode, offset + len, mode); goto out; } if (len < sectorsize && em->disk_bytenr != EXTENT_MAP_HOLE) { - free_extent_map(em); - ret = btrfs_truncate_block(BTRFS_I(inode), offset, len, - 0); + btrfs_free_extent_map(em); + ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1, + orig_start, orig_end); if (!ret) ret = btrfs_fallocate_update_isize(inode, offset + len, mode); return ret; } - free_extent_map(em); + btrfs_free_extent_map(em); alloc_start = round_down(offset, sectorsize); alloc_end = alloc_start + sectorsize; goto reserve_space; @@ -2859,7 +2953,8 @@ static int btrfs_zero_range(struct inode *inode, alloc_start = round_down(offset, sectorsize); ret = 0; } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { - ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0); + ret = btrfs_truncate_block(BTRFS_I(inode), offset, + orig_start, orig_end); if (ret) goto out; } else { @@ -2876,8 +2971,8 @@ static int btrfs_zero_range(struct inode *inode, alloc_end = round_up(offset + len, sectorsize); ret = 0; } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) { - ret = btrfs_truncate_block(BTRFS_I(inode), offset + len, - 0, 1); + ret = btrfs_truncate_block(BTRFS_I(inode), offset + len - 1, + orig_start, orig_end); if (ret) goto out; } else { @@ -2902,16 +2997,16 @@ reserve_space: ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved, alloc_start, bytes_to_reserve); if (ret) { - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, - lockend, &cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, + lockend, &cached_state); goto out; } ret = btrfs_prealloc_file_range(inode, mode, alloc_start, alloc_end - alloc_start, fs_info->sectorsize, offset + len, &alloc_hint); - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state); /* btrfs_prealloc_file_range releases reserved space on error */ if (ret) { space_reserved = false; @@ -2997,7 +3092,8 @@ static long btrfs_fallocate(struct file *file, int mode, * need to zero out the end of the block if i_size lands in the * middle of a block. 
*/ - ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0); + ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, + inode->i_size, (u64)-1); if (ret) goto out; } @@ -3022,8 +3118,8 @@ static long btrfs_fallocate(struct file *file, int mode, } locked_end = alloc_end - 1; - lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - &cached_state); + btrfs_lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, + &cached_state); btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end); @@ -3035,8 +3131,8 @@ static long btrfs_fallocate(struct file *file, int mode, ret = PTR_ERR(em); break; } - last_byte = min(extent_map_end(em), alloc_end); - actual_end = min_t(u64, extent_map_end(em), offset + len); + last_byte = min(btrfs_extent_map_end(em), alloc_end); + actual_end = min_t(u64, btrfs_extent_map_end(em), offset + len); last_byte = ALIGN(last_byte, blocksize); if (em->disk_bytenr == EXTENT_MAP_HOLE || (cur_offset >= inode->i_size && @@ -3045,19 +3141,19 @@ static long btrfs_fallocate(struct file *file, int mode, ret = add_falloc_range(&reserve_list, cur_offset, range_len); if (ret < 0) { - free_extent_map(em); + btrfs_free_extent_map(em); break; } ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved, cur_offset, range_len); if (ret < 0) { - free_extent_map(em); + btrfs_free_extent_map(em); break; } qgroup_reserved += range_len; data_space_needed += range_len; } - free_extent_map(em); + btrfs_free_extent_map(em); cur_offset = last_byte; } @@ -3111,8 +3207,8 @@ static long btrfs_fallocate(struct file *file, int mode, */ ret = btrfs_fallocate_update_isize(inode, actual_end, mode); out_unlock: - unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - &cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, + &cached_state); out: btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP); extent_changeset_free(data_reserved); @@ -3146,10 +3242,10 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end if (inode->delalloc_bytes > 0) { spin_unlock(&inode->lock); *delalloc_start_ret = start; - delalloc_len = count_range_bits(&inode->io_tree, - delalloc_start_ret, end, - len, EXTENT_DELALLOC, 1, - cached_state); + delalloc_len = btrfs_count_range_bits(&inode->io_tree, + delalloc_start_ret, end, + len, EXTENT_DELALLOC, 1, + cached_state); } else { spin_unlock(&inode->lock); } @@ -3458,7 +3554,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) last_extent_end = lockstart; - lock_extent(&inode->io_tree, lockstart, lockend, &cached_state); + btrfs_lock_extent(&inode->io_tree, lockstart, lockend, &cached_state); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) { @@ -3604,7 +3700,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) } out: - unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state); + btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state); btrfs_free_path(path); if (ret < 0) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 05e173311c1a..4b34ea1f01c2 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -308,8 +308,9 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, bool locked = false; if (block_group) { - struct btrfs_path *path = btrfs_alloc_path(); + BTRFS_PATH_AUTO_FREE(path); + path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto fail; @@ -330,13 +331,12 @@ int 
btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_CLEAR; spin_unlock(&block_group->lock); - btrfs_free_path(path); } btrfs_i_size_write(inode, 0); truncate_pagecache(vfs_inode, 0); - lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); + btrfs_lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); /* @@ -348,7 +348,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); btrfs_inode_safe_disk_i_size_write(inode, control.last_size); - unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); + btrfs_unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); if (ret) goto fail; @@ -457,7 +457,7 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate) mask); if (IS_ERR(folio)) { io_ctl_drop_pages(io_ctl); - return -ENOMEM; + return PTR_ERR(folio); } ret = set_folio_extent_mapped(folio); @@ -1080,9 +1080,8 @@ int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl, /* Get the cluster for this block_group if it exists */ if (block_group && !list_empty(&block_group->cluster_list)) { - cluster = list_entry(block_group->cluster_list.next, - struct btrfs_free_cluster, - block_group_list); + cluster = list_first_entry(&block_group->cluster_list, + struct btrfs_free_cluster, block_group_list); } if (!node && cluster) { @@ -1160,8 +1159,8 @@ update_cache_item(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, - EXTENT_DELALLOC, NULL); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, + EXTENT_DELALLOC, NULL); goto fail; } leaf = path->nodes[0]; @@ -1172,9 +1171,9 @@ update_cache_item(struct btrfs_trans_handle *trans, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || found_key.offset != offset) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, - inode->i_size - 1, EXTENT_DELALLOC, - NULL); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, + inode->i_size - 1, EXTENT_DELALLOC, + NULL); btrfs_release_path(path); goto fail; } @@ -1219,9 +1218,9 @@ static noinline_for_stack int write_pinned_extent_entries( start = block_group->start; while (start < block_group->start + block_group->length) { - if (!find_first_extent_bit(unpin, start, - &extent_start, &extent_end, - EXTENT_DIRTY, NULL)) + if (!btrfs_find_first_extent_bit(unpin, start, + &extent_start, &extent_end, + EXTENT_DIRTY, NULL)) return 0; /* This pinned extent is out of our range */ @@ -1267,8 +1266,8 @@ static int flush_dirty_cache(struct inode *inode) ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); if (ret) - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, - EXTENT_DELALLOC, NULL); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, + EXTENT_DELALLOC, NULL); return ret; } @@ -1288,8 +1287,8 @@ cleanup_write_cache_enospc(struct inode *inode, struct extent_state **cached_state) { io_ctl_drop_pages(io_ctl); - unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, - cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, + cached_state); } static int __btrfs_wait_cache_io(struct btrfs_root *root, @@ -1414,8 +1413,8 @@ static int __btrfs_write_out_cache(struct inode *inode, if (ret) goto out_unlock; - 
lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, - &cached_state); + btrfs_lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, + &cached_state); io_ctl_set_generation(io_ctl, trans->transid); @@ -1475,8 +1474,8 @@ static int __btrfs_write_out_cache(struct inode *inode, io_ctl_drop_pages(io_ctl); io_ctl_free(io_ctl); - unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, - &cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, + &cached_state); /* * at this point the pages are under IO and we're happy, @@ -2342,9 +2341,8 @@ again: struct rb_node *node; struct btrfs_free_space *entry; - cluster = list_entry(block_group->cluster_list.next, - struct btrfs_free_cluster, - block_group_list); + cluster = list_first_entry(&block_group->cluster_list, + struct btrfs_free_cluster, block_group_list); spin_lock(&cluster->lock); node = rb_first(&cluster->root); if (!node) { diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 39c6b96a4c25..0c573d46639a 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -117,7 +117,7 @@ struct btrfs_free_space_info *search_free_space_info( if (ret != 0) { btrfs_warn(fs_info, "missing free space info for %llu", block_group->start); - ASSERT(0); + DEBUG_WARN(); return ERR_PTR(-ENOENT); } @@ -141,12 +141,12 @@ static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans, return ret; if (ret == 0) { - ASSERT(0); + DEBUG_WARN(); return -EIO; } if (p->slots[0] == 0) { - ASSERT(0); + DEBUG_WARN("no previous slot found"); return -EIO; } p->slots[0]--; @@ -223,6 +223,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; } @@ -235,8 +236,10 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, while (!done) { ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } leaf = path->nodes[0]; nr = 0; @@ -271,14 +274,17 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, } ret = btrfs_del_items(trans, root, path, path->slots[0], nr); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } btrfs_release_path(path); } info = search_free_space_info(trans, block_group, path, 1); if (IS_ERR(info)) { ret = PTR_ERR(info); + btrfs_abort_transaction(trans, ret); goto out; } leaf = path->nodes[0]; @@ -293,8 +299,8 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); - ASSERT(0); ret = -EIO; + btrfs_abort_transaction(trans, ret); goto out; } @@ -315,8 +321,10 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_item(trans, root, path, &key, data_size); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } leaf = path->nodes[0]; ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); @@ -331,8 +339,6 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, ret = 0; out: kvfree(bitmap); - if (ret) - btrfs_abort_transaction(trans, ret); return ret; } @@ -358,6 +364,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; } @@ -370,8 +377,10 @@ int 
convert_free_space_to_extents(struct btrfs_trans_handle *trans, while (!done) { ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } leaf = path->nodes[0]; nr = 0; @@ -412,14 +421,17 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, } ret = btrfs_del_items(trans, root, path, path->slots[0], nr); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } btrfs_release_path(path); } info = search_free_space_info(trans, block_group, path, 1); if (IS_ERR(info)) { ret = PTR_ERR(info); + btrfs_abort_transaction(trans, ret); goto out; } leaf = path->nodes[0]; @@ -441,8 +453,10 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize; ret = btrfs_insert_empty_item(trans, root, path, &key, 0); - if (ret) + if (ret) { + btrfs_abort_transaction(trans, ret); goto out; + } btrfs_release_path(path); extent_count++; @@ -455,16 +469,14 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); - ASSERT(0); ret = -EIO; + btrfs_abort_transaction(trans, ret); goto out; } ret = 0; out: kvfree(bitmap); - if (ret) - btrfs_abort_transaction(trans, ret); return ret; } @@ -838,13 +850,15 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; } block_group = btrfs_lookup_block_group(trans->fs_info, start); if (!block_group) { - ASSERT(0); + DEBUG_WARN("no block group found for start=%llu", start); ret = -ENOENT; + btrfs_abort_transaction(trans, ret); goto out; } @@ -852,12 +866,12 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans, ret = __remove_from_free_space_tree(trans, block_group, path, start, size); mutex_unlock(&block_group->free_space_lock); + if (ret) + btrfs_abort_transaction(trans, ret); btrfs_put_block_group(block_group); out: btrfs_free_path(path); - if (ret) - btrfs_abort_transaction(trans, ret); return ret; } @@ -1031,25 +1045,27 @@ int add_to_free_space_tree(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; + btrfs_abort_transaction(trans, ret); goto out; } block_group = btrfs_lookup_block_group(trans->fs_info, start); if (!block_group) { - ASSERT(0); + DEBUG_WARN("no block group found for start=%llu", start); ret = -ENOENT; + btrfs_abort_transaction(trans, ret); goto out; } mutex_lock(&block_group->free_space_lock); ret = __add_to_free_space_tree(trans, block_group, path, start, size); mutex_unlock(&block_group->free_space_lock); + if (ret) + btrfs_abort_transaction(trans, ret); btrfs_put_block_group(block_group); out: btrfs_free_path(path); - if (ret) - btrfs_abort_transaction(trans, ret); return ret; } @@ -1555,7 +1571,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); - ASSERT(0); + DEBUG_WARN(); ret = -EIO; goto out; } @@ -1619,7 +1635,7 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); - ASSERT(0); + DEBUG_WARN(); ret = -EIO; goto out; } diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h index 
7baa2ed45198..4394de12a767 100644 --- a/fs/btrfs/fs.h +++ b/fs/btrfs/fs.h @@ -472,6 +472,8 @@ struct btrfs_fs_info { struct btrfs_block_rsv delayed_block_rsv; /* Block reservation for delayed refs */ struct btrfs_block_rsv delayed_refs_rsv; + /* Block reservation for treelog tree */ + struct btrfs_block_rsv treelog_rsv; struct btrfs_block_rsv empty_block_rsv; @@ -777,10 +779,8 @@ struct btrfs_fs_info { struct btrfs_delayed_root *delayed_root; - /* Extent buffer radix tree */ - spinlock_t buffer_lock; /* Entries are eb->start / sectorsize */ - struct radix_tree_root buffer_radix; + struct xarray buffer_tree; /* Next backup root to be overwritten */ int backup_root_index; diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 3530de0618c8..a61c3540d67b 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -109,7 +109,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, u64 inode_objectid, u64 ref_objectid, u64 *index) { - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct btrfs_inode_extref *extref; struct extent_buffer *leaf; @@ -129,9 +129,9 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) - ret = -ENOENT; + return -ENOENT; if (ret < 0) - goto out; + return ret; /* * Sanity check - did we find the right item for this name? @@ -142,8 +142,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, ref_objectid, name); if (!extref) { btrfs_abort_transaction(trans, -ENOENT); - ret = -ENOENT; - goto out; + return -ENOENT; } leaf = path->nodes[0]; @@ -152,12 +151,8 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, *index = btrfs_inode_extref_index(leaf, extref); if (del_len == item_size) { - /* - * Common case only one ref in the item, remove the - * whole item. - */ - ret = btrfs_del_item(trans, root, path); - goto out; + /* Common case only one ref in the item, remove the whole item. 
*/ + return btrfs_del_item(trans, root, path); } ptr = (unsigned long)extref; @@ -168,9 +163,6 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, btrfs_truncate_item(trans, path, item_size - del_len, 1); -out: - btrfs_free_path(path); - return ret; } @@ -260,7 +252,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, int ret; int ins_len = name->len + sizeof(*extref); unsigned long ptr; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_key key; struct extent_buffer *leaf; @@ -279,13 +271,13 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, path->slots[0], ref_objectid, name)) - goto out; + return ret; btrfs_extend_item(trans, path, ins_len); ret = 0; } if (ret < 0) - goto out; + return ret; leaf = path->nodes[0]; ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char); @@ -298,9 +290,8 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, ptr = (unsigned long)&extref->name; write_extent_buffer(path->nodes[0], name->name, ptr, name->len); -out: - btrfs_free_path(path); - return ret; + + return 0; } /* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */ diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 90f5da3c520a..c0c778243bf1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -686,12 +686,12 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, if (!can_cow_file_range_inline(inode, offset, size, compressed_size)) return 1; - lock_extent(&inode->io_tree, offset, end, &cached); + btrfs_lock_extent(&inode->io_tree, offset, end, &cached); ret = __cow_file_range_inline(inode, size, compressed_size, compress_type, compressed_folio, update_i_size); if (ret > 0) { - unlock_extent(&inode->io_tree, offset, end, &cached); + btrfs_unlock_extent(&inode->io_tree, offset, end, &cached); return ret; } @@ -777,26 +777,9 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, struct btrfs_fs_info *fs_info = inode->root->fs_info; if (!btrfs_inode_can_compress(inode)) { - WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), - KERN_ERR "BTRFS: unexpected compression for ino %llu\n", - btrfs_ino(inode)); + DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode)); return 0; } - /* - * Only enable sector perfect compression for experimental builds. - * - * This is a big feature change for subpage cases, and can hit - * different corner cases, so only limit this feature for - * experimental build for now. - * - * ETA for moving this out of experimental builds is 6.15. 
- */ - if (fs_info->sectorsize < PAGE_SIZE && - !IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) { - if (!PAGE_ALIGNED(start) || - !PAGE_ALIGNED(end + 1)) - return 0; - } /* force compress */ if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) @@ -1153,7 +1136,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk, goto done; } - lock_extent(io_tree, start, end, &cached); + btrfs_lock_extent(io_tree, start, end, &cached); /* Here we're doing allocation and writeback of the compressed pages */ file_extent.disk_bytenr = ins.objectid; @@ -1168,10 +1151,10 @@ static void submit_one_async_extent(struct async_chunk *async_chunk, ret = PTR_ERR(em); goto out_free_reserve; } - free_extent_map(em); + btrfs_free_extent_map(em); ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, - 1 << BTRFS_ORDERED_COMPRESSED); + 1U << BTRFS_ORDERED_COMPRESSED); if (IS_ERR(ordered)) { btrfs_drop_extent_map_range(inode, start, end, false); ret = PTR_ERR(ordered); @@ -1198,7 +1181,7 @@ done: out_free_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); - btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); + btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true); mapping_set_error(inode->vfs_inode.i_mapping, -EIO); extent_clear_unlock_delalloc(inode, start, end, NULL, &cached, @@ -1225,7 +1208,7 @@ u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start, u64 alloc_hint = 0; read_lock(&em_tree->lock); - em = search_extent_mapping(em_tree, start, num_bytes); + em = btrfs_search_extent_mapping(em_tree, start, num_bytes); if (em) { /* * if block start isn't an actual block number then find the @@ -1233,15 +1216,15 @@ u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start, * block is also bogus then just don't worry about it. */ if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) { - free_extent_map(em); - em = search_extent_mapping(em_tree, 0, 0); + btrfs_free_extent_map(em); + em = btrfs_search_extent_mapping(em_tree, 0, 0); if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE) - alloc_hint = extent_map_block_start(em); + alloc_hint = btrfs_extent_map_block_start(em); if (em) - free_extent_map(em); + btrfs_free_extent_map(em); } else { - alloc_hint = extent_map_block_start(em); - free_extent_map(em); + alloc_hint = btrfs_extent_map_block_start(em); + btrfs_free_extent_map(em); } } read_unlock(&em_tree->lock); @@ -1404,24 +1387,24 @@ static noinline int cow_file_range(struct btrfs_inode *inode, * Locked range will be released either during error clean up or * after the whole range is finished. 
*/ - lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1, - &cached); + btrfs_lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1, + &cached); em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_REGULAR); if (IS_ERR(em)) { - unlock_extent(&inode->io_tree, start, - start + cur_alloc_size - 1, &cached); + btrfs_unlock_extent(&inode->io_tree, start, + start + cur_alloc_size - 1, &cached); ret = PTR_ERR(em); goto out_reserve; } - free_extent_map(em); + btrfs_free_extent_map(em); ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, - 1 << BTRFS_ORDERED_REGULAR); + 1U << BTRFS_ORDERED_REGULAR); if (IS_ERR(ordered)) { - unlock_extent(&inode->io_tree, start, - start + cur_alloc_size - 1, &cached); + btrfs_unlock_extent(&inode->io_tree, start, + start + cur_alloc_size - 1, &cached); ret = PTR_ERR(ordered); goto out_drop_extent_cache; } @@ -1476,7 +1459,7 @@ out_drop_extent_cache: btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false); out_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); - btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); + btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true); out_unlock: /* * Now, we have three regions to clean up: @@ -1585,8 +1568,8 @@ static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_ PAGE_SHIFT; while (!list_empty(&async_chunk->extents)) { - async_extent = list_entry(async_chunk->extents.next, - struct async_extent, list); + async_extent = list_first_entry(&async_chunk->extents, + struct async_extent, list); list_del(&async_extent->list); submit_one_async_extent(async_chunk, async_extent, &alloc_hint); } @@ -1756,9 +1739,9 @@ static int fallback_to_cow(struct btrfs_inode *inode, * group that contains that extent to RO mode and therefore force COW * when starting writeback. 
*/ - lock_extent(io_tree, start, end, &cached_state); - count = count_range_bits(io_tree, &range_start, end, range_bytes, - EXTENT_NORESERVE, 0, NULL); + btrfs_lock_extent(io_tree, start, end, &cached_state); + count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes, + EXTENT_NORESERVE, 0, NULL); if (count > 0 || is_space_ino || is_reloc_ino) { u64 bytes = count; struct btrfs_fs_info *fs_info = inode->root->fs_info; @@ -1772,10 +1755,9 @@ static int fallback_to_cow(struct btrfs_inode *inode, spin_unlock(&sinfo->lock); if (count > 0) - clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, - NULL); + btrfs_clear_extent_bits(io_tree, start, end, EXTENT_NORESERVE); } - unlock_extent(io_tree, start, end, &cached_state); + btrfs_unlock_extent(io_tree, start, end, &cached_state); /* * Don't try to create inline extents, as a mix of inline extent that @@ -1983,7 +1965,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio u64 end = file_pos + len - 1; int ret = 0; - lock_extent(&inode->io_tree, file_pos, end, cached); + btrfs_lock_extent(&inode->io_tree, file_pos, end, cached); if (is_prealloc) { struct extent_map *em; @@ -1991,20 +1973,20 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent, BTRFS_ORDERED_PREALLOC); if (IS_ERR(em)) { - unlock_extent(&inode->io_tree, file_pos, end, cached); + btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached); return PTR_ERR(em); } - free_extent_map(em); + btrfs_free_extent_map(em); } ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent, is_prealloc - ? (1 << BTRFS_ORDERED_PREALLOC) - : (1 << BTRFS_ORDERED_NOCOW)); + ? (1U << BTRFS_ORDERED_PREALLOC) + : (1U << BTRFS_ORDERED_NOCOW)); if (IS_ERR(ordered)) { if (is_prealloc) btrfs_drop_extent_map_range(inode, file_pos, end, false); - unlock_extent(&inode->io_tree, file_pos, end, cached); + btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached); return PTR_ERR(ordered); } @@ -2303,7 +2285,7 @@ error: if (cur_offset < end) { struct extent_state *cached = NULL; - lock_extent(&inode->io_tree, cur_offset, end, &cached); + btrfs_lock_extent(&inode->io_tree, cur_offset, end, &cached); extent_clear_unlock_delalloc(inode, cur_offset, end, locked_folio, &cached, EXTENT_LOCKED | EXTENT_DELALLOC | @@ -2325,7 +2307,7 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end) { if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { if (inode->defrag_bytes && - test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG)) + btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG)) return false; return true; } @@ -2614,7 +2596,7 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, !btrfs_is_free_space_inode(inode) && !(state->state & EXTENT_NORESERVE) && (bits & EXTENT_CLEAR_DATA_RESV)) - btrfs_free_reserved_data_space_noquota(fs_info, len); + btrfs_free_reserved_data_space_noquota(inode, len); percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, fs_info->delalloc_batch); @@ -2698,12 +2680,12 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, if (em_len > search_len) em_len = search_len; - ret = set_extent_bit(&inode->io_tree, search_start, - search_start + em_len - 1, - EXTENT_DELALLOC_NEW, cached_state); + ret = btrfs_set_extent_bit(&inode->io_tree, search_start, + search_start + em_len - 1, + EXTENT_DELALLOC_NEW, cached_state); next: - search_start = 
extent_map_end(em); - free_extent_map(em); + search_start = btrfs_extent_map_end(em); + btrfs_free_extent_map(em); if (ret) return ret; } @@ -2733,8 +2715,8 @@ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, return ret; } - return set_extent_bit(&inode->io_tree, start, end, - EXTENT_DELALLOC | extra_bits, cached_state); + return btrfs_set_extent_bit(&inode->io_tree, start, end, + EXTENT_DELALLOC | extra_bits, cached_state); } /* see btrfs_writepage_start_hook for details on why this is required */ @@ -2809,7 +2791,7 @@ again: if (ret) goto out_page; - lock_extent(&inode->io_tree, page_start, page_end, &cached_state); + btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state); /* already ordered? We're done */ if (folio_test_ordered(folio)) @@ -2817,8 +2799,8 @@ again: ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); if (ordered) { - unlock_extent(&inode->io_tree, page_start, page_end, - &cached_state); + btrfs_unlock_extent(&inode->io_tree, page_start, page_end, + &cached_state); folio_unlock(folio); btrfs_start_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); @@ -2844,7 +2826,7 @@ out_reserved: if (free_delalloc_space) btrfs_delalloc_release_space(inode, data_reserved, page_start, PAGE_SIZE, true); - unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); + btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); out_page: if (ret) { /* @@ -2896,7 +2878,7 @@ int btrfs_writepage_cow_fixup(struct folio *folio) * We should not hit such out-of-band dirty folios anymore. */ if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) { - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN(); btrfs_err_rl(fs_info, "root %lld ino %llu folio %llu is marked dirty without notifying the fs", BTRFS_I(inode)->root->root_key.objectid, @@ -2945,7 +2927,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, { struct btrfs_root *root = inode->root; const u64 sectorsize = root->fs_info->sectorsize; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct extent_buffer *leaf; struct btrfs_key ins; u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi); @@ -3027,8 +3009,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, file_pos - offset, qgroup_reserved, &ins); out: - btrfs_free_path(path); - return ret; } @@ -3144,8 +3124,10 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) * depending on their current state). 
*/ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { - clear_bits |= EXTENT_LOCKED; - lock_extent(io_tree, start, end, &cached_state); + clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED; + btrfs_lock_extent_bits(io_tree, start, end, + EXTENT_LOCKED | EXTENT_FINISHING_ORDERED, + &cached_state); } if (freespace_inode) @@ -3209,8 +3191,8 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) goto out; } - ret = unpin_extent_cache(inode, ordered_extent->file_offset, - ordered_extent->num_bytes, trans->transid); + ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset, + ordered_extent->num_bytes, trans->transid); if (ret < 0) { btrfs_abort_transaction(trans, ret); goto out; @@ -3229,9 +3211,9 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) */ if ((clear_bits & EXTENT_DELALLOC_NEW) && !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) - clear_extent_bit(&inode->io_tree, start, end, - EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, - &cached_state); + btrfs_clear_extent_bit(&inode->io_tree, start, end, + EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES, + &cached_state); btrfs_inode_safe_disk_i_size_write(inode, 0); ret = btrfs_update_inode_fallback(trans, inode); @@ -3240,15 +3222,13 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) goto out; } out: - clear_extent_bit(&inode->io_tree, start, end, clear_bits, - &cached_state); + btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, + &cached_state); if (trans) btrfs_end_transaction(trans); if (ret || truncated) { - u64 unwritten_start = start; - /* * If we failed to finish this ordered extent for any reason we * need to make sure BTRFS_ORDERED_IOERR is set on the ordered @@ -3260,10 +3240,6 @@ out: if (ret) btrfs_mark_ordered_extent_error(ordered_extent); - if (truncated) - unwritten_start += logical_len; - clear_extent_uptodate(io_tree, unwritten_start, end, NULL); - /* * Drop extent maps for the part of the extent we didn't write. * @@ -3278,9 +3254,15 @@ out: * we don't mess with the extent map tree in the NOCOW case, but * for now simply skip this if we are the free space inode. */ - if (!btrfs_is_free_space_inode(inode)) + if (!btrfs_is_free_space_inode(inode)) { + u64 unwritten_start = start; + + if (truncated) + unwritten_start += logical_len; + btrfs_drop_extent_map_range(inode, unwritten_start, end, false); + } /* * If the ordered extent had an IOERR or something else went @@ -3307,7 +3289,7 @@ out: NULL); btrfs_free_reserved_extent(fs_info, ordered_extent->disk_bytenr, - ordered_extent->disk_num_bytes, 1); + ordered_extent->disk_num_bytes, true); /* * Actually free the qgroup rsv which was released when * the ordered extent was created. @@ -3344,20 +3326,16 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered) /* * Verify the checksum for a single sector without any extra action that depend * on the type of I/O. + * + * @kaddr must be a properly kmapped address. 
*/ -int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page, - u32 pgoff, u8 *csum, const u8 * const csum_expected) +int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, void *kaddr, u8 *csum, + const u8 * const csum_expected) { SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); - char *kaddr; - - ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); shash->tfm = fs_info->csum_shash; - - kaddr = kmap_local_page(page) + pgoff; crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); - kunmap_local(kaddr); if (memcmp(csum, csum_expected, fs_info->csum_size)) return -EIO; @@ -3386,6 +3364,7 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, u64 end = file_offset + bv->bv_len - 1; u8 *csum_expected; u8 csum[BTRFS_CSUM_SIZE]; + void *kaddr; ASSERT(bv->bv_len == fs_info->sectorsize); @@ -3393,19 +3372,22 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev, return true; if (btrfs_is_data_reloc_root(inode->root) && - test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, - NULL)) { + btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, + NULL)) { /* Skip the range without csum for data reloc inode */ - clear_extent_bits(&inode->io_tree, file_offset, end, - EXTENT_NODATASUM); + btrfs_clear_extent_bits(&inode->io_tree, file_offset, end, + EXTENT_NODATASUM); return true; } csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * fs_info->csum_size; - if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, - csum_expected)) + kaddr = bvec_kmap_local(bv); + if (btrfs_check_sector_csum(fs_info, kaddr, csum, csum_expected)) { + kunmap_local(kaddr); goto zeroit; + } + kunmap_local(kaddr); return true; zeroit: @@ -3552,7 +3534,7 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, int btrfs_orphan_cleanup(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct extent_buffer *leaf; struct btrfs_key key, found_key; struct btrfs_trans_handle *trans; @@ -3742,19 +3724,22 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) out: if (ret) btrfs_err(fs_info, "could not do orphan cleanup %d", ret); - btrfs_free_path(path); return ret; } /* - * very simple check to peek ahead in the leaf looking for xattrs. If we - * don't find any xattrs, we know there can't be any acls. + * Look ahead in the leaf for xattrs. If we don't find any then we know there + * can't be any ACLs. + * + * @leaf: the eb leaf where to search + * @slot: the slot the inode is in + * @objectid: the objectid of the inode * - * slot is the slot the inode is in, objectid is the objectid of the inode + * Return true if there is xattr/ACL, false otherwise. */ -static noinline int acls_after_inode_item(struct extent_buffer *leaf, - int slot, u64 objectid, - int *first_xattr_slot) +static noinline bool acls_after_inode_item(struct extent_buffer *leaf, + int slot, u64 objectid, + int *first_xattr_slot) { u32 nritems = btrfs_header_nritems(leaf); struct btrfs_key found_key; @@ -3774,45 +3759,50 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf, while (slot < nritems) { btrfs_item_key_to_cpu(leaf, &found_key, slot); - /* we found a different objectid, there must not be acls */ + /* We found a different objectid, there must be no ACLs. */ if (found_key.objectid != objectid) - return 0; + return false; - /* we found an xattr, assume we've got an acl */ + /* We found an xattr, assume we've got an ACL. 
*/ if (found_key.type == BTRFS_XATTR_ITEM_KEY) { if (*first_xattr_slot == -1) *first_xattr_slot = slot; if (found_key.offset == xattr_access || found_key.offset == xattr_default) - return 1; + return true; } /* - * we found a key greater than an xattr key, there can't - * be any acls later on + * We found a key greater than an xattr key, there can't be any + * ACLs later on. */ if (found_key.type > BTRFS_XATTR_ITEM_KEY) - return 0; + return false; slot++; scanned++; /* - * it goes inode, inode backrefs, xattrs, extents, - * so if there are a ton of hard links to an inode there can - * be a lot of backrefs. Don't waste time searching too hard, - * this is just an optimization + * The item order goes like: + * - inode + * - inode backrefs + * - xattrs + * - extents, + * + * so if there are lots of hard links to an inode there can be + * a lot of backrefs. Don't waste time searching too hard, + * this is just an optimization. */ if (scanned >= 8) break; } - /* we hit the end of the leaf before we found an xattr or - * something larger than an xattr. We have to assume the inode - * has acls + /* + * We hit the end of the leaf before we found an xattr or something + * larger than an xattr. We have to assume the inode has ACLs. */ if (*first_xattr_slot == -1) *first_xattr_slot = slot; - return 1; + return true; } static int btrfs_init_file_extent_tree(struct btrfs_inode *inode) @@ -3832,7 +3822,8 @@ static int btrfs_init_file_extent_tree(struct btrfs_inode *inode) if (!inode->file_extent_tree) return -ENOMEM; - extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT); + btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree, + IO_TREE_INODE_FILE_EXTENT); /* Lockdep class is set only for the file extent tree. */ lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class); @@ -4135,7 +4126,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, struct btrfs_inode *inode) { struct btrfs_inode_item *inode_item; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct extent_buffer *leaf; struct btrfs_key key; int ret; @@ -4149,7 +4140,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, if (ret) { if (ret > 0) ret = -ENOENT; - goto failed; + return ret; } leaf = path->nodes[0]; @@ -4158,10 +4149,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); btrfs_set_inode_last_trans(trans, inode); - ret = 0; -failed: - btrfs_free_path(path); - return ret; + return 0; } /* @@ -4495,7 +4483,7 @@ out: static noinline int may_destroy_subvol(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_dir_item *di; struct btrfs_key key; struct fscrypt_str name = FSTR_INIT("default", 7); @@ -4517,7 +4505,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root) btrfs_err(fs_info, "deleting default subvolume %llu is not allowed", key.objectid); - goto out; + return ret; } btrfs_release_path(path); } @@ -4528,14 +4516,13 @@ static noinline int may_destroy_subvol(struct btrfs_root *root) ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); if (ret < 0) - goto out; + return ret; if (ret == 0) { /* * Key with offset -1 found, there would have to exist a root * with such id, but this is out of valid range. 
*/ - ret = -EUCLEAN; - goto out; + return -EUCLEAN; } ret = 0; @@ -4545,8 +4532,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root) if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY) ret = -ENOTEMPTY; } -out: - btrfs_free_path(path); + return ret; } @@ -4789,20 +4775,80 @@ out_notrans: return ret; } +static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize) +{ + ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u", + blockstart, blocksize); + + if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1) + return true; + return false; +} + +static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start) +{ + const pgoff_t index = (start >> PAGE_SHIFT); + struct address_space *mapping = inode->vfs_inode.i_mapping; + struct folio *folio; + u64 zero_start; + u64 zero_end; + int ret = 0; + +again: + folio = filemap_lock_folio(mapping, index); + /* No folio present. */ + if (IS_ERR(folio)) + return 0; + + if (!folio_test_uptodate(folio)) { + ret = btrfs_read_folio(NULL, folio); + folio_lock(folio); + if (folio->mapping != mapping) { + folio_unlock(folio); + folio_put(folio); + goto again; + } + if (!folio_test_uptodate(folio)) { + ret = -EIO; + goto out_unlock; + } + } + folio_wait_writeback(folio); + + /* + * We do not need to lock extents nor wait for OE, as it's already + * beyond EOF. + */ + + zero_start = max_t(u64, folio_pos(folio), start); + zero_end = folio_pos(folio) + folio_size(folio) - 1; + folio_zero_range(folio, zero_start - folio_pos(folio), + zero_end - zero_start + 1); + +out_unlock: + folio_unlock(folio); + folio_put(folio); + return ret; +} + /* - * Read, zero a chunk and write a block. + * Handle the truncation of a fs block. * - * @inode - inode that we're zeroing - * @from - the offset to start zeroing - * @len - the length to zero, 0 to zero the entire range respective to the - * offset - * @front - zero up to the offset instead of from the offset on + * @inode - inode that we're zeroing + * @offset - the file offset of the block to truncate + * The value must be inside [@start, @end], and the function will do + * extra checks if the block that covers @offset needs to be zeroed. + * @start - the start file offset of the range we want to zero + * @end - the end (inclusive) file offset of the range we want to zero. * - * This will find the block for the "from" offset and cow the block and zero the - * part we want to zero. This is used with truncate and hole punching. + * If the range is not block aligned, read out the folio that covers @offset, + * and if needed zero blocks that are inside the folio and covered by [@start, @end). + * If @start or @end + 1 lands inside a block, that block will be marked dirty + * for writeback. + * + * This is utilized by hole punch, zero range, file expansion. 
*/ -int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, - int front) +int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct address_space *mapping = inode->vfs_inode.i_mapping; @@ -4812,20 +4858,56 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, struct extent_changeset *data_reserved = NULL; bool only_release_metadata = false; u32 blocksize = fs_info->sectorsize; - pgoff_t index = from >> PAGE_SHIFT; - unsigned offset = from & (blocksize - 1); + pgoff_t index = (offset >> PAGE_SHIFT); struct folio *folio; gfp_t mask = btrfs_alloc_write_mask(mapping); size_t write_bytes = blocksize; int ret = 0; + const bool in_head_block = is_inside_block(offset, round_down(start, blocksize), + blocksize); + const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize), + blocksize); + bool need_truncate_head = false; + bool need_truncate_tail = false; + u64 zero_start; + u64 zero_end; u64 block_start; u64 block_end; - if (IS_ALIGNED(offset, blocksize) && - (!len || IS_ALIGNED(len, blocksize))) + /* @offset should be inside the range. */ + ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu", + offset, start, end); + + /* The range is aligned at both ends. */ + if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) { + /* + * For block size < page size case, we may have polluted blocks + * beyond EOF. So we also need to zero them out. + */ + if (end == (u64)-1 && blocksize < PAGE_SIZE) + ret = truncate_block_zero_beyond_eof(inode, start); + goto out; + } + + /* + * @offset may not be inside the head nor tail block. In that case we + * don't need to do anything. + */ + if (!in_head_block && !in_tail_block) + goto out; + + /* + * Skip the truncation if the range in the target block is already aligned. + * The seemingly complex check will also handle the same block case.
+ */ + if (in_head_block && !IS_ALIGNED(start, blocksize)) + need_truncate_head = true; + if (in_tail_block && !IS_ALIGNED(end + 1, blocksize)) + need_truncate_tail = true; + if (!need_truncate_head && !need_truncate_tail) goto out; - block_start = round_down(from, blocksize); + block_start = round_down(offset, blocksize); block_end = block_start + blocksize - 1; ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, @@ -4849,10 +4931,13 @@ again: folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask); if (IS_ERR(folio)) { - btrfs_delalloc_release_space(inode, data_reserved, block_start, - blocksize, true); + if (only_release_metadata) + btrfs_delalloc_release_metadata(inode, blocksize, true); + else + btrfs_delalloc_release_space(inode, data_reserved, + block_start, blocksize, true); btrfs_delalloc_release_extents(inode, blocksize); - ret = -ENOMEM; + ret = PTR_ERR(folio); goto out; } @@ -4882,11 +4967,11 @@ again: folio_wait_writeback(folio); - lock_extent(io_tree, block_start, block_end, &cached_state); + btrfs_lock_extent(io_tree, block_start, block_end, &cached_state); ordered = btrfs_lookup_ordered_extent(inode, block_start); if (ordered) { - unlock_extent(io_tree, block_start, block_end, &cached_state); + btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); folio_unlock(folio); folio_put(folio); btrfs_start_ordered_extent(ordered); @@ -4894,37 +4979,46 @@ again: goto again; } - clear_extent_bit(&inode->io_tree, block_start, block_end, - EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, - &cached_state); + btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end, + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, + &cached_state); ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, &cached_state); if (ret) { - unlock_extent(io_tree, block_start, block_end, &cached_state); + btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); goto out_unlock; } - if (offset != blocksize) { - if (!len) - len = blocksize - offset; - if (front) - folio_zero_range(folio, block_start - folio_pos(folio), - offset); - else - folio_zero_range(folio, - (block_start - folio_pos(folio)) + offset, - len); + if (end == (u64)-1) { + /* + * We're truncating beyond EOF, the remaining blocks normally are + * already holes thus no need to zero again, but it's possible for + * fs block size < page size cases to have memory mapped writes + * to pollute ranges beyond EOF. + * + * In that case although such polluted blocks beyond EOF will + * not reach disk, it still affects our page caches. 
+ */ + zero_start = max_t(u64, folio_pos(folio), start); + zero_end = min_t(u64, folio_pos(folio) + folio_size(folio) - 1, + end); + } else { + zero_start = max_t(u64, block_start, start); + zero_end = min_t(u64, block_end, end); } + folio_zero_range(folio, zero_start - folio_pos(folio), + zero_end - zero_start + 1); + btrfs_folio_clear_checked(fs_info, folio, block_start, block_end + 1 - block_start); btrfs_folio_set_dirty(fs_info, folio, block_start, block_end + 1 - block_start); - unlock_extent(io_tree, block_start, block_end, &cached_state); + btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state); if (only_release_metadata) - set_extent_bit(&inode->io_tree, block_start, block_end, - EXTENT_NORESERVE, NULL); + btrfs_set_extent_bit(&inode->io_tree, block_start, block_end, + EXTENT_NORESERVE, NULL); out_unlock: if (ret) { @@ -5017,7 +5111,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) * rest of the block before we expand the i_size, otherwise we could * expose stale data. */ - ret = btrfs_truncate_block(inode, oldsize, 0, 0); + ret = btrfs_truncate_block(inode, oldsize, oldsize, -1); if (ret) return ret; @@ -5034,7 +5128,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) em = NULL; break; } - last_byte = min(extent_map_end(em), block_end); + last_byte = min(btrfs_extent_map_end(em), block_end); last_byte = ALIGN(last_byte, fs_info->sectorsize); hole_size = last_byte - cur_offset; @@ -5050,7 +5144,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) if (ret) break; - hole_em = alloc_extent_map(); + hole_em = btrfs_alloc_extent_map(); if (!hole_em) { btrfs_drop_extent_map_range(inode, cur_offset, cur_offset + hole_size - 1, @@ -5067,7 +5161,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) hole_em->generation = btrfs_get_fs_generation(fs_info); ret = btrfs_replace_extent_map_range(inode, hole_em, true); - free_extent_map(hole_em); + btrfs_free_extent_map(hole_em); } else { ret = btrfs_inode_set_file_extent_range(inode, cur_offset, hole_size); @@ -5075,14 +5169,14 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size) break; } next: - free_extent_map(em); + btrfs_free_extent_map(em); em = NULL; cur_offset = last_byte; if (cur_offset >= block_end) break; } - free_extent_map(em); - unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); + btrfs_free_extent_map(em); + btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); return ret; } @@ -5266,7 +5360,7 @@ static void evict_inode_truncate_pages(struct inode *inode) state_flags = state->state; spin_unlock(&io_tree->lock); - lock_extent(io_tree, start, end, &cached_state); + btrfs_lock_extent(io_tree, start, end, &cached_state); /* * If still has DELALLOC flag, the extent didn't reach disk, @@ -5280,9 +5374,9 @@ static void evict_inode_truncate_pages(struct inode *inode) btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start, end - start + 1, NULL); - clear_extent_bit(io_tree, start, end, - EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, - &cached_state); + btrfs_clear_extent_bit(io_tree, start, end, + EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING, + &cached_state); cond_resched(); spin_lock(&io_tree->lock); @@ -5468,7 +5562,7 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, struct btrfs_key *location, u8 *type) { struct btrfs_dir_item *di; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_root *root = 
dir->root; int ret = 0; struct fscrypt_name fname; @@ -5479,7 +5573,7 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); if (ret < 0) - goto out; + return ret; /* * fscrypt_setup_filename() should never return a positive value, but * gcc on sparc/parisc thinks it can, so assert that doesn't happen. @@ -5508,7 +5602,6 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry, *type = btrfs_dir_ftype(path->nodes[0], di); out: fscrypt_free_filename(&fname); - btrfs_free_path(path); return ret; } @@ -5523,7 +5616,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, struct btrfs_key *location, struct btrfs_root **sub_root) { - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct btrfs_root *new_root; struct btrfs_root_ref *ref; struct extent_buffer *leaf; @@ -5579,7 +5672,6 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, location->offset = 0; err = 0; out: - btrfs_free_path(path); fscrypt_free_filename(&fname); return err; } @@ -5858,7 +5950,7 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode) { struct btrfs_root *root = inode->root; struct btrfs_key key, found_key; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct extent_buffer *leaf; int ret; @@ -5872,15 +5964,14 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode) ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) - goto out; + return ret; /* FIXME: we should be able to handle this */ if (ret == 0) - goto out; - ret = 0; + return ret; if (path->slots[0] == 0) { inode->index_cnt = BTRFS_DIR_START_INDEX; - goto out; + return 0; } path->slots[0]--; @@ -5891,13 +5982,12 @@ static int btrfs_set_inode_index_count(struct btrfs_inode *inode) if (found_key.objectid != btrfs_ino(inode) || found_key.type != BTRFS_DIR_INDEX_KEY) { inode->index_cnt = BTRFS_DIR_START_INDEX; - goto out; + return 0; } inode->index_cnt = found_key.offset + 1; -out: - btrfs_free_path(path); - return ret; + + return 0; } static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) @@ -6000,7 +6090,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) struct btrfs_dir_item *di; struct btrfs_key key; struct btrfs_key found_key; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); void *addr; LIST_HEAD(ins_list); LIST_HEAD(del_list); @@ -6113,7 +6203,6 @@ nopos: err: if (put) btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list); - btrfs_free_path(path); return ret; } @@ -6903,18 +6992,18 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, struct extent_map_tree *em_tree = &inode->extent_tree; read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, start, len); + em = btrfs_lookup_extent_mapping(em_tree, start, len); read_unlock(&em_tree->lock); if (em) { if (em->start > start || em->start + em->len <= start) - free_extent_map(em); + btrfs_free_extent_map(em); else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio) - free_extent_map(em); + btrfs_free_extent_map(em); else goto out; } - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { ret = -ENOMEM; goto out; @@ -7051,7 +7140,7 @@ not_found: insert: ret = 0; btrfs_release_path(path); - if (em->start > start || extent_map_end(em) <= start) { + if (em->start > start || btrfs_extent_map_end(em) <= start) { btrfs_err(fs_info, "bad extent! 
em: [%llu %llu] passed [%llu %llu]", em->start, em->len, start, len); @@ -7068,7 +7157,7 @@ out: trace_btrfs_get_extent(root, inode, em); if (ret) { - free_extent_map(em); + btrfs_free_extent_map(em); return ERR_PTR(ret); } return em; @@ -7112,7 +7201,7 @@ noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len, struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; struct can_nocow_file_extent_args nocow_args = { 0 }; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); int ret; struct extent_buffer *leaf; struct extent_io_tree *io_tree = &inode->io_tree; @@ -7128,13 +7217,12 @@ noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len, ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), offset, 0); if (ret < 0) - goto out; + return ret; if (ret == 1) { if (path->slots[0] == 0) { - /* can't find the item, must cow */ - ret = 0; - goto out; + /* Can't find the item, must COW. */ + return 0; } path->slots[0]--; } @@ -7143,17 +7231,17 @@ noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len, btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) { - /* not our file or wrong item type, must cow */ - goto out; + /* Not our file or wrong item type, must COW. */ + return 0; } if (key.offset > offset) { - /* Wrong offset, must cow */ - goto out; + /* Wrong offset, must COW. */ + return 0; } if (btrfs_file_extent_end(path) <= offset) - goto out; + return 0; fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(leaf, fi); @@ -7168,15 +7256,13 @@ noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len, if (ret != 1) { /* Treat errors as not being able to NOCOW. */ - ret = 0; - goto out; + return 0; } - ret = 0; if (btrfs_extent_readonly(fs_info, nocow_args.file_extent.disk_bytenr + nocow_args.file_extent.offset)) - goto out; + return 0; if (!(inode->flags & BTRFS_INODE_NODATACOW) && found_type == BTRFS_FILE_EXTENT_PREALLOC) { @@ -7184,21 +7270,18 @@ noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len, range_end = round_up(offset + nocow_args.file_extent.num_bytes, root->fs_info->sectorsize) - 1; - ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC); - if (ret) { - ret = -EAGAIN; - goto out; - } + ret = btrfs_test_range_bit_exists(io_tree, offset, range_end, + EXTENT_DELALLOC); + if (ret) + return -EAGAIN; } if (file_extent) memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent)); *len = nocow_args.file_extent.num_bytes; - ret = 1; -out: - btrfs_free_path(path); - return ret; + + return 1; } /* The callers of this must take lock_extent() */ @@ -7246,7 +7329,7 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start, break; } - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) return ERR_PTR(-ENOMEM); @@ -7259,15 +7342,15 @@ struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start, em->offset = file_extent->offset; em->flags |= EXTENT_FLAG_PINNED; if (type == BTRFS_ORDERED_COMPRESSED) - extent_map_set_compression(em, file_extent->compression); + btrfs_extent_map_set_compression(em, file_extent->compression); ret = btrfs_replace_extent_map_range(inode, em, true); if (ret) { - free_extent_map(em); + btrfs_free_extent_map(em); return ERR_PTR(ret); } - /* em got 2 refs now, callers needs to do free_extent_map once. 
*/ + /* em got 2 refs now, callers needs to do btrfs_free_extent_map once. */ return em; } @@ -7394,7 +7477,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset, } if (!inode_evicting) - lock_extent(tree, page_start, page_end, &cached_state); + btrfs_lock_extent(tree, page_start, page_end, &cached_state); cur = page_start; while (cur < page_end) { @@ -7450,10 +7533,10 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset, * btrfs_finish_ordered_io(). */ if (!inode_evicting) - clear_extent_bit(tree, cur, range_end, - EXTENT_DELALLOC | - EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | - EXTENT_DEFRAG, &cached_state); + btrfs_clear_extent_bit(tree, cur, range_end, + EXTENT_DELALLOC | + EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | + EXTENT_DEFRAG, &cached_state); spin_lock_irq(&inode->ordered_tree_lock); set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); @@ -7495,12 +7578,11 @@ next: * Since the IO will never happen for this page. */ btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL); - if (!inode_evicting) { - clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | - EXTENT_DELALLOC | EXTENT_UPTODATE | - EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG | - extra_flags, &cached_state); - } + if (!inode_evicting) + btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED | + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | + EXTENT_DEFRAG | extra_flags, + &cached_state); cur = range_end + 1; } /* @@ -7604,7 +7686,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); control.new_size = new_size; - lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); + btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); /* * We want to drop from the next block forward in case this new * size is not block aligned since we will be keeping the last @@ -7619,7 +7701,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); btrfs_inode_safe_disk_i_size_write(inode, control.last_size); - unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); + btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); trans->block_rsv = &fs_info->trans_block_rsv; if (ret != -ENOSPC && ret != -EAGAIN) @@ -7663,7 +7745,8 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback) btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); - ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); + ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, + inode->vfs_inode.i_size, (u64)-1); if (ret) goto out; trans = btrfs_start_transaction(root, 1); @@ -7775,10 +7858,10 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->i_otime_nsec = 0; inode = &ei->vfs_inode; - extent_map_tree_init(&ei->extent_tree); + btrfs_extent_map_tree_init(&ei->extent_tree); /* This io tree sets the valid inode. 
*/ - extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); + btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); ei->io_tree.inode = ei; ei->file_extent_tree = NULL; @@ -8555,7 +8638,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, struct btrfs_inode *inode; struct inode *tmp_inode; - inode = list_entry(splice.next, struct btrfs_inode, delalloc_inodes); + inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes); list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes); @@ -8919,11 +9002,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_free_reserved_extent(fs_info, ins.objectid, - ins.offset, 0); + ins.offset, false); break; } - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset, cur_offset + ins.offset - 1, false); @@ -8941,7 +9024,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, em->generation = trans->transid; ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true); - free_extent_map(em); + btrfs_free_extent_map(em); next: num_bytes -= ins.offset; cur_offset += ins.offset; @@ -9113,7 +9196,7 @@ static ssize_t btrfs_encoded_read_inline( struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; struct extent_io_tree *io_tree = &inode->io_tree; - struct btrfs_path *path; + BTRFS_PATH_AUTO_FREE(path); struct extent_buffer *leaf; struct btrfs_file_extent_item *item; u64 ram_bytes; @@ -9123,10 +9206,8 @@ static ssize_t btrfs_encoded_read_inline( const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } + if (!path) + return -ENOMEM; path->nowait = nowait; @@ -9135,9 +9216,9 @@ static ssize_t btrfs_encoded_read_inline( if (ret) { if (ret > 0) { /* The extent item disappeared? 
*/ - ret = -EIO; + return -EIO; } - goto out; + return ret; } leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); @@ -9150,17 +9231,16 @@ static ssize_t btrfs_encoded_read_inline( ret = btrfs_encoded_io_compression_from_extent(fs_info, btrfs_file_extent_compression(leaf, item)); if (ret < 0) - goto out; + return ret; encoded->compression = ret; if (encoded->compression) { size_t inline_size; inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); - if (inline_size > count) { - ret = -ENOBUFS; - goto out; - } + if (inline_size > count) + return -ENOBUFS; + count = inline_size; encoded->unencoded_len = ram_bytes; encoded->unencoded_offset = iocb->ki_pos - extent_start; @@ -9172,13 +9252,12 @@ static ssize_t btrfs_encoded_read_inline( } tmp = kmalloc(count, GFP_NOFS); - if (!tmp) { - ret = -ENOMEM; - goto out; - } + if (!tmp) + return -ENOMEM; + read_extent_buffer(leaf, tmp, ptr, count); btrfs_release_path(path); - unlock_extent(io_tree, start, lockend, cached_state); + btrfs_unlock_extent(io_tree, start, lockend, cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); *unlocked = true; @@ -9186,8 +9265,7 @@ static ssize_t btrfs_encoded_read_inline( if (ret != count) ret = -EFAULT; kfree(tmp); -out: - btrfs_free_path(path); + return ret; } @@ -9327,7 +9405,7 @@ ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter, if (ret) goto out; - unlock_extent(io_tree, start, lockend, cached_state); + btrfs_unlock_extent(io_tree, start, lockend, cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); *unlocked = true; @@ -9404,7 +9482,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, goto out_unlock_inode; } - if (!try_lock_extent(io_tree, start, lockend, cached_state)) { + if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) { ret = -EAGAIN; goto out_unlock_inode; } @@ -9413,7 +9491,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, lockend - start + 1); if (ordered) { btrfs_put_ordered_extent(ordered); - unlock_extent(io_tree, start, lockend, cached_state); + btrfs_unlock_extent(io_tree, start, lockend, cached_state); ret = -EAGAIN; goto out_unlock_inode; } @@ -9426,13 +9504,13 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, if (ret) goto out_unlock_inode; - lock_extent(io_tree, start, lockend, cached_state); + btrfs_lock_extent(io_tree, start, lockend, cached_state); ordered = btrfs_lookup_ordered_range(inode, start, lockend - start + 1); if (!ordered) break; btrfs_put_ordered_extent(ordered); - unlock_extent(io_tree, start, lockend, cached_state); + btrfs_unlock_extent(io_tree, start, lockend, cached_state); cond_resched(); } } @@ -9450,7 +9528,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, * For inline extents we get everything we need out of the * extent item. */ - free_extent_map(em); + btrfs_free_extent_map(em); em = NULL; ret = btrfs_encoded_read_inline(iocb, iter, start, lockend, cached_state, extent_start, @@ -9462,7 +9540,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, * We only want to return up to EOF even if the extent extends beyond * that. 
*/ - encoded->len = min_t(u64, extent_map_end(em), + encoded->len = min_t(u64, btrfs_extent_map_end(em), inode->vfs_inode.i_size) - iocb->ki_pos; if (em->disk_bytenr == EXTENT_MAP_HOLE || (em->flags & EXTENT_FLAG_PREALLOC)) { @@ -9470,7 +9548,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, count = min_t(u64, count, encoded->len); encoded->len = count; encoded->unencoded_len = count; - } else if (extent_map_is_compressed(em)) { + } else if (btrfs_extent_map_is_compressed(em)) { *disk_bytenr = em->disk_bytenr; /* * Bail if the buffer isn't large enough to return the whole @@ -9485,12 +9563,12 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, encoded->unencoded_len = em->ram_bytes; encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset); ret = btrfs_encoded_io_compression_from_extent(fs_info, - extent_map_compression(em)); + btrfs_extent_map_compression(em)); if (ret < 0) goto out_em; encoded->compression = ret; } else { - *disk_bytenr = extent_map_block_start(em) + (start - em->start); + *disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start); if (encoded->len > count) encoded->len = count; /* @@ -9503,11 +9581,11 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, encoded->unencoded_len = count; *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize); } - free_extent_map(em); + btrfs_free_extent_map(em); em = NULL; if (*disk_bytenr == EXTENT_MAP_HOLE) { - unlock_extent(io_tree, start, lockend, cached_state); + btrfs_unlock_extent(io_tree, start, lockend, cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); unlocked = true; ret = iov_iter_zero(count, iter); @@ -9519,11 +9597,11 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter, } out_em: - free_extent_map(em); + btrfs_free_extent_map(em); out_unlock_extent: /* Leave inode and extent locked if we need to do a read. 
*/ if (!unlocked && ret != -EIOCBQUEUED) - unlock_extent(io_tree, start, lockend, cached_state); + btrfs_unlock_extent(io_tree, start, lockend, cached_state); out_unlock_inode: if (!unlocked && ret != -EIOCBQUEUED) btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); @@ -9670,14 +9748,14 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, end >> PAGE_SHIFT); if (ret) goto out_folios; - lock_extent(io_tree, start, end, &cached_state); + btrfs_lock_extent(io_tree, start, end, &cached_state); ordered = btrfs_lookup_ordered_range(inode, start, num_bytes); if (!ordered && !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) break; if (ordered) btrfs_put_ordered_extent(ordered); - unlock_extent(io_tree, start, end, &cached_state); + btrfs_unlock_extent(io_tree, start, end, &cached_state); cond_resched(); } @@ -9727,11 +9805,11 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, ret = PTR_ERR(em); goto out_free_reserved; } - free_extent_map(em); + btrfs_free_extent_map(em); ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent, - (1 << BTRFS_ORDERED_ENCODED) | - (1 << BTRFS_ORDERED_COMPRESSED)); + (1U << BTRFS_ORDERED_ENCODED) | + (1U << BTRFS_ORDERED_COMPRESSED)); if (IS_ERR(ordered)) { btrfs_drop_extent_map_range(inode, start, end, false); ret = PTR_ERR(ordered); @@ -9742,7 +9820,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, if (start + encoded->len > inode->vfs_inode.i_size) i_size_write(&inode->vfs_inode, start + encoded->len); - unlock_extent(io_tree, start, end, &cached_state); + btrfs_unlock_extent(io_tree, start, end, &cached_state); btrfs_delalloc_release_extents(inode, num_bytes); @@ -9752,7 +9830,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, out_free_reserved: btrfs_dec_block_group_reservations(fs_info, ins.objectid); - btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); + btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true); out_delalloc_release: btrfs_delalloc_release_extents(inode, num_bytes); btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0); @@ -9765,9 +9843,9 @@ out_free_data_space: * bytes_may_use. 
*/ if (!extent_reserved) - btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes); + btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes); out_unlock: - unlock_extent(io_tree, start, end, &cached_state); + btrfs_unlock_extent(io_tree, start, end, &cached_state); out_folios: for (i = 0; i < nr_folios; i++) { if (folios[i]) @@ -10032,7 +10110,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); - lock_extent(io_tree, 0, isize - 1, &cached_state); + btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state); while (prev_extent_end < isize) { struct btrfs_key key; struct extent_buffer *leaf; @@ -10210,7 +10288,7 @@ out: if (!IS_ERR_OR_NULL(map)) btrfs_free_chunk_map(map); - unlock_extent(io_tree, 0, isize - 1, &cached_state); + btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state); if (ret) btrfs_swap_deactivate(file); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 63aeacc54945..913acef3f0a9 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -909,7 +909,7 @@ static noinline int btrfs_mksubvol(const struct path *parent, if (error == -EINTR) return error; - dentry = lookup_one(idmap, name, parent->dentry, namelen); + dentry = lookup_one(idmap, &QSTR_LEN(name, namelen), parent->dentry); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_unlock; @@ -1446,8 +1446,8 @@ out: return ret; } -static noinline int key_in_sk(const struct btrfs_key *key, - const struct btrfs_ioctl_search_key *sk) +static noinline bool key_in_sk(const struct btrfs_key *key, + const struct btrfs_ioctl_search_key *sk) { struct btrfs_key test; int ret; @@ -1458,7 +1458,7 @@ static noinline int key_in_sk(const struct btrfs_key *key, ret = btrfs_comp_cpu_keys(key, &test); if (ret < 0) - return 0; + return false; test.objectid = sk->max_objectid; test.type = sk->max_type; @@ -1466,8 +1466,8 @@ static noinline int key_in_sk(const struct btrfs_key *key, ret = btrfs_comp_cpu_keys(key, &test); if (ret > 0) - return 0; - return 1; + return false; + return true; } static noinline int copy_to_sk(struct btrfs_path *path, @@ -2288,7 +2288,6 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL; struct mnt_idmap *idmap = file_mnt_idmap(file); char *subvol_name, *subvol_name_ptr = NULL; - int subvol_namelen; int ret = 0; bool destroy_parent = false; @@ -2411,10 +2410,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, goto out; } - subvol_namelen = strlen(subvol_name); - if (strchr(subvol_name, '/') || - strncmp(subvol_name, "..", subvol_namelen) == 0) { + strcmp(subvol_name, "..") == 0) { ret = -EINVAL; goto free_subvol_name; } @@ -2427,7 +2424,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, ret = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); if (ret == -EINTR) goto free_subvol_name; - dentry = lookup_one(idmap, subvol_name, parent, subvol_namelen); + dentry = lookup_one(idmap, &QSTR(subvol_name), parent); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto out_unlock_dir; @@ -4510,7 +4507,7 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp, args.compression, &unlocked); if (!unlocked) { - unlock_extent(io_tree, start, lockend, &cached_state); + btrfs_unlock_extent(io_tree, start, lockend, &cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); } } @@ -4699,7 +4696,7 @@ static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int iss ret = priv->count; out: - 
unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state); + btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); io_uring_cmd_done(cmd, ret, 0, issue_flags); @@ -4788,7 +4785,7 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter, return -EIOCBQUEUED; out_fail: - unlock_extent(io_tree, start, lockend, &cached_state); + btrfs_unlock_extent(io_tree, start, lockend, &cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); kfree(priv); return ret; @@ -4913,7 +4910,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue (const char *)&data->args + copy_end_kernel, sizeof(data->args) - copy_end_kernel)) { if (ret == -EIOCBQUEUED) { - unlock_extent(io_tree, start, lockend, &cached_state); + btrfs_unlock_extent(io_tree, start, lockend, &cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); } ret = -EFAULT; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 81e62b652e21..a3e6d9616e60 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -149,15 +149,15 @@ void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesti /* * Try-lock for read. * - * Return 1 if the rwlock has been taken, 0 otherwise + * Return true if the rwlock has been taken, false otherwise */ -int btrfs_try_tree_read_lock(struct extent_buffer *eb) +bool btrfs_try_tree_read_lock(struct extent_buffer *eb) { if (down_read_trylock(&eb->lock)) { trace_btrfs_try_tree_read_lock(eb); - return 1; + return true; } - return 0; + return false; } /* diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index c69e57ff804b..af29df98ac14 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -189,7 +189,7 @@ static inline void btrfs_tree_read_lock(struct extent_buffer *eb) } void btrfs_tree_read_unlock(struct extent_buffer *eb); -int btrfs_try_tree_read_lock(struct extent_buffer *eb); +bool btrfs_try_tree_read_lock(struct extent_buffer *eb); struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root); struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index a45bc11f8665..d403641889ca 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -252,9 +252,8 @@ int lzo_compress_folios(struct list_head *ws, struct address_space *mapping, /* Compress at most one sector of data each time */ in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off); ASSERT(in_len); - data_in = kmap_local_folio(folio_in, 0); - ret = lzo1x_1_compress(data_in + - offset_in_page(cur_in), in_len, + data_in = kmap_local_folio(folio_in, offset_in_folio(folio_in, cur_in)); + ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, &out_len, workspace->mem); kunmap_local(data_in); diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h index 08a9272399d2..6abf81bb00c2 100644 --- a/fs/btrfs/messages.h +++ b/fs/btrfs/messages.h @@ -4,6 +4,7 @@ #define BTRFS_MESSAGES_H #include <linux/types.h> +#include <linux/types.h> #include <linux/printk.h> #include <linux/bug.h> @@ -170,15 +171,83 @@ do { \ #ifdef CONFIG_BTRFS_ASSERT -#define btrfs_assertfail(expr, file, line) ({ \ - pr_err("assertion failed: %s, in %s:%d\n", (expr), (file), (line)); \ - BUG(); \ -}) +__printf(1, 2) +static inline void verify_assert_printk_format(const char *fmt, ...) { + /* Stub to verify the assertion format string. */ +} + +/* Take the first token if any. 
*/ +#define __FIRST_ARG(_, ...) _ +/* + * Skip the first token and return the rest, if it's empty the comma is dropped. + * As ##__VA_ARGS__ cannot be at the beginning of the macro the __VA_OPT__ is needed + * and supported since GCC 8 and Clang 12. + */ +#define __REST_ARGS(_, ... ) __VA_OPT__(,) __VA_ARGS__ + +#if defined(CONFIG_CC_IS_CLANG) || GCC_VERSION >= 80000 +/* + * Assertion with optional printk() format. + * + * Accepted syntax: + * ASSERT(condition); + * ASSERT(condition, "string"); + * ASSERT(condition, "variable=%d", variable); + * + * How it works: + * - if there's no format string, ""[0] evaluates at compile time to 0 and the + * true branch is executed + * - any non-empty format string with the "" prefix evaluates to != 0 at + * compile time and the false branch is executed + * - stringified condition is printed as %s so we don't accidentally mix format + * strings (the % operator) + * - there can be only one printk() call, so the format strings and arguments are + * spliced together: + * DEFAULT_FMT [USER_FMT], DEFAULT_ARGS [, USER_ARGS] + * - comma between DEFAULT_ARGS and USER_ARGS is handled by preprocessor + * (requires __VA_OPT__ support) + * - otherwise we could use __VA_OPT(,) __VA_ARGS__ for the 2nd+ argument of args, + */ +#define ASSERT(cond, args...) \ +do { \ + verify_assert_printk_format("check the format string" args); \ + if (!likely(cond)) { \ + if (("" __FIRST_ARG(args) [0]) == 0) { \ + pr_err("assertion failed: %s :: %ld, in %s:%d\n", \ + #cond, (long)(cond), __FILE__, __LINE__); \ + } else { \ + pr_err("assertion failed: %s :: %ld, in %s:%d (" __FIRST_ARG(args) ")\n", \ + #cond, (long)(cond), __FILE__, __LINE__ __REST_ARGS(args)); \ + } \ + BUG(); \ + } \ +} while(0) + +#else + +/* For GCC < 8.x only the simple output. */ + +#define ASSERT(cond, args...) \ +do { \ + verify_assert_printk_format("check the format string" args); \ + if (!likely(cond)) { \ + pr_err("assertion failed: %s :: %ld, in %s:%d\n", \ + #cond, (long)(cond), __FILE__, __LINE__); \ + BUG(); \ + } \ +} while(0) + +#endif + +#else +#define ASSERT(cond, args...) (void)(cond) +#endif -#define ASSERT(expr) \ - (likely(expr) ? (void)0 : btrfs_assertfail(#expr, __FILE__, __LINE__)) +#ifdef CONFIG_BTRFS_DEBUG +/* Verbose warning only under debug build. */ +#define DEBUG_WARN(args...) WARN(1, KERN_ERR args) #else -#define ASSERT(expr) (void)(expr) +#define DEBUG_WARN(...) do {} while(0) #endif __printf(5, 6) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 03c945711003..9212ce110cde 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -153,25 +153,30 @@ static struct btrfs_ordered_extent *alloc_ordered_extent( struct btrfs_ordered_extent *entry; int ret; u64 qgroup_rsv = 0; + const bool is_nocow = (flags & + ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC))); - if (flags & - ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) { - /* For nocow write, we can release the qgroup rsv right now */ + /* + * For a NOCOW write we can free the qgroup reserve right now. For a COW + * one we transfer the reserved space from the inode's iotree into the + * ordered extent by calling btrfs_qgroup_release_data() and tracking + * the qgroup reserved amount in the ordered extent, so that later after + * completing the ordered extent, when running the data delayed ref it + * creates, we free the reserved data with btrfs_qgroup_free_refroot(). 
+ */ + if (is_nocow) ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv); - if (ret < 0) - return ERR_PTR(ret); - } else { - /* - * The ordered extent has reserved qgroup space, release now - * and pass the reserved number for qgroup_record to free. - */ + else ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv); - if (ret < 0) - return ERR_PTR(ret); - } + + if (ret < 0) + return ERR_PTR(ret); + entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS); - if (!entry) - return ERR_PTR(-ENOMEM); + if (!entry) { + entry = ERR_PTR(-ENOMEM); + goto out; + } entry->file_offset = file_offset; entry->num_bytes = num_bytes; @@ -180,7 +185,12 @@ static struct btrfs_ordered_extent *alloc_ordered_extent( entry->disk_num_bytes = disk_num_bytes; entry->offset = offset; entry->bytes_left = num_bytes; - entry->inode = BTRFS_I(igrab(&inode->vfs_inode)); + if (WARN_ON_ONCE(!igrab(&inode->vfs_inode))) { + kmem_cache_free(btrfs_ordered_extent_cache, entry); + entry = ERR_PTR(-ESTALE); + goto out; + } + entry->inode = inode; entry->compress_type = compress_type; entry->truncated_len = (u64)-1; entry->qgroup_rsv = qgroup_rsv; @@ -203,6 +213,12 @@ static struct btrfs_ordered_extent *alloc_ordered_extent( btrfs_mod_outstanding_extents(inode, 1); spin_unlock(&inode->lock); +out: + if (IS_ERR(entry) && !is_nocow) + btrfs_qgroup_free_refroot(inode->root->fs_info, + btrfs_root_id(inode->root), + qgroup_rsv, BTRFS_QGROUP_RSV_DATA); + return entry; } @@ -253,7 +269,7 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry) * @disk_bytenr: Offset of extent on disk. * @disk_num_bytes: Size of extent on disk. * @offset: Offset into unencoded data where file data starts. - * @flags: Flags specifying type of extent (1 << BTRFS_ORDERED_*). + * @flags: Flags specifying type of extent (1U << BTRFS_ORDERED_*). * @compress_type: Compression algorithm used for data. * * Most of these parameters correspond to &struct btrfs_file_extent_item. 
The @@ -607,23 +623,18 @@ out: */ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) { - struct list_head *cur; - struct btrfs_ordered_sum *sum; - trace_btrfs_ordered_extent_put(entry->inode, entry); if (refcount_dec_and_test(&entry->refs)) { + struct btrfs_ordered_sum *sum; + struct btrfs_ordered_sum *tmp; + ASSERT(list_empty(&entry->root_extent_list)); ASSERT(list_empty(&entry->log_list)); ASSERT(RB_EMPTY_NODE(&entry->rb_node)); - if (entry->inode) - btrfs_add_delayed_iput(entry->inode); - while (!list_empty(&entry->list)) { - cur = entry->list.next; - sum = list_entry(cur, struct btrfs_ordered_sum, list); - list_del(&sum->list); + btrfs_add_delayed_iput(entry->inode); + list_for_each_entry_safe(sum, tmp, &entry->list, list) kvfree(sum); - } kmem_cache_free(btrfs_ordered_extent_cache, entry); } } @@ -1173,7 +1184,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start, cachedp = cached_state; while (1) { - lock_extent(&inode->io_tree, start, end, cachedp); + btrfs_lock_extent(&inode->io_tree, start, end, cachedp); ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1); if (!ordered) { @@ -1186,7 +1197,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start, refcount_dec(&cache->refs); break; } - unlock_extent(&inode->io_tree, start, end, cachedp); + btrfs_unlock_extent(&inode->io_tree, start, end, cachedp); btrfs_start_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); } @@ -1204,7 +1215,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end, { struct btrfs_ordered_extent *ordered; - if (!try_lock_extent(&inode->io_tree, start, end, cached_state)) + if (!btrfs_try_lock_extent(&inode->io_tree, start, end, cached_state)) return false; ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1); @@ -1212,7 +1223,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end, return true; btrfs_put_ordered_extent(ordered); - unlock_extent(&inode->io_tree, start, end, cached_state); + btrfs_unlock_extent(&inode->io_tree, start, end, cached_state); return false; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index d6fa36674270..b3176edbde82 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -83,7 +83,7 @@ static void qgroup_rsv_add(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup, u64 num_bytes, enum btrfs_qgroup_rsv_type type) { - trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type); + trace_btrfs_qgroup_update_reserve(fs_info, qgroup, num_bytes, type); qgroup->rsv.values[type] += num_bytes; } @@ -91,7 +91,7 @@ static void qgroup_rsv_release(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup, u64 num_bytes, enum btrfs_qgroup_rsv_type type) { - trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type); + trace_btrfs_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type); if (qgroup->rsv.values[type] >= num_bytes) { qgroup->rsv.values[type] -= num_bytes; return; @@ -1823,7 +1823,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] || qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] || qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) { - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN(); btrfs_warn_rl(fs_info, "to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu", btrfs_qgroup_level(qgroup->qgroupid), @@ -1843,7 +1843,7 @@ int btrfs_remove_qgroup(struct 
btrfs_trans_handle *trans, u64 qgroupid) !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) { if (qgroup->rfer || qgroup->excl || qgroup->rfer_cmpr || qgroup->excl_cmpr) { - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN(); btrfs_warn_rl(fs_info, "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu", btrfs_qgroup_level(qgroup->qgroupid), @@ -2837,8 +2837,8 @@ static void qgroup_update_counters(struct btrfs_fs_info *fs_info, cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); - trace_qgroup_update_counters(fs_info, qg, cur_old_count, - cur_new_count); + trace_btrfs_qgroup_update_counters(fs_info, qg, cur_old_count, + cur_new_count); /* Rfer update part */ if (cur_old_count == 0 && cur_new_count > 0) { @@ -3100,8 +3100,7 @@ cleanup: kfree(record); } - trace_qgroup_num_dirty_extents(fs_info, trans->transid, - num_dirty_extents); + trace_btrfs_qgroup_num_dirty_extents(fs_info, trans->transid, num_dirty_extents); return ret; } @@ -4129,8 +4128,8 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode, * Now the entry is in [start, start + len), revert the * EXTENT_QGROUP_RESERVED bit. */ - clear_ret = clear_extent_bits(&inode->io_tree, entry_start, - entry_end, EXTENT_QGROUP_RESERVED); + clear_ret = btrfs_clear_extent_bits(&inode->io_tree, entry_start, + entry_end, EXTENT_QGROUP_RESERVED); if (!ret && clear_ret < 0) ret = clear_ret; @@ -4232,8 +4231,9 @@ static int qgroup_reserve_data(struct btrfs_inode *inode, reserved = *reserved_ret; /* Record already reserved space */ orig_reserved = reserved->bytes_changed; - ret = set_record_extent_bits(&inode->io_tree, start, - start + len -1, EXTENT_QGROUP_RESERVED, reserved); + ret = btrfs_set_record_extent_bits(&inode->io_tree, start, + start + len - 1, EXTENT_QGROUP_RESERVED, + reserved); /* Newly reserved space */ to_reserve = reserved->bytes_changed - orig_reserved; @@ -4326,9 +4326,10 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode, * EXTENT_QGROUP_RESERVED, we won't double free. * So not need to rush. 
*/ - ret = clear_record_extent_bits(&inode->io_tree, free_start, - free_start + free_len - 1, - EXTENT_QGROUP_RESERVED, &changeset); + ret = btrfs_clear_record_extent_bits(&inode->io_tree, free_start, + free_start + free_len - 1, + EXTENT_QGROUP_RESERVED, + &changeset); if (ret < 0) goto out; freed += changeset.bytes_changed; @@ -4352,9 +4353,9 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, int ret; if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) { - return clear_record_extent_bits(&inode->io_tree, start, - start + len - 1, - EXTENT_QGROUP_RESERVED, NULL); + return btrfs_clear_record_extent_bits(&inode->io_tree, start, + start + len - 1, + EXTENT_QGROUP_RESERVED, NULL); } /* In release case, we shouldn't have @reserved */ @@ -4362,8 +4363,8 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, if (free && reserved) return qgroup_free_reserved_data(inode, reserved, start, len, released); extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1, - EXTENT_QGROUP_RESERVED, &changeset); + ret = btrfs_clear_record_extent_bits(&inode->io_tree, start, start + len - 1, + EXTENT_QGROUP_RESERVED, &changeset); if (ret < 0) goto out; @@ -4472,7 +4473,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, return 0; BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - trace_qgroup_meta_reserve(root, (s64)num_bytes, type); + trace_btrfs_qgroup_meta_reserve(root, (s64)num_bytes, type); ret = qgroup_reserve(root, num_bytes, enforce, type); if (ret < 0) return ret; @@ -4517,7 +4518,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) return; /* TODO: Update trace point to handle such free */ - trace_qgroup_meta_free_all_pertrans(root); + trace_btrfs_qgroup_meta_free_all_pertrans(root); /* Special value -1 means to free all reserved space */ btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1, BTRFS_QGROUP_RSV_META_PERTRANS); @@ -4539,7 +4540,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, */ num_bytes = sub_root_meta_rsv(root, num_bytes, type); BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); - trace_qgroup_meta_reserve(root, -(s64)num_bytes, type); + trace_btrfs_qgroup_meta_reserve(root, -(s64)num_bytes, type); btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type); } @@ -4593,7 +4594,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) /* Same as btrfs_qgroup_free_meta_prealloc() */ num_bytes = sub_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PREALLOC); - trace_qgroup_meta_convert(root, num_bytes); + trace_btrfs_qgroup_meta_convert(root, num_bytes); qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes); if (!sb_rdonly(fs_info->sb)) add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS); @@ -4611,8 +4612,8 @@ void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) int ret; extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, - EXTENT_QGROUP_RESERVED, &changeset); + ret = btrfs_clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, + EXTENT_QGROUP_RESERVED, &changeset); WARN_ON(ret < 0); if (WARN_ON(changeset.bytes_changed)) { @@ -4766,7 +4767,7 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_root *subvol_root, * Marking qgroup inconsistent should be enough * for end users. 
*/ - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN("duplicated but mismatched entry found"); ret = -EEXIST; } kfree(block); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index cdd373c27784..3ff2bedfb3a4 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -134,14 +134,17 @@ struct btrfs_stripe_hash_table { }; /* - * A bvec like structure to present a sector inside a page. - * - * Unlike bvec we don't need bvlen, as it's fixed to sectorsize. + * A structure to present a sector inside a page, the length is fixed to + * sectorsize; */ struct sector_ptr { - struct page *page; - unsigned int pgoff:24; - unsigned int uptodate:8; + /* + * Blocks from the bio list can still be highmem. + * So here we use physical address to present a page and the offset inside it. + */ + phys_addr_t paddr; + bool has_paddr; + bool uptodate; }; static void rmw_rbio_work(struct work_struct *work); @@ -200,8 +203,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) struct btrfs_stripe_hash_table *x; struct btrfs_stripe_hash *cur; struct btrfs_stripe_hash *h; - int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS; - int i; + unsigned int num_entries = 1U << BTRFS_STRIPE_HASH_TABLE_BITS; if (info->stripe_hash_table) return 0; @@ -222,7 +224,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) h = table->table; - for (i = 0; i < num_entries; i++) { + for (unsigned int i = 0; i < num_entries; i++) { cur = h + i; INIT_LIST_HEAD(&cur->hash_list); spin_lock_init(&cur->lock); @@ -233,6 +235,14 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info) return 0; } +static void memcpy_sectors(const struct sector_ptr *dst, + const struct sector_ptr *src, u32 blocksize) +{ + memcpy_page(phys_to_page(dst->paddr), offset_in_page(dst->paddr), + phys_to_page(src->paddr), offset_in_page(src->paddr), + blocksize); +} + /* * caching an rbio means to copy anything from the * bio_sectors array into the stripe_pages array. 
We @@ -253,7 +263,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) for (i = 0; i < rbio->nr_sectors; i++) { /* Some range not covered by bio (partial write), skip it */ - if (!rbio->bio_sectors[i].page) { + if (!rbio->bio_sectors[i].has_paddr) { /* * Even if the sector is not covered by bio, if it is * a data sector it should still be uptodate as it is @@ -264,12 +274,8 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) continue; } - ASSERT(rbio->stripe_sectors[i].page); - memcpy_page(rbio->stripe_sectors[i].page, - rbio->stripe_sectors[i].pgoff, - rbio->bio_sectors[i].page, - rbio->bio_sectors[i].pgoff, - rbio->bioc->fs_info->sectorsize); + memcpy_sectors(&rbio->stripe_sectors[i], &rbio->bio_sectors[i], + rbio->bioc->fs_info->sectorsize); rbio->stripe_sectors[i].uptodate = 1; } set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); @@ -326,8 +332,13 @@ static void index_stripe_sectors(struct btrfs_raid_bio *rbio) int page_index = offset >> PAGE_SHIFT; ASSERT(page_index < rbio->nr_pages); - rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index]; - rbio->stripe_sectors[i].pgoff = offset_in_page(offset); + if (!rbio->stripe_pages[page_index]) + continue; + + rbio->stripe_sectors[i].has_paddr = true; + rbio->stripe_sectors[i].paddr = + page_to_phys(rbio->stripe_pages[page_index]) + + offset_in_page(offset); } } @@ -507,9 +518,8 @@ static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info) spin_lock(&table->cache_lock); while (!list_empty(&table->stripe_cache)) { - rbio = list_entry(table->stripe_cache.next, - struct btrfs_raid_bio, - stripe_cache); + rbio = list_first_entry(&table->stripe_cache, + struct btrfs_raid_bio, stripe_cache); __remove_rbio_from_cache(rbio); } spin_unlock(&table->cache_lock); @@ -567,9 +577,9 @@ static void cache_rbio(struct btrfs_raid_bio *rbio) if (table->cache_size > RBIO_CACHE_SIZE) { struct btrfs_raid_bio *found; - found = list_entry(table->stripe_cache.prev, - struct btrfs_raid_bio, - stripe_cache); + found = list_last_entry(&table->stripe_cache, + struct btrfs_raid_bio, + stripe_cache); if (found != rbio) __remove_rbio_from_cache(found); @@ -882,14 +892,14 @@ done_nolock: remove_rbio_from_cache(rbio); } -static void rbio_endio_bio_list(struct bio *cur, blk_status_t err) +static void rbio_endio_bio_list(struct bio *cur, blk_status_t status) { struct bio *next; while (cur) { next = cur->bi_next; cur->bi_next = NULL; - cur->bi_status = err; + cur->bi_status = status; bio_endio(cur); cur = next; } @@ -899,7 +909,7 @@ static void rbio_endio_bio_list(struct bio *cur, blk_status_t err) * this frees the rbio and runs through all the bios in the * bio_list and calls end_io on them */ -static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) +static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t status) { struct bio *cur = bio_list_get(&rbio->bio_list); struct bio *extra; @@ -928,9 +938,9 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) extra = bio_list_get(&rbio->bio_list); free_raid_bio(rbio); - rbio_endio_bio_list(cur, err); + rbio_endio_bio_list(cur, status); if (extra) - rbio_endio_bio_list(extra, err); + rbio_endio_bio_list(extra, status); } /* @@ -962,9 +972,9 @@ static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio, spin_lock(&rbio->bio_list_lock); sector = &rbio->bio_sectors[index]; - if (sector->page || bio_list_only) { + if (sector->has_paddr || bio_list_only) { /* Don't return sector without a valid page pointer */ - if (!sector->page) + if 
(!sector->has_paddr) sector = NULL; spin_unlock(&rbio->bio_list_lock); return sector; @@ -1142,7 +1152,7 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio, rbio, stripe_nr); ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors, rbio, sector_nr); - ASSERT(sector->page); + ASSERT(sector->has_paddr); stripe = &rbio->bioc->stripes[stripe_nr]; disk_start = stripe->physical + sector_nr * sectorsize; @@ -1173,8 +1183,8 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio, */ if (last_end == disk_start && !last->bi_status && last->bi_bdev == stripe->dev->bdev) { - ret = bio_add_page(last, sector->page, sectorsize, - sector->pgoff); + ret = bio_add_page(last, phys_to_page(sector->paddr), + sectorsize, offset_in_page(sector->paddr)); if (ret == sectorsize) return 0; } @@ -1187,7 +1197,8 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio, bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; bio->bi_private = rbio; - __bio_add_page(bio, sector->page, sectorsize, sector->pgoff); + __bio_add_page(bio, phys_to_page(sector->paddr), sectorsize, + offset_in_page(sector->paddr)); bio_list_add(bio_list, bio); return 0; } @@ -1195,23 +1206,20 @@ static int rbio_add_io_sector(struct btrfs_raid_bio *rbio, static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio) { const u32 sectorsize = rbio->bioc->fs_info->sectorsize; - struct bio_vec bvec; - struct bvec_iter iter; + const u32 sectorsize_bits = rbio->bioc->fs_info->sectorsize_bits; + struct bvec_iter iter = bio->bi_iter; u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - rbio->bioc->full_stripe_logical; - bio_for_each_segment(bvec, bio, iter) { - u32 bvec_offset; - - for (bvec_offset = 0; bvec_offset < bvec.bv_len; - bvec_offset += sectorsize, offset += sectorsize) { - int index = offset / sectorsize; - struct sector_ptr *sector = &rbio->bio_sectors[index]; + while (iter.bi_size) { + unsigned int index = (offset >> sectorsize_bits); + struct sector_ptr *sector = &rbio->bio_sectors[index]; + struct bio_vec bv = bio_iter_iovec(bio, iter); - sector->page = bvec.bv_page; - sector->pgoff = bvec.bv_offset + bvec_offset; - ASSERT(sector->pgoff < PAGE_SIZE); - } + sector->has_paddr = true; + sector->paddr = bvec_phys(&bv); + bio_advance_iter_single(bio, &iter, sectorsize); + offset += sectorsize; } } @@ -1289,6 +1297,15 @@ static void assert_rbio(struct btrfs_raid_bio *rbio) ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio); } +static inline void *kmap_local_sector(const struct sector_ptr *sector) +{ + /* The sector pointer must have a page mapped to it. */ + ASSERT(sector->has_paddr); + + return kmap_local_page(phys_to_page(sector->paddr)) + + offset_in_page(sector->paddr); +} + /* Generate PQ for one vertical stripe. 
*/ static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr) { @@ -1301,14 +1318,13 @@ static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr) /* First collect one sector from each data stripe */ for (stripe = 0; stripe < rbio->nr_data; stripe++) { sector = sector_in_rbio(rbio, stripe, sectornr, 0); - pointers[stripe] = kmap_local_page(sector->page) + - sector->pgoff; + pointers[stripe] = kmap_local_sector(sector); } /* Then add the parity stripe */ sector = rbio_pstripe_sector(rbio, sectornr); sector->uptodate = 1; - pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff; + pointers[stripe++] = kmap_local_sector(sector); if (has_qstripe) { /* @@ -1317,8 +1333,7 @@ static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr) */ sector = rbio_qstripe_sector(rbio, sectornr); sector->uptodate = 1; - pointers[stripe++] = kmap_local_page(sector->page) + - sector->pgoff; + pointers[stripe++] = kmap_local_sector(sector); assert_rbio(rbio); raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, @@ -1477,15 +1492,14 @@ static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio) * stripe_pages[], thus we need to locate the sector. */ static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio, - struct page *page, - unsigned int pgoff) + phys_addr_t paddr) { int i; for (i = 0; i < rbio->nr_sectors; i++) { struct sector_ptr *sector = &rbio->stripe_sectors[i]; - if (sector->page == page && sector->pgoff == pgoff) + if (sector->has_paddr && sector->paddr == paddr) return sector; } return NULL; @@ -1505,11 +1519,10 @@ static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio) bio_for_each_segment_all(bvec, bio, iter_all) { struct sector_ptr *sector; - int pgoff; + phys_addr_t paddr = bvec_phys(bvec); - for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len; - pgoff += sectorsize) { - sector = find_stripe_sector(rbio, bvec->bv_page, pgoff); + for (u32 off = 0; off < bvec->bv_len; off += sectorsize) { + sector = find_stripe_sector(rbio, paddr + off); ASSERT(sector); if (sector) sector->uptodate = 1; @@ -1519,17 +1532,14 @@ static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio) static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio) { - struct bio_vec *bv = bio_first_bvec_all(bio); + phys_addr_t bvec_paddr = bvec_phys(bio_first_bvec_all(bio)); int i; for (i = 0; i < rbio->nr_sectors; i++) { - struct sector_ptr *sector; - - sector = &rbio->stripe_sectors[i]; - if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset) + if (rbio->stripe_sectors[i].paddr == bvec_paddr) break; - sector = &rbio->bio_sectors[i]; - if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset) + if (rbio->bio_sectors[i].has_paddr && + rbio->bio_sectors[i].paddr == bvec_paddr) break; } ASSERT(i < rbio->nr_sectors); @@ -1575,11 +1585,11 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio, return; bio_for_each_segment_all(bvec, bio, iter_all) { - int bv_offset; + void *kaddr; - for (bv_offset = bvec->bv_offset; - bv_offset < bvec->bv_offset + bvec->bv_len; - bv_offset += fs_info->sectorsize, total_sector_nr++) { + kaddr = bvec_kmap_local(bvec); + for (u32 off = 0; off < bvec->bv_len; + off += fs_info->sectorsize, total_sector_nr++) { u8 csum_buf[BTRFS_CSUM_SIZE]; u8 *expected_csum = rbio->csum_buf + total_sector_nr * fs_info->csum_size; @@ -1589,11 +1599,12 @@ static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio, if 
(!test_bit(total_sector_nr, rbio->csum_bitmap)) continue; - ret = btrfs_check_sector_csum(fs_info, bvec->bv_page, - bv_offset, csum_buf, expected_csum); + ret = btrfs_check_sector_csum(fs_info, kaddr + off, + csum_buf, expected_csum); if (ret < 0) set_bit(total_sector_nr, rbio->error_bitmap); } + kunmap_local(kaddr); } } @@ -1689,8 +1700,8 @@ static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule) list_sort(NULL, &plug->rbio_list, plug_cmp); while (!list_empty(&plug->rbio_list)) { - cur = list_entry(plug->rbio_list.next, - struct btrfs_raid_bio, plug_list); + cur = list_first_entry(&plug->rbio_list, + struct btrfs_raid_bio, plug_list); list_del_init(&cur->plug_list); if (rbio_is_full(cur)) { @@ -1791,6 +1802,7 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio, struct sector_ptr *sector; u8 csum_buf[BTRFS_CSUM_SIZE]; u8 *csum_expected; + void *kaddr; int ret; if (!rbio->csum_bitmap || !rbio->csum_buf) @@ -1809,13 +1821,12 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio, sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr); } - ASSERT(sector->page); - csum_expected = rbio->csum_buf + (stripe_nr * rbio->stripe_nsectors + sector_nr) * fs_info->csum_size; - ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff, - csum_buf, csum_expected); + kaddr = kmap_local_sector(sector); + ret = btrfs_check_sector_csum(fs_info, kaddr, csum_buf, csum_expected); + kunmap_local(kaddr); return ret; } @@ -1872,9 +1883,7 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr, } else { sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr); } - ASSERT(sector->page); - pointers[stripe_nr] = kmap_local_page(sector->page) + - sector->pgoff; + pointers[stripe_nr] = kmap_local_sector(sector); unmap_array[stripe_nr] = pointers[stripe_nr]; } @@ -2282,9 +2291,8 @@ static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio) static void raid_wait_write_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; - blk_status_t err = bio->bi_status; - if (err) + if (bio->bi_status) rbio_update_error_bitmap(rbio, bio); bio_put(bio); if (atomic_dec_and_test(&rbio->stripes_pending)) @@ -2326,7 +2334,7 @@ static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio) * thus this rbio can not be cached one, as cached one must * have all its data sectors present and uptodate. 
*/ - if (!sector->page || !sector->uptodate) + if (!sector->has_paddr || !sector->uptodate) return true; } return false; @@ -2516,6 +2524,7 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio) int stripe; int sectornr; bool has_qstripe; + struct page *page; struct sector_ptr p_sector = { 0 }; struct sector_ptr q_sector = { 0 }; struct bio_list bio_list; @@ -2547,29 +2556,33 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio) */ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); - p_sector.page = alloc_page(GFP_NOFS); - if (!p_sector.page) + page = alloc_page(GFP_NOFS); + if (!page) return -ENOMEM; - p_sector.pgoff = 0; + p_sector.has_paddr = true; + p_sector.paddr = page_to_phys(page); p_sector.uptodate = 1; + page = NULL; if (has_qstripe) { /* RAID6, allocate and map temp space for the Q stripe */ - q_sector.page = alloc_page(GFP_NOFS); - if (!q_sector.page) { - __free_page(p_sector.page); - p_sector.page = NULL; + page = alloc_page(GFP_NOFS); + if (!page) { + __free_page(phys_to_page(p_sector.paddr)); + p_sector.has_paddr = false; return -ENOMEM; } - q_sector.pgoff = 0; + q_sector.has_paddr = true; + q_sector.paddr = page_to_phys(page); q_sector.uptodate = 1; - pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page); + page = NULL; + pointers[rbio->real_stripes - 1] = kmap_local_sector(&q_sector); } bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); /* Map the parity stripe just once */ - pointers[nr_data] = kmap_local_page(p_sector.page); + pointers[nr_data] = kmap_local_sector(&p_sector); for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { struct sector_ptr *sector; @@ -2578,8 +2591,7 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio) /* first collect one page from each data stripe */ for (stripe = 0; stripe < nr_data; stripe++) { sector = sector_in_rbio(rbio, stripe, sectornr, 0); - pointers[stripe] = kmap_local_page(sector->page) + - sector->pgoff; + pointers[stripe] = kmap_local_sector(sector); } if (has_qstripe) { @@ -2595,7 +2607,7 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio) /* Check scrubbing parity and repair it */ sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); - parity = kmap_local_page(sector->page) + sector->pgoff; + parity = kmap_local_sector(sector); if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0) memcpy(parity, pointers[rbio->scrubp], sectorsize); else @@ -2608,12 +2620,11 @@ static int finish_parity_scrub(struct btrfs_raid_bio *rbio) } kunmap_local(pointers[nr_data]); - __free_page(p_sector.page); - p_sector.page = NULL; - if (q_sector.page) { - kunmap_local(pointers[rbio->real_stripes - 1]); - __free_page(q_sector.page); - q_sector.page = NULL; + __free_page(phys_to_page(p_sector.paddr)); + p_sector.has_paddr = false; + if (q_sector.has_paddr) { + __free_page(phys_to_page(q_sector.paddr)); + q_sector.has_paddr = false; } /* diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 15c296cb4dac..62161beca559 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -87,7 +87,7 @@ static int copy_inline_to_page(struct btrfs_inode *inode, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, btrfs_alloc_write_mask(mapping)); if (IS_ERR(folio)) { - ret = -ENOMEM; + ret = PTR_ERR(folio); goto out_unlock; } @@ -95,9 +95,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode, if (ret < 0) goto out_unlock; - clear_extent_bit(&inode->io_tree, file_offset, range_end, - EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, - NULL); + btrfs_clear_extent_bits(&inode->io_tree, 
file_offset, range_end, + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG); ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL); if (ret) goto out_unlock; @@ -646,10 +645,10 @@ static int btrfs_extent_same_range(struct btrfs_inode *src, u64 loff, u64 len, * because we have already locked the inode's i_mmap_lock in exclusive * mode. */ - lock_extent(&dst->io_tree, dst_loff, end, &cached_state); + btrfs_lock_extent(&dst->io_tree, dst_loff, end, &cached_state); ret = btrfs_clone(&src->vfs_inode, &dst->vfs_inode, loff, len, ALIGN(len, bs), dst_loff, 1); - unlock_extent(&dst->io_tree, dst_loff, end, &cached_state); + btrfs_unlock_extent(&dst->io_tree, dst_loff, end, &cached_state); btrfs_btree_balance_dirty(fs_info); @@ -749,9 +748,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, * mode. */ end = destoff + len - 1; - lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state); + btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state); ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); - unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state); /* * We may have copied an inline extent into a page of the destination diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e17bcb034595..02086191630d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -178,8 +178,9 @@ static void mark_block_processed(struct reloc_control *rc, in_range(node->bytenr, rc->block_group->start, rc->block_group->length)) { blocksize = rc->extent_root->fs_info->nodesize; - set_extent_bit(&rc->processed_blocks, node->bytenr, - node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL); + btrfs_set_extent_bit(&rc->processed_blocks, node->bytenr, + node->bytenr + blocksize - 1, EXTENT_DIRTY, + NULL); } node->processed = 1; } @@ -195,8 +196,8 @@ static struct btrfs_backref_node *walk_up_backref( int idx = *index; while (!list_empty(&node->upper)) { - edge = list_entry(node->upper.next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&node->upper, struct btrfs_backref_edge, + list[LOWER]); edges[idx++] = edge; node = edge->node[UPPER]; } @@ -222,8 +223,8 @@ static struct btrfs_backref_node *walk_down_backref( idx--; continue; } - edge = list_entry(edge->list[LOWER].next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&edge->list[LOWER], struct btrfs_backref_edge, + list[LOWER]); edges[idx - 1] = edge; *index = idx; return edge->node[UPPER]; @@ -347,8 +348,8 @@ static bool handle_useless_nodes(struct reloc_control *rc, struct btrfs_backref_edge *edge; struct btrfs_backref_node *lower; - edge = list_entry(cur->lower.next, - struct btrfs_backref_edge, list[UPPER]); + edge = list_first_entry(&cur->lower, struct btrfs_backref_edge, + list[UPPER]); list_del(&edge->list[UPPER]); list_del(&edge->list[LOWER]); lower = edge->node[LOWER]; @@ -910,16 +911,16 @@ int replace_file_extents(struct btrfs_trans_handle *trans, /* Take mmap lock to serialize with reflinks. 
*/ if (!down_read_trylock(&inode->i_mmap_lock)) continue; - ret = try_lock_extent(&inode->io_tree, key.offset, - end, &cached_state); + ret = btrfs_try_lock_extent(&inode->io_tree, key.offset, + end, &cached_state); if (!ret) { up_read(&inode->i_mmap_lock); continue; } btrfs_drop_extent_map_range(inode, key.offset, end, true); - unlock_extent(&inode->io_tree, key.offset, end, - &cached_state); + btrfs_unlock_extent(&inode->io_tree, key.offset, end, + &cached_state); up_read(&inode->i_mmap_lock); } } @@ -1378,9 +1379,9 @@ static int invalidate_extent_cache(struct btrfs_root *root, } /* the lock_extent waits for read_folio to complete */ - lock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_lock_extent(&inode->io_tree, start, end, &cached_state); btrfs_drop_extent_map_range(inode, start, end, true); - unlock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); } return 0; } @@ -1697,8 +1698,8 @@ again: rc->merge_reloc_tree = true; while (!list_empty(&rc->reloc_roots)) { - reloc_root = list_entry(rc->reloc_roots.next, - struct btrfs_root, root_list); + reloc_root = list_first_entry(&rc->reloc_roots, + struct btrfs_root, root_list); list_del_init(&reloc_root->root_list); root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, @@ -1813,8 +1814,7 @@ again: while (!list_empty(&reloc_roots)) { found = 1; - reloc_root = list_entry(reloc_roots.next, - struct btrfs_root, root_list); + reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list); root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); @@ -1930,11 +1930,11 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, * reloc root without a corresponding root this could return ENOENT. 
*/ if (IS_ERR(root)) { - ASSERT(0); + DEBUG_WARN("error %ld reading root for reloc root", PTR_ERR(root)); return PTR_ERR(root); } if (root->reloc_root != reloc_root) { - ASSERT(0); + DEBUG_WARN("unexpected reloc root found"); btrfs_err(fs_info, "root %llu has two reloc roots associated with it", reloc_root->root_key.offset); @@ -2109,8 +2109,8 @@ static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc, if (list_empty(&next->upper)) break; - edge = list_entry(next->upper.next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&next->upper, struct btrfs_backref_edge, + list[LOWER]); edges[index++] = edge; next = edge->node[UPPER]; } @@ -2356,8 +2356,8 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans, for (level = 0; level < BTRFS_MAX_LEVEL; level++) { while (!list_empty(&cache->pending[level])) { - node = list_entry(cache->pending[level].next, - struct btrfs_backref_node, list); + node = list_first_entry(&cache->pending[level], + struct btrfs_backref_node, list); list_move_tail(&node->list, &list); BUG_ON(!node->pending); @@ -2395,8 +2395,8 @@ static void update_processed_blocks(struct reloc_control *rc, if (list_empty(&next->upper)) break; - edge = list_entry(next->upper.next, - struct btrfs_backref_edge, list[LOWER]); + edge = list_first_entry(&next->upper, struct btrfs_backref_edge, + list[LOWER]); edges[index++] = edge; next = edge->node[UPPER]; } @@ -2408,8 +2408,8 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc) { u32 blocksize = rc->extent_root->fs_info->nodesize; - if (test_range_bit(&rc->processed_blocks, bytenr, - bytenr + blocksize - 1, EXTENT_DIRTY, NULL)) + if (btrfs_test_range_bit(&rc->processed_blocks, bytenr, + bytenr + blocksize - 1, EXTENT_DIRTY, NULL)) return 1; return 0; } @@ -2706,9 +2706,6 @@ static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control if (ret < 0) return ret; - clear_extent_bits(&inode->io_tree, i_size, - round_up(i_size, PAGE_SIZE) - 1, - EXTENT_UPTODATE); folio = filemap_lock_folio(mapping, i_size >> PAGE_SHIFT); /* * If page is freed we don't need to do anything then, as we @@ -2738,21 +2735,21 @@ static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control else end = cluster->end - offset; - lock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_lock_extent(&inode->io_tree, start, end, &cached_state); num_bytes = end + 1 - start; ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start, num_bytes, num_bytes, end + 1, &alloc_hint); cur_offset = end + 1; - unlock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); if (ret) break; } btrfs_inode_unlock(inode, 0); if (cur_offset < prealloc_end) - btrfs_free_reserved_data_space_noquota(inode->root->fs_info, - prealloc_end + 1 - cur_offset); + btrfs_free_reserved_data_space_noquota(inode, + prealloc_end + 1 - cur_offset); return ret; } @@ -2766,7 +2763,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_contr u64 end = rc->cluster.end - offset; int ret = 0; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) return -ENOMEM; @@ -2777,10 +2774,10 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_contr em->ram_bytes = em->len; em->flags |= EXTENT_FLAG_PINNED; - lock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_lock_extent(&inode->io_tree, start, end, &cached_state); ret = btrfs_replace_extent_map_range(inode, em, false); - 
unlock_extent(&inode->io_tree, start, end, &cached_state); - free_extent_map(em); + btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); + btrfs_free_extent_map(em); return ret; } @@ -2902,15 +2899,15 @@ again: goto release_folio; /* Mark the range delalloc and dirty for later writeback */ - lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, - &cached_state); + btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, + clamped_end, &cached_state); ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start, clamped_end, 0, &cached_state); if (ret) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, - clamped_start, clamped_end, - EXTENT_LOCKED | EXTENT_BOUNDARY, - &cached_state); + btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, + clamped_start, clamped_end, + EXTENT_LOCKED | EXTENT_BOUNDARY, + &cached_state); btrfs_delalloc_release_metadata(BTRFS_I(inode), clamped_len, true); btrfs_delalloc_release_extents(BTRFS_I(inode), @@ -2932,12 +2929,12 @@ again: u64 boundary_end = boundary_start + fs_info->sectorsize - 1; - set_extent_bit(&BTRFS_I(inode)->io_tree, - boundary_start, boundary_end, - EXTENT_BOUNDARY, NULL); + btrfs_set_extent_bit(&BTRFS_I(inode)->io_tree, + boundary_start, boundary_end, + EXTENT_BOUNDARY, NULL); } - unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, - &cached_state); + btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, + &cached_state); btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len); cur += clamped_len; @@ -3435,9 +3432,9 @@ next: goto next; } - block_found = find_first_extent_bit(&rc->processed_blocks, - key.objectid, &start, &end, - EXTENT_DIRTY, NULL); + block_found = btrfs_find_first_extent_bit(&rc->processed_blocks, + key.objectid, &start, &end, + EXTENT_DIRTY, NULL); if (block_found && start <= key.objectid) { btrfs_release_path(path); @@ -3646,7 +3643,7 @@ restart: } btrfs_release_path(path); - clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); + btrfs_clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY); if (trans) { btrfs_end_transaction_throttle(trans); @@ -3862,7 +3859,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) btrfs_backref_init_cache(fs_info, &rc->backref_cache, true); rc->reloc_root_tree.rb_root = RB_ROOT; spin_lock_init(&rc->reloc_root_tree.lock); - extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS); + btrfs_extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS); return rc; } @@ -4185,8 +4182,7 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info) rc->merge_reloc_tree = true; while (!list_empty(&reloc_roots)) { - reloc_root = list_entry(reloc_roots.next, - struct btrfs_root, root_list); + reloc_root = list_first_entry(&reloc_roots, struct btrfs_root, root_list); list_del(&reloc_root->root_list); if (btrfs_root_refs(&reloc_root->root_item) == 0) { @@ -4279,7 +4275,7 @@ int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered) while (!list_empty(&list)) { struct btrfs_ordered_sum *sums = - list_entry(list.next, struct btrfs_ordered_sum, list); + list_first_entry(&list, struct btrfs_ordered_sum, list); list_del_init(&sums->list); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index c3b2e29e3e01..ce36fafc771e 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -66,8 +66,6 @@ struct scrub_ctx; /* Represent one sector and its needed info to verify the content. 
*/ struct scrub_sector_verification { - bool is_metadata; - union { /* * Csum pointer for data csum verification. Should point to a @@ -100,6 +98,38 @@ enum scrub_stripe_flags { SCRUB_STRIPE_FLAG_NO_REPORT, }; +/* + * We have multiple bitmaps for one scrub_stripe. + * However each bitmap has at most (BTRFS_STRIPE_LEN / blocksize) bits, + * which is normally 16, and much smaller than BITS_PER_LONG (32 or 64). + * + * So to reduce memory usage for each scrub_stripe, we pack those bitmaps + * into a larger one. + * + * These enum records where the sub-bitmap are inside the larger one. + * Each subbitmap starts at scrub_bitmap_nr_##name * nr_sectors bit. + */ +enum { + /* Which blocks are covered by extent items. */ + scrub_bitmap_nr_has_extent = 0, + + /* Which blocks are meteadata. */ + scrub_bitmap_nr_is_metadata, + + /* + * Which blocks have errors, including IO, csum, and metadata + * errors. + * This sub-bitmap is the OR results of the next few error related + * sub-bitmaps. + */ + scrub_bitmap_nr_error, + scrub_bitmap_nr_io_error, + scrub_bitmap_nr_csum_error, + scrub_bitmap_nr_meta_error, + scrub_bitmap_nr_meta_gen_error, + scrub_bitmap_nr_last, +}; + #define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE) /* @@ -138,36 +168,15 @@ struct scrub_stripe { */ unsigned long state; - /* Indicate which sectors are covered by extent items. */ - unsigned long extent_sector_bitmap; - - /* - * The errors hit during the initial read of the stripe. - * - * Would be utilized for error reporting and repair. - * - * The remaining init_nr_* records the number of errors hit, only used - * by error reporting. - */ - unsigned long init_error_bitmap; - unsigned int init_nr_io_errors; - unsigned int init_nr_csum_errors; - unsigned int init_nr_meta_errors; + /* The large bitmap contains all the sub-bitmaps. */ + unsigned long bitmaps[BITS_TO_LONGS(scrub_bitmap_nr_last * + (BTRFS_STRIPE_LEN / BTRFS_MIN_BLOCKSIZE))]; /* - * The following error bitmaps are all for the current status. - * Every time we submit a new read, these bitmaps may be updated. - * - * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap; - * - * IO and csum errors can happen for both metadata and data. + * For writeback (repair or replace) error reporting. + * This one is protected by a spinlock, thus can not be packed into + * the larger bitmap. */ - unsigned long error_bitmap; - unsigned long io_error_bitmap; - unsigned long csum_error_bitmap; - unsigned long meta_error_bitmap; - - /* For writeback (repair or replace) error reporting. */ unsigned long write_error_bitmap; /* Writeback can be concurrent, thus we need to protect the bitmap. 
*/ @@ -219,6 +228,90 @@ struct scrub_ctx { refcount_t refs; }; +#define scrub_calc_start_bit(stripe, name, block_nr) \ +({ \ + unsigned int __start_bit; \ + \ + ASSERT(block_nr < stripe->nr_sectors, \ + "nr_sectors=%u block_nr=%u", stripe->nr_sectors, block_nr); \ + __start_bit = scrub_bitmap_nr_##name * stripe->nr_sectors + block_nr; \ + __start_bit; \ +}) + +#define IMPLEMENT_SCRUB_BITMAP_OPS(name) \ +static inline void scrub_bitmap_set_##name(struct scrub_stripe *stripe, \ + unsigned int block_nr, \ + unsigned int nr_blocks) \ +{ \ + const unsigned int start_bit = scrub_calc_start_bit(stripe, \ + name, block_nr); \ + \ + bitmap_set(stripe->bitmaps, start_bit, nr_blocks); \ +} \ +static inline void scrub_bitmap_clear_##name(struct scrub_stripe *stripe, \ + unsigned int block_nr, \ + unsigned int nr_blocks) \ +{ \ + const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \ + block_nr); \ + \ + bitmap_clear(stripe->bitmaps, start_bit, nr_blocks); \ +} \ +static inline bool scrub_bitmap_test_bit_##name(struct scrub_stripe *stripe, \ + unsigned int block_nr) \ +{ \ + const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \ + block_nr); \ + \ + return test_bit(start_bit, stripe->bitmaps); \ +} \ +static inline void scrub_bitmap_set_bit_##name(struct scrub_stripe *stripe, \ + unsigned int block_nr) \ +{ \ + const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \ + block_nr); \ + \ + set_bit(start_bit, stripe->bitmaps); \ +} \ +static inline void scrub_bitmap_clear_bit_##name(struct scrub_stripe *stripe, \ + unsigned int block_nr) \ +{ \ + const unsigned int start_bit = scrub_calc_start_bit(stripe, name, \ + block_nr); \ + \ + clear_bit(start_bit, stripe->bitmaps); \ +} \ +static inline unsigned long scrub_bitmap_read_##name(struct scrub_stripe *stripe) \ +{ \ + const unsigned int nr_blocks = stripe->nr_sectors; \ + \ + ASSERT(nr_blocks > 0 && nr_blocks <= BITS_PER_LONG, \ + "nr_blocks=%u BITS_PER_LONG=%u", \ + nr_blocks, BITS_PER_LONG); \ + \ + return bitmap_read(stripe->bitmaps, nr_blocks * scrub_bitmap_nr_##name, \ + stripe->nr_sectors); \ +} \ +static inline bool scrub_bitmap_empty_##name(struct scrub_stripe *stripe) \ +{ \ + unsigned long bitmap = scrub_bitmap_read_##name(stripe); \ + \ + return bitmap_empty(&bitmap, stripe->nr_sectors); \ +} \ +static inline unsigned int scrub_bitmap_weight_##name(struct scrub_stripe *stripe) \ +{ \ + unsigned long bitmap = scrub_bitmap_read_##name(stripe); \ + \ + return bitmap_weight(&bitmap, stripe->nr_sectors); \ +} +IMPLEMENT_SCRUB_BITMAP_OPS(has_extent); +IMPLEMENT_SCRUB_BITMAP_OPS(is_metadata); +IMPLEMENT_SCRUB_BITMAP_OPS(error); +IMPLEMENT_SCRUB_BITMAP_OPS(io_error); +IMPLEMENT_SCRUB_BITMAP_OPS(csum_error); +IMPLEMENT_SCRUB_BITMAP_OPS(meta_error); +IMPLEMENT_SCRUB_BITMAP_OPS(meta_gen_error); + struct scrub_warning { struct btrfs_path *path; u64 extent_item_size; @@ -228,6 +321,19 @@ struct scrub_warning { struct btrfs_device *dev; }; +struct scrub_error_records { + /* + * Bitmap recording which blocks hit errors (IO/csum/...) during the + * initial read. 
+ */ + unsigned long init_error_bitmap; + + unsigned int nr_io_errors; + unsigned int nr_csum_errors; + unsigned int nr_meta_errors; + unsigned int nr_meta_gen_errors; +}; + static void release_scrub_stripe(struct scrub_stripe *stripe) { if (!stripe) @@ -579,20 +685,15 @@ static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) return ret; } -static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr) +static void *scrub_stripe_get_kaddr(struct scrub_stripe *stripe, int sector_nr) { - struct btrfs_fs_info *fs_info = stripe->bg->fs_info; - int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; + u32 offset = (sector_nr << stripe->bg->fs_info->sectorsize_bits); + const struct page *page = stripe->pages[offset >> PAGE_SHIFT]; - return stripe->pages[page_index]; -} - -static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe, - int sector_nr) -{ - struct btrfs_fs_info *fs_info = stripe->bg->fs_info; - - return offset_in_page(sector_nr << fs_info->sectorsize_bits); + /* stripe->pages[] is allocated by us and no highmem is allowed. */ + ASSERT(page); + ASSERT(!PageHighMem(page)); + return page_address(page) + offset_in_page(offset); } static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr) @@ -600,24 +701,22 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr struct btrfs_fs_info *fs_info = stripe->bg->fs_info; const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); - const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr); - const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr); + void *first_kaddr = scrub_stripe_get_kaddr(stripe, sector_nr); + struct btrfs_header *header = first_kaddr; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); u8 on_disk_csum[BTRFS_CSUM_SIZE]; u8 calculated_csum[BTRFS_CSUM_SIZE]; - struct btrfs_header *header; /* * Here we don't have a good way to attach the pages (and subpages) * to a dummy extent buffer, thus we have to directly grab the members * from pages. 
*/ - header = (struct btrfs_header *)(page_address(first_page) + first_off); memcpy(on_disk_csum, header->csum, fs_info->csum_size); if (logical != btrfs_stack_header_bytenr(header)) { - bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); - bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); + scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad bytenr, has %llu want %llu", logical, stripe->mirror_num, @@ -626,8 +725,8 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr } if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0) { - bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); - bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); + scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad fsid, has %pU want %pU", logical, stripe->mirror_num, @@ -636,8 +735,8 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr } if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE) != 0) { - bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); - bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); + scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU", logical, stripe->mirror_num, @@ -648,21 +747,18 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr /* Now check tree block csum. 
*/ shash->tfm = fs_info->csum_shash; crypto_shash_init(shash); - crypto_shash_update(shash, page_address(first_page) + first_off + - BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE); + crypto_shash_update(shash, first_kaddr + BTRFS_CSUM_SIZE, + fs_info->sectorsize - BTRFS_CSUM_SIZE); for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) { - struct page *page = scrub_stripe_get_page(stripe, i); - unsigned int page_off = scrub_stripe_get_page_offset(stripe, i); - - crypto_shash_update(shash, page_address(page) + page_off, + crypto_shash_update(shash, scrub_stripe_get_kaddr(stripe, i), fs_info->sectorsize); } crypto_shash_final(shash, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) { - bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); - bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); + scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT, logical, stripe->mirror_num, @@ -672,8 +768,8 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr } if (stripe->sectors[sector_nr].generation != btrfs_stack_header_generation(header)) { - bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); - bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); + scrub_bitmap_set_meta_gen_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad generation, has %llu want %llu", logical, stripe->mirror_num, @@ -681,9 +777,10 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr stripe->sectors[sector_nr].generation); return; } - bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree); - bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); - bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); + scrub_bitmap_clear_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_clear_csum_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_clear_meta_error(stripe, sector_nr, sectors_per_tree); + scrub_bitmap_clear_meta_gen_error(stripe, sector_nr, sectors_per_tree); } static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) @@ -691,23 +788,22 @@ static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct scrub_sector_verification *sector = &stripe->sectors[sector_nr]; const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; - struct page *page = scrub_stripe_get_page(stripe, sector_nr); - unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); + void *kaddr = scrub_stripe_get_kaddr(stripe, sector_nr); u8 csum_buf[BTRFS_CSUM_SIZE]; int ret; ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); /* Sector not utilized, skip it. */ - if (!test_bit(sector_nr, &stripe->extent_sector_bitmap)) + if (!scrub_bitmap_test_bit_has_extent(stripe, sector_nr)) return; /* IO error, no need to check. */ - if (test_bit(sector_nr, &stripe->io_error_bitmap)) + if (scrub_bitmap_test_bit_io_error(stripe, sector_nr)) return; /* Metadata, verify the full tree block. */ - if (sector->is_metadata) { + if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) { /* * Check if the tree block crosses the stripe boundary. 
If * crossed the boundary, we cannot verify it but only give a @@ -733,17 +829,17 @@ static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) * cases without csum, we have no other choice but to trust it. */ if (!sector->csum) { - clear_bit(sector_nr, &stripe->error_bitmap); + scrub_bitmap_clear_bit_error(stripe, sector_nr); return; } - ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum); + ret = btrfs_check_sector_csum(fs_info, kaddr, csum_buf, sector->csum); if (ret < 0) { - set_bit(sector_nr, &stripe->csum_error_bitmap); - set_bit(sector_nr, &stripe->error_bitmap); + scrub_bitmap_set_bit_csum_error(stripe, sector_nr); + scrub_bitmap_set_bit_error(stripe, sector_nr); } else { - clear_bit(sector_nr, &stripe->csum_error_bitmap); - clear_bit(sector_nr, &stripe->error_bitmap); + scrub_bitmap_clear_bit_csum_error(stripe, sector_nr); + scrub_bitmap_clear_bit_error(stripe, sector_nr); } } @@ -756,7 +852,7 @@ static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long b for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { scrub_verify_one_sector(stripe, sector_nr); - if (stripe->sectors[sector_nr].is_metadata) + if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) sector_nr += sectors_per_tree - 1; } } @@ -766,8 +862,7 @@ static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first int i; for (i = 0; i < stripe->nr_sectors; i++) { - if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page && - scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset) + if (scrub_stripe_get_kaddr(stripe, i) == bvec_virt(first_bvec)) break; } ASSERT(i < stripe->nr_sectors); @@ -795,13 +890,13 @@ static void scrub_repair_read_endio(struct btrfs_bio *bbio) bio_size += bvec->bv_len; if (bbio->bio.bi_status) { - bitmap_set(&stripe->io_error_bitmap, sector_nr, - bio_size >> fs_info->sectorsize_bits); - bitmap_set(&stripe->error_bitmap, sector_nr, - bio_size >> fs_info->sectorsize_bits); + scrub_bitmap_set_io_error(stripe, sector_nr, + bio_size >> fs_info->sectorsize_bits); + scrub_bitmap_set_error(stripe, sector_nr, + bio_size >> fs_info->sectorsize_bits); } else { - bitmap_clear(&stripe->io_error_bitmap, sector_nr, - bio_size >> fs_info->sectorsize_bits); + scrub_bitmap_clear_io_error(stripe, sector_nr, + bio_size >> fs_info->sectorsize_bits); } bio_put(&bbio->bio); if (atomic_dec_and_test(&stripe->pending_io)) @@ -814,27 +909,39 @@ static int calc_next_mirror(int mirror, int num_copies) return (mirror + 1 > num_copies) ? 1 : mirror + 1; } +static void scrub_bio_add_sector(struct btrfs_bio *bbio, struct scrub_stripe *stripe, + int sector_nr) +{ + void *kaddr = scrub_stripe_get_kaddr(stripe, sector_nr); + int ret; + + ret = bio_add_page(&bbio->bio, virt_to_page(kaddr), bbio->fs_info->sectorsize, + offset_in_page(kaddr)); + /* + * Caller should ensure the bbio has enough size. + * And we cannot use __bio_add_page(), which doesn't do any merge. + * + * Meanwhile for scrub_submit_initial_read() we fully rely on the merge + * to create the minimal amount of bio vectors, for fs block size < page + * size cases. 
+ */ + ASSERT(ret == bbio->fs_info->sectorsize); +} + static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe, int mirror, int blocksize, bool wait) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct btrfs_bio *bbio = NULL; - const unsigned long old_error_bitmap = stripe->error_bitmap; + const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe); int i; ASSERT(stripe->mirror_num >= 1); ASSERT(atomic_read(&stripe->pending_io) == 0); for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { - struct page *page; - int pgoff; - int ret; - - page = scrub_stripe_get_page(stripe, i); - pgoff = scrub_stripe_get_page_offset(stripe, i); - /* The current sector cannot be merged, submit the bio. */ - if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) || + if (bbio && ((i > 0 && !test_bit(i - 1, &old_error_bitmap)) || bbio->bio.bi_iter.bi_size >= blocksize)) { ASSERT(bbio->bio.bi_iter.bi_size); atomic_inc(&stripe->pending_io); @@ -851,8 +958,7 @@ static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe, (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; } - ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); - ASSERT(ret == fs_info->sectorsize); + scrub_bio_add_sector(bbio, stripe, i); } if (bbio) { ASSERT(bbio->bio.bi_iter.bi_size); @@ -864,12 +970,15 @@ static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe, } static void scrub_stripe_report_errors(struct scrub_ctx *sctx, - struct scrub_stripe *stripe) + struct scrub_stripe *stripe, + const struct scrub_error_records *errors) { static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_device *dev = NULL; + const unsigned long extent_bitmap = scrub_bitmap_read_has_extent(stripe); + const unsigned long error_bitmap = scrub_bitmap_read_error(stripe); u64 physical = 0; int nr_data_sectors = 0; int nr_meta_sectors = 0; @@ -886,7 +995,7 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx, * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio() * thus no need for dev/physical, error reporting still needs dev and physical. */ - if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) { + if (!bitmap_empty(&errors->init_error_bitmap, stripe->nr_sectors)) { u64 mapped_len = fs_info->sectorsize; struct btrfs_io_context *bioc = NULL; int stripe_index = stripe->mirror_num - 1; @@ -909,10 +1018,10 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx, } skip: - for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) { + for_each_set_bit(sector_nr, &extent_bitmap, stripe->nr_sectors) { bool repaired = false; - if (stripe->sectors[sector_nr].is_metadata) { + if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) { nr_meta_sectors++; } else { nr_data_sectors++; @@ -920,14 +1029,14 @@ skip: nr_nodatacsum_sectors++; } - if (test_bit(sector_nr, &stripe->init_error_bitmap) && - !test_bit(sector_nr, &stripe->error_bitmap)) { + if (test_bit(sector_nr, &errors->init_error_bitmap) && + !test_bit(sector_nr, &error_bitmap)) { nr_repaired_sectors++; repaired = true; } /* Good sector from the beginning, nothing need to be done. 
*/ - if (!test_bit(sector_nr, &stripe->init_error_bitmap)) + if (!test_bit(sector_nr, &errors->init_error_bitmap)) continue; /* @@ -960,31 +1069,46 @@ skip: stripe->logical, stripe->mirror_num); } - if (test_bit(sector_nr, &stripe->io_error_bitmap)) + if (scrub_bitmap_test_bit_io_error(stripe, sector_nr)) if (__ratelimit(&rs) && dev) scrub_print_common_warning("i/o error", dev, false, stripe->logical, physical); - if (test_bit(sector_nr, &stripe->csum_error_bitmap)) + if (scrub_bitmap_test_bit_csum_error(stripe, sector_nr)) if (__ratelimit(&rs) && dev) scrub_print_common_warning("checksum error", dev, false, stripe->logical, physical); - if (test_bit(sector_nr, &stripe->meta_error_bitmap)) + if (scrub_bitmap_test_bit_meta_error(stripe, sector_nr)) if (__ratelimit(&rs) && dev) scrub_print_common_warning("header error", dev, false, stripe->logical, physical); + if (scrub_bitmap_test_bit_meta_gen_error(stripe, sector_nr)) + if (__ratelimit(&rs) && dev) + scrub_print_common_warning("generation error", dev, false, + stripe->logical, physical); } + /* Update the device stats. */ + for (int i = 0; i < errors->nr_io_errors; i++) + btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_READ_ERRS); + for (int i = 0; i < errors->nr_csum_errors; i++) + btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); + /* Generation mismatch error is based on each metadata, not each block. */ + for (int i = 0; i < errors->nr_meta_gen_errors; + i += (fs_info->nodesize >> fs_info->sectorsize_bits)) + btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_GENERATION_ERRS); + spin_lock(&sctx->stat_lock); sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits; sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits; sctx->stat.no_csum += nr_nodatacsum_sectors; - sctx->stat.read_errors += stripe->init_nr_io_errors; - sctx->stat.csum_errors += stripe->init_nr_csum_errors; - sctx->stat.verify_errors += stripe->init_nr_meta_errors; + sctx->stat.read_errors += errors->nr_io_errors; + sctx->stat.csum_errors += errors->nr_csum_errors; + sctx->stat.verify_errors += errors->nr_meta_errors + + errors->nr_meta_gen_errors; sctx->stat.uncorrectable_errors += - bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors); + bitmap_weight(&error_bitmap, stripe->nr_sectors); sctx->stat.corrected_errors += nr_repaired_sectors; spin_unlock(&sctx->stat_lock); } @@ -1010,26 +1134,26 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work) struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work); struct scrub_ctx *sctx = stripe->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; + struct scrub_error_records errors = { 0 }; int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, stripe->bg->length); unsigned long repaired; + unsigned long error; int mirror; int i; ASSERT(stripe->mirror_num > 0); wait_scrub_stripe_io(stripe); - scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap); + scrub_verify_one_stripe(stripe, scrub_bitmap_read_has_extent(stripe)); /* Save the initial failed bitmap for later repair and report usage. 
*/ - stripe->init_error_bitmap = stripe->error_bitmap; - stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap, - stripe->nr_sectors); - stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap, - stripe->nr_sectors); - stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap, - stripe->nr_sectors); - - if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) + errors.init_error_bitmap = scrub_bitmap_read_error(stripe); + errors.nr_io_errors = scrub_bitmap_weight_io_error(stripe); + errors.nr_csum_errors = scrub_bitmap_weight_csum_error(stripe); + errors.nr_meta_errors = scrub_bitmap_weight_meta_error(stripe); + errors.nr_meta_gen_errors = scrub_bitmap_weight_meta_gen_error(stripe); + + if (bitmap_empty(&errors.init_error_bitmap, stripe->nr_sectors)) goto out; /* @@ -1041,13 +1165,13 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work) for (mirror = calc_next_mirror(stripe->mirror_num, num_copies); mirror != stripe->mirror_num; mirror = calc_next_mirror(mirror, num_copies)) { - const unsigned long old_error_bitmap = stripe->error_bitmap; + const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe); scrub_stripe_submit_repair_read(stripe, mirror, BTRFS_STRIPE_LEN, false); wait_scrub_stripe_io(stripe); scrub_verify_one_stripe(stripe, old_error_bitmap); - if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) + if (scrub_bitmap_empty_error(stripe)) goto out; } @@ -1065,21 +1189,22 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work) for (i = 0, mirror = stripe->mirror_num; i < num_copies; i++, mirror = calc_next_mirror(mirror, num_copies)) { - const unsigned long old_error_bitmap = stripe->error_bitmap; + const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe); scrub_stripe_submit_repair_read(stripe, mirror, fs_info->sectorsize, true); wait_scrub_stripe_io(stripe); scrub_verify_one_stripe(stripe, old_error_bitmap); - if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) + if (scrub_bitmap_empty_error(stripe)) goto out; } out: + error = scrub_bitmap_read_error(stripe); /* * Submit the repaired sectors. For zoned case, we cannot do repair * in-place, but queue the bg to be relocated. 
*/ - bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap, + bitmap_andnot(&repaired, &errors.init_error_bitmap, &error, stripe->nr_sectors); if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) { if (btrfs_is_zoned(fs_info)) { @@ -1090,7 +1215,7 @@ out: } } - scrub_stripe_report_errors(sctx, stripe); + scrub_stripe_report_errors(sctx, stripe, &errors); set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state); wake_up(&stripe->repair_wait); } @@ -1110,10 +1235,10 @@ static void scrub_read_endio(struct btrfs_bio *bbio) num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits; if (bbio->bio.bi_status) { - bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors); - bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors); + scrub_bitmap_set_io_error(stripe, sector_nr, num_sectors); + scrub_bitmap_set_error(stripe, sector_nr, num_sectors); } else { - bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors); + scrub_bitmap_clear_io_error(stripe, sector_nr, num_sectors); } bio_put(&bbio->bio); if (atomic_dec_and_test(&stripe->pending_io)) { @@ -1142,6 +1267,9 @@ static void scrub_write_endio(struct btrfs_bio *bbio) bitmap_set(&stripe->write_error_bitmap, sector_nr, bio_size >> fs_info->sectorsize_bits); spin_unlock_irqrestore(&stripe->write_error_lock, flags); + for (int i = 0; i < (bio_size >> fs_info->sectorsize_bits); i++) + btrfs_dev_stat_inc_and_print(stripe->dev, + BTRFS_DEV_STAT_WRITE_ERRS); } bio_put(&bbio->bio); @@ -1199,12 +1327,8 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str int sector_nr; for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { - struct page *page = scrub_stripe_get_page(stripe, sector_nr); - unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); - int ret; - /* We should only writeback sectors covered by an extent. */ - ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap)); + ASSERT(scrub_bitmap_test_bit_has_extent(stripe, sector_nr)); /* Cannot merge with previous sector, submit the current one. 
*/ if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { @@ -1218,8 +1342,7 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str (sector_nr << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; } - ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); - ASSERT(ret == fs_info->sectorsize); + scrub_bio_add_sector(bbio, stripe, sector_nr); } if (bbio) scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); @@ -1493,9 +1616,9 @@ static void fill_one_extent_info(struct btrfs_fs_info *fs_info, struct scrub_sector_verification *sector = &stripe->sectors[nr_sector]; - set_bit(nr_sector, &stripe->extent_sector_bitmap); + scrub_bitmap_set_bit_has_extent(stripe, nr_sector); if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { - sector->is_metadata = true; + scrub_bitmap_set_bit_is_metadata(stripe, nr_sector); sector->generation = extent_gen; } } @@ -1503,15 +1626,8 @@ static void fill_one_extent_info(struct btrfs_fs_info *fs_info, static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe) { - stripe->extent_sector_bitmap = 0; - stripe->init_error_bitmap = 0; - stripe->init_nr_io_errors = 0; - stripe->init_nr_csum_errors = 0; - stripe->init_nr_meta_errors = 0; - stripe->error_bitmap = 0; - stripe->io_error_bitmap = 0; - stripe->csum_error_bitmap = 0; - stripe->meta_error_bitmap = 0; + ASSERT(stripe->nr_sectors); + bitmap_zero(stripe->bitmaps, scrub_bitmap_nr_last * stripe->nr_sectors); } /* @@ -1646,7 +1762,6 @@ static void scrub_reset_stripe(struct scrub_stripe *stripe) stripe->state = 0; for (int i = 0; i < stripe->nr_sectors; i++) { - stripe->sectors[i].is_metadata = false; stripe->sectors[i].csum = NULL; stripe->sectors[i].generation = 0; } @@ -1665,24 +1780,21 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe) struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct btrfs_bio *bbio = NULL; unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits; + const unsigned long has_extent = scrub_bitmap_read_has_extent(stripe); u64 stripe_len = BTRFS_STRIPE_LEN; int mirror = stripe->mirror_num; int i; atomic_inc(&stripe->pending_io); - for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) { - struct page *page = scrub_stripe_get_page(stripe, i); - unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i); - + for_each_set_bit(i, &has_extent, stripe->nr_sectors) { /* We're beyond the chunk boundary, no need to read anymore. */ if (i >= nr_sectors) break; /* The current sector cannot be merged, submit the bio. */ if (bbio && - ((i > 0 && - !test_bit(i - 1, &stripe->extent_sector_bitmap)) || + ((i > 0 && !test_bit(i - 1, &has_extent)) || bbio->bio.bi_iter.bi_size >= stripe_len)) { ASSERT(bbio->bio.bi_iter.bi_size); atomic_inc(&stripe->pending_io); @@ -1716,8 +1828,8 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe) * the extent tree, then it's a preallocated * extent and not an error. 
*/ - set_bit(i, &stripe->io_error_bitmap); - set_bit(i, &stripe->error_bitmap); + scrub_bitmap_set_bit_io_error(stripe, i); + scrub_bitmap_set_bit_error(stripe, i); } continue; } @@ -1727,7 +1839,7 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe) bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT; } - __bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); + scrub_bio_add_sector(bbio, stripe, i); } if (bbio) { @@ -1765,15 +1877,8 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx, bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; /* Read the whole range inside the chunk boundary. */ - for (unsigned int cur = 0; cur < nr_sectors; cur++) { - struct page *page = scrub_stripe_get_page(stripe, cur); - unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur); - int ret; - - ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); - /* We should have allocated enough bio vectors. */ - ASSERT(ret == fs_info->sectorsize); - } + for (unsigned int cur = 0; cur < nr_sectors; cur++) + scrub_bio_add_sector(bbio, stripe, cur); atomic_inc(&stripe->pending_io); /* @@ -1794,10 +1899,11 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx, static bool stripe_has_metadata_error(struct scrub_stripe *stripe) { + const unsigned long error = scrub_bitmap_read_error(stripe); int i; - for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) { - if (stripe->sectors[i].is_metadata) { + for_each_set_bit(i, &error, stripe->nr_sectors) { + if (scrub_bitmap_test_bit_is_metadata(stripe, i)) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; btrfs_err(fs_info, @@ -1872,13 +1978,16 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx) } for (int i = 0; i < nr_stripes; i++) { unsigned long good; + unsigned long has_extent; + unsigned long error; stripe = &sctx->stripes[i]; ASSERT(stripe->dev == fs_info->dev_replace.srcdev); - bitmap_andnot(&good, &stripe->extent_sector_bitmap, - &stripe->error_bitmap, stripe->nr_sectors); + has_extent = scrub_bitmap_read_has_extent(stripe); + error = scrub_bitmap_read_error(stripe); + bitmap_andnot(&good, &has_extent, &error, stripe->nr_sectors); scrub_write_sectors(sctx, stripe, good, true); } } @@ -2012,7 +2121,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, /* Check if all data stripes are empty. */ for (int i = 0; i < data_stripes; i++) { stripe = &sctx->raid56_data_stripes[i]; - if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) { + if (!scrub_bitmap_empty_has_extent(stripe)) { all_empty = false; break; } @@ -2044,15 +2153,18 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, */ for (int i = 0; i < data_stripes; i++) { unsigned long error; + unsigned long has_extent; stripe = &sctx->raid56_data_stripes[i]; + error = scrub_bitmap_read_error(stripe); + has_extent = scrub_bitmap_read_has_extent(stripe); + /* * We should only check the errors where there is an extent. * As we may hit an empty data stripe while it's missing. 
*/ - bitmap_and(&error, &stripe->error_bitmap, - &stripe->extent_sector_bitmap, stripe->nr_sectors); + bitmap_and(&error, &error, &has_extent, stripe->nr_sectors); if (!bitmap_empty(&error, stripe->nr_sectors)) { btrfs_err(fs_info, "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl", @@ -2061,8 +2173,8 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, ret = -EIO; goto out; } - bitmap_or(&extent_bitmap, &extent_bitmap, - &stripe->extent_sector_bitmap, stripe->nr_sectors); + bitmap_or(&extent_bitmap, &extent_bitmap, &has_extent, + stripe->nr_sectors); } /* Now we can check and regenerate the P/Q stripe. */ @@ -2770,17 +2882,11 @@ static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev, struct page *page, u64 physical, u64 generation) { struct btrfs_fs_info *fs_info = sctx->fs_info; - struct bio_vec bvec; - struct bio bio; struct btrfs_super_block *sb = page_address(page); int ret; - bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ); - bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT; - __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0); - ret = submit_bio_wait(&bio); - bio_uninit(&bio); - + ret = bdev_rw_virt(dev->bdev, physical >> SECTOR_SHIFT, sb, + BTRFS_SUPER_INFO_SIZE, REQ_OP_READ); if (ret < 0) return ret; ret = btrfs_check_super_csum(fs_info, sb); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 0c8c58c4f29b..2891ec4056c6 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -383,11 +383,11 @@ static void inconsistent_snapshot_error(struct send_ctx *sctx, result_string = "updated"; break; case BTRFS_COMPARE_TREE_SAME: - ASSERT(0); + DEBUG_WARN("no change between trees"); result_string = "unchanged"; break; default: - ASSERT(0); + DEBUG_WARN("unexpected comparison result %d", result); result_string = "unexpected"; } @@ -816,11 +816,8 @@ static int send_cmd(struct send_ctx *sctx) static int send_rename(struct send_ctx *sctx, struct fs_path *from, struct fs_path *to) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; - btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start); - ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); if (ret < 0) return ret; @@ -840,11 +837,8 @@ tlv_put_failure: static int send_link(struct send_ctx *sctx, struct fs_path *path, struct fs_path *lnk) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; - btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start); - ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); if (ret < 0) return ret; @@ -863,11 +857,8 @@ tlv_put_failure: */ static int send_unlink(struct send_ctx *sctx, struct fs_path *path) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; - btrfs_debug(fs_info, "send_unlink %s", path->start); - ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); if (ret < 0) return ret; @@ -885,11 +876,8 @@ tlv_put_failure: */ static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; - btrfs_debug(fs_info, "send_rmdir %s", path->start); - ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); if (ret < 0) return ret; @@ -1573,7 +1561,6 @@ static int find_extent_clone(struct send_ctx *sctx, struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; int extent_type; - u64 logical; u64 disk_byte; u64 num_bytes; struct btrfs_file_extent_item *fi; @@ -1604,7 +1591,6 @@ static int find_extent_clone(struct send_ctx *sctx, compressed = btrfs_file_extent_compression(eb, fi); num_bytes = btrfs_file_extent_num_bytes(eb, fi); - logical = 
disk_byte + btrfs_file_extent_offset(eb, fi); /* * Setup the clone roots. @@ -1686,14 +1672,8 @@ static int find_extent_clone(struct send_ctx *sctx, } up_read(&fs_info->commit_root_sem); - btrfs_debug(fs_info, - "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu", - data_offset, ino, num_bytes, logical); - - if (!backref_ctx.found) { - btrfs_debug(fs_info, "no clones found"); + if (!backref_ctx.found) return -ENOENT; - } cur_clone_root = NULL; for (i = 0; i < sctx->clone_roots_cnt; i++) { @@ -2631,12 +2611,9 @@ static void free_path_for_command(const struct send_ctx *sctx, struct fs_path *p static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; - btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); - p = get_path_for_command(sctx, ino, gen); if (IS_ERR(p)) return PTR_ERR(p); @@ -2658,12 +2635,9 @@ out: static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; - btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); - p = get_path_for_command(sctx, ino, gen); if (IS_ERR(p)) return PTR_ERR(p); @@ -2685,15 +2659,12 @@ out: static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; if (sctx->proto < 2) return 0; - btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr); - p = get_path_for_command(sctx, ino, gen); if (IS_ERR(p)) return PTR_ERR(p); @@ -2715,13 +2686,9 @@ out: static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; - btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", - ino, uid, gid); - p = get_path_for_command(sctx, ino, gen); if (IS_ERR(p)) return PTR_ERR(p); @@ -2744,7 +2711,6 @@ out: static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p = NULL; struct btrfs_inode_item *ii; @@ -2753,8 +2719,6 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) struct btrfs_key key; int slot; - btrfs_debug(fs_info, "send_utimes %llu", ino); - p = get_path_for_command(sctx, ino, gen); if (IS_ERR(p)) return PTR_ERR(p); @@ -2861,7 +2825,6 @@ static int trim_dir_utimes_cache(struct send_ctx *sctx) */ static int send_create_inode(struct send_ctx *sctx, u64 ino) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; int cmd; @@ -2870,8 +2833,6 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino) u64 mode; u64 rdev; - btrfs_debug(fs_info, "send_create_inode %llu", ino); - p = fs_path_alloc(); if (!p) return -ENOMEM; @@ -3098,7 +3059,7 @@ static void __free_recorded_refs(struct list_head *head) struct recorded_ref *cur; while (!list_empty(head)) { - cur = list_entry(head->next, struct recorded_ref, list); + cur = list_first_entry(head, struct recorded_ref, list); recorded_ref_free(cur); } } @@ -4224,8 +4185,6 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) bool orphanized_dir = false; bool orphanized_ancestor = false; - btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino); - /* * This should never happen as the root dir always has the same ref * which is always '..' 
@@ -4560,8 +4519,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) /* * We have a moved dir. Add the old parent to check_dirs */ - cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, - list); + cur = list_first_entry(&sctx->deleted_refs, struct recorded_ref, list); ret = dup_ref(cur, &check_dirs); if (ret < 0) goto out; @@ -5263,10 +5221,9 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) { struct btrfs_root *root = sctx->send_root; struct btrfs_fs_info *fs_info = root->fs_info; - struct folio *folio; - pgoff_t index = offset >> PAGE_SHIFT; - pgoff_t last_index; - unsigned pg_offset = offset_in_page(offset); + u64 cur = offset; + const u64 end = offset + len; + const pgoff_t last_index = ((end - 1) >> PAGE_SHIFT); struct address_space *mapping = sctx->cur_inode->i_mapping; int ret; @@ -5274,13 +5231,12 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) if (ret) return ret; - last_index = (offset + len - 1) >> PAGE_SHIFT; + while (cur < end) { + pgoff_t index = (cur >> PAGE_SHIFT); + unsigned int cur_len; + unsigned int pg_offset; + struct folio *folio; - while (index <= last_index) { - unsigned cur_len = min_t(unsigned, len, - PAGE_SIZE - pg_offset); - -again: folio = filemap_lock_folio(mapping, index); if (IS_ERR(folio)) { page_cache_sync_readahead(mapping, @@ -5293,8 +5249,8 @@ again: break; } } - - WARN_ON(folio_order(folio)); + pg_offset = offset_in_folio(folio, cur); + cur_len = min_t(unsigned int, end - cur, folio_size(folio) - pg_offset); if (folio_test_readahead(folio)) page_cache_async_readahead(mapping, &sctx->ra, NULL, folio, @@ -5316,7 +5272,7 @@ again: if (folio->mapping != mapping) { folio_unlock(folio); folio_put(folio); - goto again; + continue; } } @@ -5324,9 +5280,7 @@ again: pg_offset, cur_len); folio_unlock(folio); folio_put(folio); - index++; - pg_offset = 0; - len -= cur_len; + cur += cur_len; sctx->send_size += cur_len; } @@ -5339,12 +5293,9 @@ again: */ static int send_write(struct send_ctx *sctx, u64 offset, u32 len) { - struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; - btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len); - p = get_cur_inode_path(sctx); if (IS_ERR(p)) return PTR_ERR(p); @@ -5377,11 +5328,6 @@ static int send_clone(struct send_ctx *sctx, struct fs_path *cur_inode_path; u64 gen; - btrfs_debug(sctx->send_root->fs_info, - "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu", - offset, len, btrfs_root_id(clone_root->root), - clone_root->ino, clone_root->offset); - cur_inode_path = get_cur_inode_path(sctx); if (IS_ERR(cur_inode_path)) return PTR_ERR(cur_inode_path); diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index ff089e3e4103..d9087aa81b21 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -50,11 +50,11 @@ * num_bytes we want to reserve. 
* * ->reserve - * space_info->bytes_may_reserve += num_bytes + * space_info->bytes_may_use += num_bytes * * ->extent allocation * Call btrfs_add_reserved_bytes() which does - * space_info->bytes_may_reserve -= num_bytes + * space_info->bytes_may_use -= num_bytes * space_info->bytes_reserved += extent_bytes * * ->insert reference @@ -234,19 +234,11 @@ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, WRITE_ONCE(space_info->chunk_size, chunk_size); } -static int create_space_info(struct btrfs_fs_info *info, u64 flags) +static void init_space_info(struct btrfs_fs_info *info, + struct btrfs_space_info *space_info, u64 flags) { - - struct btrfs_space_info *space_info; - int i; - int ret; - - space_info = kzalloc(sizeof(*space_info), GFP_NOFS); - if (!space_info) - return -ENOMEM; - space_info->fs_info = info; - for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) + for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) INIT_LIST_HEAD(&space_info->block_groups[i]); init_rwsem(&space_info->groups_sem); spin_lock_init(&space_info->lock); @@ -257,9 +249,64 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags) INIT_LIST_HEAD(&space_info->priority_tickets); space_info->clamp = 1; btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags)); + space_info->subgroup_id = BTRFS_SUB_GROUP_PRIMARY; if (btrfs_is_zoned(info)) space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; +} + +static int create_space_info_sub_group(struct btrfs_space_info *parent, u64 flags, + enum btrfs_space_info_sub_group id, int index) +{ + struct btrfs_fs_info *fs_info = parent->fs_info; + struct btrfs_space_info *sub_group; + int ret; + + ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY); + ASSERT(id != BTRFS_SUB_GROUP_PRIMARY); + + sub_group = kzalloc(sizeof(*sub_group), GFP_NOFS); + if (!sub_group) + return -ENOMEM; + + init_space_info(fs_info, sub_group, flags); + parent->sub_group[index] = sub_group; + sub_group->parent = parent; + sub_group->subgroup_id = id; + + ret = btrfs_sysfs_add_space_info_type(fs_info, sub_group); + if (ret) { + kfree(sub_group); + parent->sub_group[index] = NULL; + } + return ret; +} + +static int create_space_info(struct btrfs_fs_info *info, u64 flags) +{ + + struct btrfs_space_info *space_info; + int ret = 0; + + space_info = kzalloc(sizeof(*space_info), GFP_NOFS); + if (!space_info) + return -ENOMEM; + + init_space_info(info, space_info, flags); + + if (btrfs_is_zoned(info)) { + if (flags & BTRFS_BLOCK_GROUP_DATA) + ret = create_space_info_sub_group(space_info, flags, + BTRFS_SUB_GROUP_DATA_RELOC, + 0); + else if (flags & BTRFS_BLOCK_GROUP_METADATA) + ret = create_space_info_sub_group(space_info, flags, + BTRFS_SUB_GROUP_TREELOG, + 0); + + if (ret) + return ret; + } ret = btrfs_sysfs_add_space_info_type(info, space_info); if (ret) @@ -312,31 +359,29 @@ out: void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, struct btrfs_block_group *block_group) { - struct btrfs_space_info *found; + struct btrfs_space_info *space_info = block_group->space_info; int factor, index; factor = btrfs_bg_type_to_factor(block_group->flags); - found = btrfs_find_space_info(info, block_group->flags); - ASSERT(found); - spin_lock(&found->lock); - found->total_bytes += block_group->length; - found->disk_total += block_group->length * factor; - found->bytes_used += block_group->used; - found->disk_used += block_group->used * factor; - found->bytes_readonly += block_group->bytes_super; - btrfs_space_info_update_bytes_zone_unusable(found, 
block_group->zone_unusable); + spin_lock(&space_info->lock); + space_info->total_bytes += block_group->length; + space_info->disk_total += block_group->length * factor; + space_info->bytes_used += block_group->used; + space_info->disk_used += block_group->used * factor; + space_info->bytes_readonly += block_group->bytes_super; + btrfs_space_info_update_bytes_zone_unusable(space_info, block_group->zone_unusable); if (block_group->length > 0) - found->full = 0; - btrfs_try_granting_tickets(info, found); - spin_unlock(&found->lock); + space_info->full = 0; + btrfs_try_granting_tickets(info, space_info); + spin_unlock(&space_info->lock); - block_group->space_info = found; + block_group->space_info = space_info; index = btrfs_bg_flags_to_raid_index(block_group->flags); - down_write(&found->groups_sem); - list_add_tail(&block_group->list, &found->block_groups[index]); - up_write(&found->groups_sem); + down_write(&space_info->groups_sem); + list_add_tail(&block_group->list, &space_info->block_groups[index]); + up_write(&space_info->groups_sem); } struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, @@ -556,8 +601,9 @@ static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info, lockdep_assert_held(&info->lock); /* The free space could be negative in case of overcommit */ - btrfs_info(fs_info, "space_info %s has %lld free, is %sfull", - flag_str, + btrfs_info(fs_info, + "space_info %s (sub-group id %d) has %lld free, is %sfull", + flag_str, info->subgroup_id, (s64)(info->total_bytes - btrfs_space_info_used(info, true)), info->full ? "" : "not "); btrfs_info(fs_info, @@ -812,7 +858,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, ret = PTR_ERR(trans); break; } - ret = btrfs_chunk_alloc(trans, + ret = btrfs_chunk_alloc(trans, space_info, btrfs_get_alloc_profile(fs_info, space_info->flags), (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE : CHUNK_ALLOC_FORCE); @@ -1083,23 +1129,15 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, return (tickets_id != space_info->tickets_id); } -/* - * This is for normal flushers, we can wait all goddamned day if we want to. We - * will loop and continuously try to flush as long as we are making progress. - * We count progress as clearing off tickets each time we have to loop. - */ -static void btrfs_async_reclaim_metadata_space(struct work_struct *work) +static void do_async_reclaim_metadata_space(struct btrfs_space_info *space_info) { - struct btrfs_fs_info *fs_info; - struct btrfs_space_info *space_info; + struct btrfs_fs_info *fs_info = space_info->fs_info; u64 to_reclaim; enum btrfs_flush_state flush_state; int commit_cycles = 0; u64 last_tickets_id; enum btrfs_flush_state final_state; - fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); - space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); if (btrfs_is_zoned(fs_info)) final_state = RESET_ZONES; else @@ -1174,6 +1212,25 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) } /* + * This is for normal flushers, it can wait as much time as needed. We will + * loop and continuously try to flush as long as we are making progress. We + * count progress as clearing off tickets each time we have to loop. 
+ */ +static void btrfs_async_reclaim_metadata_space(struct work_struct *work) +{ + struct btrfs_fs_info *fs_info; + struct btrfs_space_info *space_info; + + fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); + space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); + do_async_reclaim_metadata_space(space_info); + for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) { + if (space_info->sub_group[i]) + do_async_reclaim_metadata_space(space_info->sub_group[i]); + } +} + +/* * This handles pre-flushing of metadata space before we get to the point that * we need to start blocking threads on tickets. The logic here is different * from the other flush paths because it doesn't rely on tickets to tell us how @@ -1318,16 +1375,12 @@ static const enum btrfs_flush_state data_flush_states[] = { ALLOC_CHUNK_FORCE, }; -static void btrfs_async_reclaim_data_space(struct work_struct *work) +static void do_async_reclaim_data_space(struct btrfs_space_info *space_info) { - struct btrfs_fs_info *fs_info; - struct btrfs_space_info *space_info; + struct btrfs_fs_info *fs_info = space_info->fs_info; u64 last_tickets_id; enum btrfs_flush_state flush_state = 0; - fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); - space_info = fs_info->data_sinfo; - spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { space_info->flush = 0; @@ -1395,6 +1448,19 @@ aborted_fs: spin_unlock(&space_info->lock); } +static void btrfs_async_reclaim_data_space(struct work_struct *work) +{ + struct btrfs_fs_info *fs_info; + struct btrfs_space_info *space_info; + + fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); + space_info = fs_info->data_sinfo; + do_async_reclaim_data_space(space_info); + for (int i = 0; i < BTRFS_SPACE_INFO_SUB_GROUP_MAX; i++) + if (space_info->sub_group[i]) + do_async_reclaim_data_space(space_info->sub_group[i]); +} + void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) { INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); @@ -1836,10 +1902,10 @@ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, * This will reserve bytes from the data space info. If there is not enough * space then we will attempt to flush space as specified by flush. 
*/ -int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, +int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes, enum btrfs_reserve_flush_enum flush) { - struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; + struct btrfs_fs_info *fs_info = space_info->fs_info; int ret; ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || @@ -1847,12 +1913,12 @@ int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, flush == BTRFS_RESERVE_NO_FLUSH); ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); - ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); + ret = __reserve_bytes(fs_info, space_info, bytes, flush); if (ret == -ENOSPC) { trace_btrfs_space_reservation(fs_info, "space_info:enospc", - data_sinfo->flags, bytes, 1); + space_info->flags, bytes, 1); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) - btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); + btrfs_dump_space_info(fs_info, space_info, bytes, 0); } return ret; } diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index a96efdb5e681..92b7f5e2b850 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -98,8 +98,18 @@ enum btrfs_flush_state { RESET_ZONES = 12, }; +enum btrfs_space_info_sub_group { + BTRFS_SUB_GROUP_PRIMARY, + BTRFS_SUB_GROUP_DATA_RELOC, + BTRFS_SUB_GROUP_TREELOG, +}; + +#define BTRFS_SPACE_INFO_SUB_GROUP_MAX 1 struct btrfs_space_info { struct btrfs_fs_info *fs_info; + struct btrfs_space_info *parent; + struct btrfs_space_info *sub_group[BTRFS_SPACE_INFO_SUB_GROUP_MAX]; + int subgroup_id; spinlock_t lock; u64 total_bytes; /* total bytes in the space, @@ -288,7 +298,7 @@ static inline void btrfs_space_info_free_bytes_may_use( btrfs_try_granting_tickets(space_info->fs_info, space_info); spin_unlock(&space_info->lock); } -int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, +int btrfs_reserve_data_bytes(struct btrfs_space_info *space_info, u64 bytes, enum btrfs_reserve_flush_enum flush); void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info); void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info); diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index c0a0b8b063d0..d4f019233493 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -69,7 +69,8 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, struct btrfs_subpage *subpage; /* For metadata we don't support large folio yet. */ - ASSERT(!folio_test_large(folio)); + if (type == BTRFS_SUBPAGE_METADATA) + ASSERT(!folio_test_large(folio)); /* * We have cases like a dummy extent buffer page, which is not mapped @@ -181,9 +182,6 @@ void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio * static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { - /* For subpage support, the folio must be single page. */ - ASSERT(folio_order(folio) == 0); - /* Basic checks */ ASSERT(folio_test_private(folio) && folio_get_private(folio)); ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 7310e2fa8526..a0c65adce1ab 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -125,7 +125,6 @@ enum { /* Rescue options */ Opt_rescue, Opt_usebackuproot, - Opt_nologreplay, /* Debugging options */ Opt_enospc_debug, @@ -246,8 +245,6 @@ static const struct fs_parameter_spec btrfs_fs_parameters[] = { /* Rescue options. 
*/ fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue), - /* Deprecated, with alias rescue=nologreplay */ - __fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL), /* Deprecated, with alias rescue=usebackuproot */ __fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL), /* For compatibility only, alias for "rescue=nologreplay". */ @@ -449,11 +446,6 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) else btrfs_clear_opt(ctx->mount_opt, NOTREELOG); break; - case Opt_nologreplay: - btrfs_warn(NULL, - "'nologreplay' is deprecated, use 'rescue=nologreplay' instead"); - btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); - break; case Opt_norecovery: btrfs_info(NULL, "'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'"); @@ -1152,11 +1144,11 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) /* * subvolumes are identified by ino 256 */ -static inline int is_subvolume_inode(struct inode *inode) +static inline bool is_subvolume_inode(struct inode *inode) { if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) - return 1; - return 0; + return true; + return false; } static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, @@ -2296,7 +2288,7 @@ static int check_dev_super(struct btrfs_device *dev) return 0; /* Only need to check the primary super block. */ - sb = btrfs_read_dev_one_super(dev->bdev, 0, true); + sb = btrfs_read_disk_super(dev->bdev, 0, true); if (IS_ERR(sb)) return PTR_ERR(sb); @@ -2529,8 +2521,8 @@ static const struct init_sequence mod_init_seq[] = { .init_func = btrfs_free_space_init, .exit_func = btrfs_free_space_exit, }, { - .init_func = extent_state_init_cachep, - .exit_func = extent_state_free_cachep, + .init_func = btrfs_extent_state_init_cachep, + .exit_func = btrfs_extent_state_free_cachep, }, { .init_func = extent_buffer_init_cachep, .exit_func = extent_buffer_free_cachep, @@ -2538,8 +2530,8 @@ static const struct init_sequence mod_init_seq[] = { .init_func = btrfs_bioset_init, .exit_func = btrfs_bioset_exit, }, { - .init_func = extent_map_init, - .exit_func = extent_map_exit, + .init_func = btrfs_extent_map_init, + .exit_func = btrfs_extent_map_exit, #ifdef CONFIG_BTRFS_EXPERIMENTAL }, { .init_func = btrfs_read_policy_init, diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index b9af74498b0c..5d93d9dd2c12 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1930,16 +1930,35 @@ void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info) kobject_put(&space_info->kobj); } -static const char *alloc_name(u64 flags) +static const char *alloc_name(struct btrfs_space_info *space_info) { + u64 flags = space_info->flags; + switch (flags) { case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA: return "mixed"; case BTRFS_BLOCK_GROUP_METADATA: - return "metadata"; + switch (space_info->subgroup_id) { + case BTRFS_SUB_GROUP_PRIMARY: + return "metadata"; + case BTRFS_SUB_GROUP_TREELOG: + return "metadata-treelog"; + default: + WARN_ON_ONCE(1); + return "metadata (unknown sub-group)"; + } case BTRFS_BLOCK_GROUP_DATA: - return "data"; + switch (space_info->subgroup_id) { + case BTRFS_SUB_GROUP_PRIMARY: + return "data"; + case BTRFS_SUB_GROUP_DATA_RELOC: + return "data-reloc"; + default: + WARN_ON_ONCE(1); + return "data (unknown sub-group)"; + } case BTRFS_BLOCK_GROUP_SYSTEM: + ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY); return "system"; default: WARN_ON(1); @@ -1958,7 +1977,7 @@ int 
btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype, fs_info->space_info_kobj, "%s", - alloc_name(space_info->flags)); + alloc_name(space_info)); if (ret) { kobject_put(&space_info->kobj); return ret; diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 5eff8d7d2360..b576897d71cc 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -102,7 +102,7 @@ struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info) if (!dev) return ERR_PTR(-ENOMEM); - extent_io_tree_init(fs_info, &dev->alloc_state, 0); + btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, 0); INIT_LIST_HEAD(&dev->dev_list); list_add(&dev->dev_list, &fs_info->fs_devices->devices); @@ -111,7 +111,7 @@ struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info) static void btrfs_free_dummy_device(struct btrfs_device *dev) { - extent_io_tree_release(&dev->alloc_state); + btrfs_extent_io_tree_release(&dev->alloc_state); kfree(dev); } @@ -157,9 +157,9 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) { - struct radix_tree_iter iter; - void **slot; struct btrfs_device *dev, *tmp; + struct extent_buffer *eb; + unsigned long index; if (!fs_info) return; @@ -169,25 +169,13 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) test_mnt->mnt_sb->s_fs_info = NULL; - spin_lock(&fs_info->buffer_lock); - radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) { - struct extent_buffer *eb; - - eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock); - if (!eb) - continue; - /* Shouldn't happen but that kind of thinking creates CVE's */ - if (radix_tree_exception(eb)) { - if (radix_tree_deref_retry(eb)) - slot = radix_tree_iter_retry(&iter); - continue; - } - slot = radix_tree_iter_resume(slot, &iter); - spin_unlock(&fs_info->buffer_lock); - free_extent_buffer_stale(eb); - spin_lock(&fs_info->buffer_lock); + xa_lock_irq(&fs_info->buffer_tree); + xa_for_each(&fs_info->buffer_tree, index, eb) { + xa_unlock_irq(&fs_info->buffer_tree); + free_extent_buffer(eb); + xa_lock_irq(&fs_info->buffer_tree); } - spin_unlock(&fs_info->buffer_lock); + xa_unlock_irq(&fs_info->buffer_tree); btrfs_mapping_tree_free(fs_info); list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices, diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 74aca7180a5a..00da54f0164c 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -14,9 +14,9 @@ #include "../disk-io.h" #include "../btrfs_inode.h" -#define PROCESS_UNLOCK (1 << 0) -#define PROCESS_RELEASE (1 << 1) -#define PROCESS_TEST_LOCKED (1 << 2) +#define PROCESS_UNLOCK (1U << 0) +#define PROCESS_RELEASE (1U << 1) +#define PROCESS_TEST_LOCKED (1U << 2) static noinline int process_page_range(struct inode *inode, u64 start, u64 end, unsigned long flags) @@ -74,7 +74,6 @@ static void extent_flag_to_str(const struct extent_state *state, char *dest) dest[0] = 0; PRINT_ONE_FLAG(state, dest, cur, DIRTY); - PRINT_ONE_FLAG(state, dest, cur, UPTODATE); PRINT_ONE_FLAG(state, dest, cur, LOCKED); PRINT_ONE_FLAG(state, dest, cur, NEW); PRINT_ONE_FLAG(state, dest, cur, DELALLOC); @@ -150,7 +149,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) * Passing NULL as we don't have fs_info but tracepoints are not used * at this point */ - extent_io_tree_init(NULL, tmp, 
IO_TREE_SELFTEST); + btrfs_extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST); /* * First go through and create and mark all of our pages dirty, we pin @@ -177,7 +176,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) * |--- delalloc ---| * |--- search ---| */ - set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL); + btrfs_set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL); start = 0; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, @@ -191,7 +190,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) sectorsize - 1, start, end); goto out_bits; } - unlock_extent(tmp, start, end, NULL); + btrfs_unlock_extent(tmp, start, end, NULL); unlock_page(locked_page); put_page(locked_page); @@ -208,7 +207,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) test_err("couldn't find the locked page"); goto out_bits; } - set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL); + btrfs_set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL); start = test_start; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, @@ -227,7 +226,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) test_err("there were unlocked pages in the range"); goto out_bits; } - unlock_extent(tmp, start, end, NULL); + btrfs_unlock_extent(tmp, start, end, NULL); /* locked_page was unlocked above */ put_page(locked_page); @@ -263,7 +262,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) * * We are re-using our test_start from above since it works out well. */ - set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL); + btrfs_set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL); start = test_start; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, page_folio(locked_page), &start, @@ -282,7 +281,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) test_err("pages in range were not all locked"); goto out_bits; } - unlock_extent(tmp, start, end, NULL); + btrfs_unlock_extent(tmp, start, end, NULL); /* * Now to test where we run into a page that is no longer dirty in the @@ -327,7 +326,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize) out_bits: if (ret) dump_extent_io_tree(tmp); - clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1); + btrfs_clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1); out: if (locked_page) put_page(locked_page); @@ -565,10 +564,10 @@ static int test_find_first_clear_extent_bit(void) test_msg("running find_first_clear_extent_bit test"); - extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST); + btrfs_extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST); /* Test correct handling of empty tree */ - find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED); + btrfs_find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED); if (start != 0 || end != -1) { test_err( "error getting a range from completely empty tree: start %llu end %llu", @@ -579,11 +578,11 @@ static int test_find_first_clear_extent_bit(void) * Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between * 4M-32M */ - set_extent_bit(&tree, SZ_1M, SZ_4M - 1, - CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); + btrfs_set_extent_bit(&tree, SZ_1M, SZ_4M - 1, + CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); - find_first_clear_extent_bit(&tree, SZ_512K, &start, &end, - CHUNK_TRIMMED | CHUNK_ALLOCATED); + btrfs_find_first_clear_extent_bit(&tree, SZ_512K, &start, &end, + 
CHUNK_TRIMMED | CHUNK_ALLOCATED); if (start != 0 || end != SZ_1M - 1) { test_err("error finding beginning range: start %llu end %llu", @@ -592,14 +591,14 @@ static int test_find_first_clear_extent_bit(void) } /* Now add 32M-64M so that we have a hole between 4M-32M */ - set_extent_bit(&tree, SZ_32M, SZ_64M - 1, - CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); + btrfs_set_extent_bit(&tree, SZ_32M, SZ_64M - 1, + CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); /* * Request first hole starting at 12M, we should get 4M-32M */ - find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end, - CHUNK_TRIMMED | CHUNK_ALLOCATED); + btrfs_find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end, + CHUNK_TRIMMED | CHUNK_ALLOCATED); if (start != SZ_4M || end != SZ_32M - 1) { test_err("error finding trimmed range: start %llu end %llu", @@ -611,8 +610,8 @@ static int test_find_first_clear_extent_bit(void) * Search in the middle of allocated range, should get the next one * available, which happens to be unallocated -> 4M-32M */ - find_first_clear_extent_bit(&tree, SZ_2M, &start, &end, - CHUNK_TRIMMED | CHUNK_ALLOCATED); + btrfs_find_first_clear_extent_bit(&tree, SZ_2M, &start, &end, + CHUNK_TRIMMED | CHUNK_ALLOCATED); if (start != SZ_4M || end != SZ_32M - 1) { test_err("error finding next unalloc range: start %llu end %llu", @@ -624,9 +623,9 @@ static int test_find_first_clear_extent_bit(void) * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag * being unset in this range, we should get the entry in range 64M-72M */ - set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL); - find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end, - CHUNK_TRIMMED); + btrfs_set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL); + btrfs_find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end, + CHUNK_TRIMMED); if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) { test_err("error finding exact range: start %llu end %llu", @@ -634,8 +633,8 @@ static int test_find_first_clear_extent_bit(void) goto out; } - find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end, - CHUNK_TRIMMED); + btrfs_find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end, + CHUNK_TRIMMED); /* * Search in the middle of set range whose immediate neighbour doesn't @@ -651,7 +650,7 @@ static int test_find_first_clear_extent_bit(void) * Search beyond any known range, shall return after last known range * and end should be -1 */ - find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED); + btrfs_find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED); if (start != SZ_64M + SZ_8M || end != -1) { test_err( "error handling beyond end of range search: start %llu end %llu", @@ -663,7 +662,7 @@ static int test_find_first_clear_extent_bit(void) out: if (ret) dump_extent_io_tree(&tree); - clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED); + btrfs_clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED); return ret; } diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c index 609bb6c9c087..3a86534c116f 100644 --- a/fs/btrfs/tests/extent-map-tests.c +++ b/fs/btrfs/tests/extent-map-tests.c @@ -22,7 +22,7 @@ static int free_extent_map_tree(struct btrfs_inode *inode) while (!RB_EMPTY_ROOT(&em_tree->root)) { node = rb_first(&em_tree->root); em = rb_entry(node, struct extent_map, rb_node); - remove_extent_mapping(inode, em); + btrfs_remove_extent_mapping(inode, em); #ifdef CONFIG_BTRFS_DEBUG if 
(refcount_read(&em->refs) != 1) { @@ -36,7 +36,7 @@ static int free_extent_map_tree(struct btrfs_inode *inode) refcount_set(&em->refs, 1); } #endif - free_extent_map(em); + btrfs_free_extent_map(em); } write_unlock(&em_tree->lock); @@ -68,7 +68,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) int ret; int ret2; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; @@ -87,10 +87,10 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) test_err("cannot add extent range [0, 16K)"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); /* Add [16K, 20K) following [0, 16K) */ - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -109,9 +109,9 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) test_err("cannot add extent range [16K, 20K)"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -137,7 +137,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) ret = -ENOENT; goto out; } - if (em->start != 0 || extent_map_end(em) != SZ_16K || + if (em->start != 0 || btrfs_extent_map_end(em) != SZ_16K || em->disk_bytenr != 0 || em->disk_num_bytes != SZ_16K) { test_err( "case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu", @@ -145,7 +145,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) em->disk_bytenr, em->disk_num_bytes); ret = -EINVAL; } - free_extent_map(em); + btrfs_free_extent_map(em); out: ret2 = free_extent_map_tree(inode); if (ret == 0) @@ -167,7 +167,7 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) int ret; int ret2; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; @@ -186,10 +186,10 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) test_err("cannot add extent range [0, 1K)"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); /* Add [4K, 8K) following [0, 1K) */ - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -208,9 +208,9 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) test_err("cannot add extent range [4K, 8K)"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -235,14 +235,14 @@ static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) ret = -ENOENT; goto out; } - if (em->start != 0 || extent_map_end(em) != SZ_1K || + if (em->start != 0 || btrfs_extent_map_end(em) != SZ_1K || em->disk_bytenr != EXTENT_MAP_INLINE) { test_err( "case2 [0 1K]: ret %d return a wrong em (start %llu len %llu disk_bytenr %llu", ret, em->start, em->len, em->disk_bytenr); ret = -EINVAL; } - free_extent_map(em); + btrfs_free_extent_map(em); out: ret2 = free_extent_map_tree(inode); if (ret == 0) @@ -260,7 +260,7 @@ static int __test_case_3(struct btrfs_fs_info *fs_info, int ret; int ret2; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { 
test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; @@ -279,9 +279,9 @@ static int __test_case_3(struct btrfs_fs_info *fs_info, test_err("cannot add extent range [4K, 8K)"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -312,15 +312,15 @@ static int __test_case_3(struct btrfs_fs_info *fs_info, * Since bytes within em are contiguous, em->block_start is identical to * em->start. */ - if (start < em->start || start + len > extent_map_end(em) || - em->start != extent_map_block_start(em)) { + if (start < em->start || start + len > btrfs_extent_map_end(em) || + em->start != btrfs_extent_map_block_start(em)) { test_err( "case3 [%llu %llu): ret %d em (start %llu len %llu disk_bytenr %llu block_len %llu)", start, start + len, ret, em->start, em->len, em->disk_bytenr, em->disk_num_bytes); ret = -EINVAL; } - free_extent_map(em); + btrfs_free_extent_map(em); out: ret2 = free_extent_map_tree(inode); if (ret == 0) @@ -369,7 +369,7 @@ static int __test_case_4(struct btrfs_fs_info *fs_info, int ret; int ret2; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; @@ -388,9 +388,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info, test_err("cannot add extent range [0, 8K)"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -410,9 +410,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info, test_err("cannot add extent range [8K, 32K)"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -438,14 +438,14 @@ static int __test_case_4(struct btrfs_fs_info *fs_info, ret = -ENOENT; goto out; } - if (start < em->start || start + len > extent_map_end(em)) { + if (start < em->start || start + len > btrfs_extent_map_end(em)) { test_err( "case4 [%llu %llu): ret %d, added wrong em (start %llu len %llu disk_bytenr %llu disk_num_bytes %llu)", start, start + len, ret, em->start, em->len, em->disk_bytenr, em->disk_num_bytes); ret = -EINVAL; } - free_extent_map(em); + btrfs_free_extent_map(em); out: ret2 = free_extent_map_tree(inode); if (ret == 0) @@ -498,7 +498,7 @@ static int add_compressed_extent(struct btrfs_inode *inode, struct extent_map *em; int ret; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; @@ -513,7 +513,7 @@ static int add_compressed_extent(struct btrfs_inode *inode, write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len); write_unlock(&em_tree->lock); - free_extent_map(em); + btrfs_free_extent_map(em); if (ret < 0) { test_err("cannot add extent map [%llu, %llu)", start, start + len); return ret; @@ -719,7 +719,7 @@ static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) if (ret) goto out; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -751,7 +751,7 @@ static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) } ret = 0; out: - free_extent_map(em); + btrfs_free_extent_map(em); ret2 = free_extent_map_tree(inode); if (ret == 0) ret = ret2; @@ -773,7 +773,7 @@ static int 
test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) test_msg("Running btrfs_drop_extent_cache with pinned"); - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; @@ -793,9 +793,9 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) test_err("couldn't add extent map"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -815,7 +815,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) test_err("couldn't add extent map"); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); /* * Drop [0, 36K) This should skip the [0, 4K) extent and then split the @@ -826,7 +826,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) /* Make sure our extent maps look sane. */ ret = -EINVAL; - em = lookup_extent_mapping(em_tree, 0, SZ_16K); + em = btrfs_lookup_extent_mapping(em_tree, 0, SZ_16K); if (!em) { test_err("didn't find an em at 0 as expected"); goto out; @@ -842,10 +842,10 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K); + em = btrfs_lookup_extent_mapping(em_tree, SZ_16K, SZ_16K); read_unlock(&em_tree->lock); if (em) { test_err("found an em when we weren't expecting one"); @@ -853,7 +853,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) } read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K); + em = btrfs_lookup_extent_mapping(em_tree, SZ_32K, SZ_16K); read_unlock(&em_tree->lock); if (!em) { test_err("didn't find an em at 32K as expected"); @@ -870,16 +870,16 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) goto out; } - if (extent_map_block_start(em) != SZ_32K + SZ_4K) { + if (btrfs_extent_map_block_start(em) != SZ_32K + SZ_4K) { test_err("em->block_start is %llu, expected 36K", - extent_map_block_start(em)); + btrfs_extent_map_block_start(em)); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1); + em = btrfs_lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1); read_unlock(&em_tree->lock); if (em) { test_err("found an unexpected em above 48K"); @@ -888,9 +888,9 @@ static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) ret = 0; out: - free_extent_map(em); + btrfs_free_extent_map(em); /* Unpin our extent to prevent warning when removing it below. 
*/ - ret2 = unpin_extent_cache(inode, 0, SZ_16K, 0); + ret2 = btrfs_unpin_extent_cache(inode, 0, SZ_16K, 0); if (ret == 0) ret = ret2; ret2 = free_extent_map_tree(inode); @@ -913,7 +913,7 @@ static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) int ret; int ret2; - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; @@ -928,13 +928,13 @@ static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len); write_unlock(&em_tree->lock); - free_extent_map(em); + btrfs_free_extent_map(em); if (ret < 0) { test_err("couldn't add extent map for range [120K, 128K)"); goto out; } - em = alloc_extent_map(); + em = btrfs_alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; @@ -967,7 +967,7 @@ static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode) write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(inode, &em, SZ_1K * 140, SZ_4K); write_unlock(&em_tree->lock); - free_extent_map(em); + btrfs_free_extent_map(em); if (ret < 0) { test_err("couldn't add extent map for range [108K, 144K)"); goto out; diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 3ea3bc2225fe..a29d2c02c2c8 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -268,7 +268,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) test_err("expected a hole, got %llu", em->disk_bytenr); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); /* @@ -314,7 +314,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) * this? 
*/ offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { @@ -336,7 +336,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); /* Regular extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); @@ -363,7 +363,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); /* The next 3 are split extents */ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); @@ -389,10 +389,10 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) test_err("wrong offset, want 0, have %llu", em->offset); goto out; } - disk_bytenr = extent_map_block_start(em); + disk_bytenr = btrfs_extent_map_block_start(em); orig_start = em->start; offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { @@ -414,7 +414,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { @@ -441,13 +441,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } disk_bytenr += (em->start - orig_start); - if (extent_map_block_start(em) != disk_bytenr) { + if (btrfs_extent_map_block_start(em) != disk_bytenr) { test_err("wrong block start, want %llu, have %llu", - disk_bytenr, extent_map_block_start(em)); + disk_bytenr, btrfs_extent_map_block_start(em)); goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); /* Prealloc extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); @@ -475,7 +475,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); /* The next 3 are a half written prealloc extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); @@ -502,10 +502,10 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) test_err("wrong offset, want 0, have %llu", em->offset); goto out; } - disk_bytenr = extent_map_block_start(em); + disk_bytenr = btrfs_extent_map_block_start(em); orig_start = em->start; offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { @@ -531,13 +531,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) em->start - orig_start, em->offset); goto out; } - if (extent_map_block_start(em) != disk_bytenr + em->offset) { + if (btrfs_extent_map_block_start(em) != disk_bytenr + em->offset) { test_err("unexpected block start, wanted %llu, have %llu", - disk_bytenr + em->offset, extent_map_block_start(em)); + disk_bytenr + em->offset, btrfs_extent_map_block_start(em)); goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { @@ -564,13 +564,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) em->start, em->offset, orig_start); goto out; } - if 
(extent_map_block_start(em) != disk_bytenr + em->offset) { + if (btrfs_extent_map_block_start(em) != disk_bytenr + em->offset) { test_err("unexpected block start, wanted %llu, have %llu", - disk_bytenr + em->offset, extent_map_block_start(em)); + disk_bytenr + em->offset, btrfs_extent_map_block_start(em)); goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); /* Now for the compressed extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); @@ -597,13 +597,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) test_err("wrong offset, want 0, have %llu", em->offset); goto out; } - if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) { + if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) { test_err("unexpected compress type, wanted %d, got %d", - BTRFS_COMPRESS_ZLIB, extent_map_compression(em)); + BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em)); goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); /* Split compressed extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); @@ -630,15 +630,15 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) test_err("wrong offset, want 0, have %llu", em->offset); goto out; } - if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) { + if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) { test_err("unexpected compress type, wanted %d, got %d", - BTRFS_COMPRESS_ZLIB, extent_map_compression(em)); + BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em)); goto out; } - disk_bytenr = extent_map_block_start(em); + disk_bytenr = btrfs_extent_map_block_start(em); orig_start = em->start; offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { @@ -664,16 +664,16 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } - if (extent_map_block_start(em) != disk_bytenr) { + if (btrfs_extent_map_block_start(em) != disk_bytenr) { test_err("block start does not match, want %llu got %llu", - disk_bytenr, extent_map_block_start(em)); + disk_bytenr, btrfs_extent_map_block_start(em)); goto out; } if (em->start != offset || em->len != 2 * sectorsize) { @@ -692,13 +692,13 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) em->start, em->offset, orig_start); goto out; } - if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) { + if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) { test_err("unexpected compress type, wanted %d, got %d", - BTRFS_COMPRESS_ZLIB, extent_map_compression(em)); + BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em)); goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); /* A hole between regular extents but no hole extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, offset + 6, sectorsize); @@ -725,7 +725,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, SZ_4M); if (IS_ERR(em)) { @@ -757,7 +757,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 
nodesize) goto out; } offset = em->start + em->len; - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, offset, sectorsize); if (IS_ERR(em)) { @@ -785,7 +785,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) ret = 0; out: if (!IS_ERR(em)) - free_extent_map(em); + btrfs_free_extent_map(em); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); @@ -858,15 +858,16 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) em->flags); goto out; } - free_extent_map(em); + btrfs_free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, sectorsize, 2 * sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } - if (extent_map_block_start(em) != sectorsize) { - test_err("expected a real extent, got %llu", extent_map_block_start(em)); + if (btrfs_extent_map_block_start(em) != sectorsize) { + test_err("expected a real extent, got %llu", + btrfs_extent_map_block_start(em)); goto out; } if (em->start != sectorsize || em->len != sectorsize) { @@ -883,7 +884,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) ret = 0; out: if (!IS_ERR(em)) - free_extent_map(em); + btrfs_free_extent_map(em); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); @@ -949,11 +950,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */ - ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, - BTRFS_MAX_EXTENT_SIZE >> 1, - (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | - EXTENT_UPTODATE, NULL); + ret = btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, + BTRFS_MAX_EXTENT_SIZE >> 1, + (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1017,11 +1017,10 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ - ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, - BTRFS_MAX_EXTENT_SIZE + sectorsize, - BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | - EXTENT_UPTODATE, NULL); + ret = btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, + BTRFS_MAX_EXTENT_SIZE + sectorsize, + BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1052,9 +1051,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) } /* Empty */ - ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | - EXTENT_UPTODATE, NULL); + ret = btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1068,9 +1066,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) ret = 0; out: if (ret) - clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, - EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | - EXTENT_UPTODATE, NULL); + btrfs_clear_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f26a394a9ec5..b96195d6480f 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -197,7 +197,7 @@ static noinline void 
switch_commit_roots(struct btrfs_trans_handle *trans) list_del_init(&root->dirty_list); free_extent_buffer(root->commit_root); root->commit_root = btrfs_root_node(root); - extent_io_tree_release(&root->dirty_log_pages); + btrfs_extent_io_tree_release(&root->dirty_log_pages); btrfs_qgroup_clean_swapped_blocks(root); } @@ -383,10 +383,10 @@ loop: INIT_LIST_HEAD(&cur_trans->deleted_bgs); spin_lock_init(&cur_trans->dropped_roots_lock); list_add_tail(&cur_trans->list, &fs_info->trans_list); - extent_io_tree_init(fs_info, &cur_trans->dirty_pages, - IO_TREE_TRANS_DIRTY_PAGES); - extent_io_tree_init(fs_info, &cur_trans->pinned_extents, - IO_TREE_FS_PINNED_EXTENTS); + btrfs_extent_io_tree_init(fs_info, &cur_trans->dirty_pages, + IO_TREE_TRANS_DIRTY_PAGES); + btrfs_extent_io_tree_init(fs_info, &cur_trans->pinned_extents, + IO_TREE_FS_PINNED_EXTENTS); btrfs_set_fs_generation(fs_info, fs_info->generation + 1); cur_trans->transid = fs_info->generation; fs_info->running_transaction = cur_trans; @@ -538,15 +538,15 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info) } } -static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type) +static bool may_wait_transaction(struct btrfs_fs_info *fs_info, int type) { if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) - return 0; + return false; if (type == TRANS_START) - return 1; + return true; - return 0; + return false; } static inline bool need_reserve_reloc_root(struct btrfs_root *root) @@ -761,9 +761,10 @@ got_it: * value here. */ if (do_chunk_alloc && num_bytes) { - u64 flags = h->block_rsv->space_info->flags; + struct btrfs_space_info *space_info = h->block_rsv->space_info; + u64 flags = space_info->flags; - btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), + btrfs_chunk_alloc(h, space_info, btrfs_get_alloc_profile(fs_info, flags), CHUNK_ALLOC_NO_FORCE); } @@ -1128,13 +1129,13 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, u64 start = 0; u64 end; - while (find_first_extent_bit(dirty_pages, start, &start, &end, - mark, &cached_state)) { + while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end, + mark, &cached_state)) { bool wait_writeback = false; - ret = convert_extent_bit(dirty_pages, start, end, - EXTENT_NEED_WAIT, - mark, &cached_state); + ret = btrfs_convert_extent_bit(dirty_pages, start, end, + EXTENT_NEED_WAIT, + mark, &cached_state); /* * convert_extent_bit can return -ENOMEM, which is most of the * time a temporary error. So when it happens, ignore the error @@ -1155,8 +1156,8 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, if (!ret) ret = filemap_fdatawrite_range(mapping, start, end); if (!ret && wait_writeback) - ret = filemap_fdatawait_range(mapping, start, end); - free_extent_state(cached_state); + btrfs_btree_wait_writeback_range(fs_info, start, end); + btrfs_free_extent_state(cached_state); if (ret) break; cached_state = NULL; @@ -1175,14 +1176,13 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages) { - struct address_space *mapping = fs_info->btree_inode->i_mapping; struct extent_state *cached_state = NULL; u64 start = 0; u64 end; int ret = 0; - while (find_first_extent_bit(dirty_pages, start, &start, &end, - EXTENT_NEED_WAIT, &cached_state)) { + while (btrfs_find_first_extent_bit(dirty_pages, start, &start, &end, + EXTENT_NEED_WAIT, &cached_state)) { /* * Ignore -ENOMEM errors returned by clear_extent_bit(). 
* When committing the transaction, we'll remove any entries @@ -1191,13 +1191,13 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, * concurrently - we do it only at transaction commit time when * it's safe to do it (through extent_io_tree_release()). */ - ret = clear_extent_bit(dirty_pages, start, end, - EXTENT_NEED_WAIT, &cached_state); + ret = btrfs_clear_extent_bit(dirty_pages, start, end, + EXTENT_NEED_WAIT, &cached_state); if (ret == -ENOMEM) ret = 0; if (!ret) - ret = filemap_fdatawait_range(mapping, start, end); - free_extent_state(cached_state); + btrfs_btree_wait_writeback_range(fs_info, start, end); + btrfs_free_extent_state(cached_state); if (ret) break; cached_state = NULL; @@ -1265,7 +1265,7 @@ static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans) blk_finish_plug(&plug); ret2 = btrfs_wait_extents(fs_info, dirty_pages); - extent_io_tree_release(&trans->transaction->dirty_pages); + btrfs_extent_io_tree_release(&trans->transaction->dirty_pages); if (ret) return ret; @@ -1327,7 +1327,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans) struct btrfs_fs_info *fs_info = trans->fs_info; struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; struct list_head *io_bgs = &trans->transaction->io_bgs; - struct list_head *next; struct extent_buffer *eb; int ret; @@ -1363,13 +1362,13 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans) again: while (!list_empty(&fs_info->dirty_cowonly_roots)) { struct btrfs_root *root; - next = fs_info->dirty_cowonly_roots.next; - list_del_init(next); - root = list_entry(next, struct btrfs_root, dirty_list); + + root = list_first_entry(&fs_info->dirty_cowonly_roots, + struct btrfs_root, dirty_list); clear_bit(BTRFS_ROOT_DIRTY, &root->state); + list_move_tail(&root->dirty_list, + &trans->transaction->switch_commits); - list_add_tail(&root->dirty_list, - &trans->transaction->switch_commits); ret = update_cowonly_root(trans, root); if (ret) return ret; @@ -2271,14 +2270,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) wake_up(&fs_info->transaction_blocked_wait); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); - if (cur_trans->list.prev != &fs_info->trans_list) { + if (!list_is_first(&cur_trans->list, &fs_info->trans_list)) { enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; if (trans->in_fsync) want_state = TRANS_STATE_SUPER_COMMITTED; - prev_trans = list_entry(cur_trans->list.prev, - struct btrfs_transaction, list); + prev_trans = list_prev_entry(cur_trans, list); if (prev_trans->state < want_state) { refcount_inc(&prev_trans->use_count); spin_unlock(&fs_info->trans_lock); @@ -2555,7 +2553,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) wake_up(&cur_trans->commit_wait); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); - btrfs_finish_extent_commit(trans); + ret = btrfs_finish_extent_commit(trans); + if (ret) + goto scrub_continue; if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags)) btrfs_clear_space_info_full(fs_info); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 2b66a6130269..8f4703b488b7 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1571,7 +1571,7 @@ static int check_extent_item(struct extent_buffer *leaf, inline_type); return -EUCLEAN; } - if (inline_type < last_type) { + if (unlikely(inline_type < last_type)) { extent_err(leaf, slot, "inline ref out-of-order: has type %u, prev type %u", 
inline_type, last_type); @@ -1580,7 +1580,7 @@ static int check_extent_item(struct extent_buffer *leaf, /* Type changed, allow the sequence starts from U64_MAX again. */ if (inline_type > last_type) last_seq = U64_MAX; - if (seq > last_seq) { + if (unlikely(seq > last_seq)) { extent_err(leaf, slot, "inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx", inline_type, inline_offset, seq, @@ -1929,7 +1929,7 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf, break; } - if (ret) + if (unlikely(ret)) return BTRFS_TREE_BLOCK_INVALID_ITEM; return BTRFS_TREE_BLOCK_CLEAN; } @@ -2229,9 +2229,8 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int ret; found_level = btrfs_header_level(eb); - if (found_level != check->level) { - WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), - KERN_ERR "BTRFS: tree level check failed\n"); + if (unlikely(found_level != check->level)) { + DEBUG_WARN(); btrfs_err(fs_info, "tree level mismatch detected, bytenr=%llu level expected=%u has=%u", eb->start, check->level, found_level); @@ -2251,11 +2250,11 @@ int btrfs_verify_level_key(struct extent_buffer *eb, return 0; /* We have @first_key, so this @eb must have at least one item */ - if (btrfs_header_nritems(eb) == 0) { + if (unlikely(btrfs_header_nritems(eb) == 0)) { btrfs_err(fs_info, "invalid tree nritems, bytenr=%llu nritems=0 expect >0", eb->start); - WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + DEBUG_WARN(); return -EUCLEAN; } @@ -2263,11 +2262,10 @@ int btrfs_verify_level_key(struct extent_buffer *eb, btrfs_node_key_to_cpu(eb, &found_key, 0); else btrfs_item_key_to_cpu(eb, &found_key, 0); - ret = btrfs_comp_cpu_keys(&check->first_key, &found_key); - if (ret) { - WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), - KERN_ERR "BTRFS: tree first key check failed\n"); + ret = btrfs_comp_cpu_keys(&check->first_key, &found_key); + if (unlikely(ret)) { + DEBUG_WARN(); btrfs_err(fs_info, "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)", eb->start, check->transid, check->first_key.objectid, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 90dc094cfa5e..97e933113b82 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -860,9 +860,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sums; struct btrfs_root *csum_root; - sums = list_entry(ordered_sums.next, - struct btrfs_ordered_sum, - list); + sums = list_first_entry(&ordered_sums, + struct btrfs_ordered_sum, + list); csum_root = btrfs_csum_root(fs_info, sums->logical); if (!ret) @@ -3251,8 +3251,8 @@ static void free_log_tree(struct btrfs_trans_handle *trans, } } - extent_io_tree_release(&log->dirty_log_pages); - extent_io_tree_release(&log->log_csum_range); + btrfs_extent_io_tree_release(&log->dirty_log_pages); + btrfs_extent_io_tree_release(&log->log_csum_range); btrfs_put_root(log); } @@ -4300,8 +4300,8 @@ static int log_csums(struct btrfs_trans_handle *trans, * file which happens to refer to the same extent as well. Such races * can leave checksum items in the log with overlapping ranges. 
*/ - ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end, - &cached_state); + ret = btrfs_lock_extent(&log_root->log_csum_range, sums->logical, lock_end, + &cached_state); if (ret) return ret; /* @@ -4317,8 +4317,8 @@ static int log_csums(struct btrfs_trans_handle *trans, if (!ret) ret = btrfs_csum_file_blocks(trans, log_root, sums); - unlock_extent(&log_root->log_csum_range, sums->logical, lock_end, - &cached_state); + btrfs_unlock_extent(&log_root->log_csum_range, sums->logical, lock_end, + &cached_state); return ret; } @@ -4648,7 +4648,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans, return 0; /* If we're compressed we have to save the entire range of csums. */ - if (extent_map_is_compressed(em)) { + if (btrfs_extent_map_is_compressed(em)) { csum_offset = 0; csum_len = em->disk_num_bytes; } else { @@ -4657,7 +4657,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans, } /* block start is already adjusted for the file extent offset. */ - block_start = extent_map_block_start(em); + block_start = btrfs_extent_map_block_start(em); csum_root = btrfs_csum_root(trans->fs_info, block_start); ret = btrfs_lookup_csums_list(csum_root, block_start + csum_offset, block_start + csum_offset + csum_len - 1, @@ -4667,9 +4667,9 @@ static int log_extent_csums(struct btrfs_trans_handle *trans, ret = 0; while (!list_empty(&ordered_sums)) { - struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, - struct btrfs_ordered_sum, - list); + struct btrfs_ordered_sum *sums = list_first_entry(&ordered_sums, + struct btrfs_ordered_sum, + list); if (!ret) ret = log_csums(trans, inode, log_root, sums); list_del(&sums->list); @@ -4692,7 +4692,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, struct btrfs_key key; enum btrfs_compression_type compress_type; u64 extent_offset = em->offset; - u64 block_start = extent_map_block_start(em); + u64 block_start = btrfs_extent_map_block_start(em); u64 block_len; int ret; @@ -4703,7 +4703,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG); block_len = em->disk_num_bytes; - compress_type = extent_map_compression(em); + compress_type = btrfs_extent_map_compression(em); if (compress_type != BTRFS_COMPRESS_NONE) { btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start); btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len); @@ -4947,7 +4947,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, list_sort(NULL, &extents, extent_cmp); process: while (!list_empty(&extents)) { - em = list_entry(extents.next, struct extent_map, list); + em = list_first_entry(&extents, struct extent_map, list); list_del_init(&em->list); @@ -4956,8 +4956,8 @@ process: * private list. 
*/ if (ret) { - clear_em_logging(inode, em); - free_extent_map(em); + btrfs_clear_em_logging(inode, em); + btrfs_free_extent_map(em); continue; } @@ -4965,8 +4965,8 @@ process: ret = log_one_extent(trans, inode, em, path, ctx); write_lock(&tree->lock); - clear_em_logging(inode, em); - free_extent_map(em); + btrfs_clear_em_logging(inode, em); + btrfs_free_extent_map(em); } WARN_ON(!list_empty(&extents)); write_unlock(&tree->lock); @@ -6583,6 +6583,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, btrfs_log_get_delayed_items(inode, &delayed_ins_list, &delayed_del_list); + /* + * If we are fsyncing a file with 0 hard links, then commit the delayed + * inode because the last inode ref (or extref) item may still be in the + * subvolume tree and if we log it the file will still exist after a log + * replay. So commit the delayed inode to delete that last ref and we + * skip logging it. + */ + if (inode->vfs_inode.i_nlink == 0) { + ret = btrfs_commit_inode_delayed_inode(inode); + if (ret) + goto out_unlock; + } + ret = copy_inode_items_to_log(trans, inode, &min_key, &max_key, path, dst_path, logged_isize, inode_only, ctx, @@ -7051,14 +7064,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (btrfs_root_generation(&root->root_item) == trans->transid) return BTRFS_LOG_FORCE_COMMIT; - /* - * Skip already logged inodes or inodes corresponding to tmpfiles - * (since logging them is pointless, a link count of 0 means they - * will never be accessible). - */ - if ((btrfs_inode_in_log(inode, trans->transid) && - list_empty(&ctx->ordered_extents)) || - inode->vfs_inode.i_nlink == 0) + /* Skip already logged inodes and without new extents. */ + if (btrfs_inode_in_log(inode, trans->transid) && + list_empty(&ctx->ordered_extents)) return BTRFS_NO_LOG_SYNC; ret = start_log_trans(trans, root, ctx); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8e6b6fed7429..89835071cfea 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -404,7 +404,7 @@ static void btrfs_free_device(struct btrfs_device *device) { WARN_ON(!list_empty(&device->post_commit_list)); rcu_string_free(device->name); - extent_io_tree_release(&device->alloc_state); + btrfs_extent_io_tree_release(&device->alloc_state); btrfs_destroy_dev_zone_info(device); kfree(device); } @@ -415,8 +415,8 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices) WARN_ON(fs_devices->opened); while (!list_empty(&fs_devices->devices)) { - device = list_entry(fs_devices->devices.next, - struct btrfs_device, dev_list); + device = list_first_entry(&fs_devices->devices, + struct btrfs_device, dev_list); list_del(&device->dev_list); btrfs_free_device(device); } @@ -428,8 +428,8 @@ void __exit btrfs_cleanup_fs_uuids(void) struct btrfs_fs_devices *fs_devices; while (!list_empty(&fs_uuids)) { - fs_devices = list_entry(fs_uuids.next, - struct btrfs_fs_devices, fs_list); + fs_devices = list_first_entry(&fs_uuids, struct btrfs_fs_devices, + fs_list); list_del(&fs_devices->fs_list); free_fs_devices(fs_devices); } @@ -493,7 +493,7 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder, } } invalidate_bdev(bdev); - *disk_super = btrfs_read_dev_super(bdev); + *disk_super = btrfs_read_disk_super(bdev, 0, false); if (IS_ERR(*disk_super)) { ret = PTR_ERR(*disk_super); fput(*bdev_file); @@ -1149,7 +1149,7 @@ static void btrfs_close_one_device(struct btrfs_device *device) device->fs_info = NULL; atomic_set(&device->dev_stats_ccnt, 0); - extent_io_tree_release(&device->alloc_state); + 
btrfs_extent_io_tree_release(&device->alloc_state); /* * Reset the flush error record. We might have a transient flush error @@ -1325,48 +1325,58 @@ void btrfs_release_disk_super(struct btrfs_super_block *super) put_page(page); } -static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, - u64 bytenr, u64 bytenr_orig) +struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, + int copy_num, bool drop_cache) { - struct btrfs_super_block *disk_super; + struct btrfs_super_block *super; struct page *page; - void *p; - pgoff_t index; + u64 bytenr, bytenr_orig; + struct address_space *mapping = bdev->bd_mapping; + int ret; - /* make sure our super fits in the device */ - if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev)) - return ERR_PTR(-EINVAL); + bytenr_orig = btrfs_sb_offset(copy_num); + ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr); + if (ret < 0) { + if (ret == -ENOENT) + ret = -EINVAL; + return ERR_PTR(ret); + } - /* make sure our super fits in the page */ - if (sizeof(*disk_super) > PAGE_SIZE) + if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev)) return ERR_PTR(-EINVAL); - /* make sure our super doesn't straddle pages on disk */ - index = bytenr >> PAGE_SHIFT; - if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) - return ERR_PTR(-EINVAL); + if (drop_cache) { + /* This should only be called with the primary sb. */ + ASSERT(copy_num == 0); - /* pull in the page with our super */ - page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL); + /* + * Drop the page of the primary superblock, so later read will + * always read from the device. + */ + invalidate_inode_pages2_range(mapping, bytenr >> PAGE_SHIFT, + (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT); + } + page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); if (IS_ERR(page)) return ERR_CAST(page); - p = page_address(page); - - /* align our pointer to the offset of the super block */ - disk_super = p + offset_in_page(bytenr); - - if (btrfs_super_bytenr(disk_super) != bytenr_orig || - btrfs_super_magic(disk_super) != BTRFS_MAGIC) { - btrfs_release_disk_super(p); + super = page_address(page); + if (btrfs_super_magic(super) != BTRFS_MAGIC || + btrfs_super_bytenr(super) != bytenr_orig) { + btrfs_release_disk_super(super); return ERR_PTR(-EINVAL); } - if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1]) - disk_super->label[BTRFS_LABEL_SIZE - 1] = 0; + /* + * Make sure the last byte of label is properly NUL termiated. We use + * '%s' to print the label, if not properly NUL termiated we can access + * beyond the label. + */ + if (super->label[0] && super->label[BTRFS_LABEL_SIZE - 1]) + super->label[BTRFS_LABEL_SIZE - 1] = 0; - return disk_super; + return super; } int btrfs_forget_devices(dev_t devt) @@ -1437,9 +1447,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, bool new_device_added = false; struct btrfs_device *device = NULL; struct file *bdev_file; - u64 bytenr; dev_t devt; - int ret; lockdep_assert_held(&uuid_mutex); @@ -1457,20 +1465,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, if (IS_ERR(bdev_file)) return ERR_CAST(bdev_file); - /* - * We would like to check all the super blocks, but doing so would - * allow a mount to succeed after a mkfs from a different filesystem. - * Currently, recovery from a bad primary btrfs superblock is done - * using the userspace command 'btrfs check --super'. 
- */ - ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr); - if (ret) { - device = ERR_PTR(ret); - goto error_bdev_put; - } - - disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr, - btrfs_sb_offset(0)); + disk_super = btrfs_read_disk_super(file_bdev(bdev_file), 0, false); if (IS_ERR(disk_super)) { device = ERR_CAST(disk_super); goto error_bdev_put; @@ -1511,9 +1506,9 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start, lockdep_assert_held(&device->fs_info->chunk_mutex); - if (find_first_extent_bit(&device->alloc_state, *start, - &physical_start, &physical_end, - CHUNK_ALLOCATED, NULL)) { + if (btrfs_find_first_extent_bit(&device->alloc_state, *start, + &physical_start, &physical_end, + CHUNK_ALLOCATED, NULL)) { if (in_range(physical_start, *start, len) || in_range(*start, physical_start, @@ -1528,6 +1523,9 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start, static u64 dev_extent_search_start(struct btrfs_device *device) { switch (device->fs_devices->chunk_alloc_policy) { + default: + btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy); + fallthrough; case BTRFS_CHUNK_ALLOC_REGULAR: return BTRFS_DEVICE_RANGE_RESERVED; case BTRFS_CHUNK_ALLOC_ZONED: @@ -1537,8 +1535,6 @@ static u64 dev_extent_search_start(struct btrfs_device *device) * for superblock logging. */ return 0; - default: - BUG(); } } @@ -1551,7 +1547,8 @@ static bool dev_extent_hole_check_zoned(struct btrfs_device *device, int ret; bool changed = false; - ASSERT(IS_ALIGNED(*hole_start, zone_size)); + ASSERT(IS_ALIGNED(*hole_start, zone_size), + "hole_start=%llu zone_size=%llu", *hole_start, zone_size); while (*hole_size > 0) { pos = btrfs_find_allocatable_zones(device, *hole_start, @@ -1617,6 +1614,9 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, } switch (device->fs_devices->chunk_alloc_policy) { + default: + btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy); + fallthrough; case BTRFS_CHUNK_ALLOC_REGULAR: /* No extra check */ break; @@ -1631,8 +1631,6 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start, continue; } break; - default: - BUG(); } break; @@ -1802,7 +1800,9 @@ next: else ret = 0; - ASSERT(max_hole_start + max_hole_size <= search_end); + ASSERT(max_hole_start + max_hole_size <= search_end, + "max_hole_start=%llu max_hole_size=%llu search_end=%llu", + max_hole_start, max_hole_size, search_end); out: btrfs_free_path(path); *start = max_hole_start; @@ -2115,7 +2115,7 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) down_read(&fs_info->dev_replace.rwsem); if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) { - ASSERT(num_devices > 1); + ASSERT(num_devices > 1, "num_devices=%llu", num_devices); num_devices--; } up_read(&fs_info->dev_replace.rwsem); @@ -2131,7 +2131,7 @@ static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, const u64 bytenr = btrfs_sb_offset(copy_num); int ret; - disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); + disk_super = btrfs_read_disk_super(bdev, copy_num, false); if (IS_ERR(disk_super)) return; @@ -2319,7 +2319,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, */ if (cur_devices->num_devices == 0) { list_del_init(&cur_devices->seed_list); - ASSERT(cur_devices->opened == 1); + ASSERT(cur_devices->opened == 1, "opened=%d", cur_devices->opened); cur_devices->opened--; free_fs_devices(cur_devices); } @@ -3249,7 +3249,8 @@ int btrfs_remove_chunk(struct 
btrfs_trans_handle *trans, u64 chunk_offset) * user having built with ASSERT enabled, so if ASSERT doesn't * do anything we still error out. */ - ASSERT(0); + DEBUG_WARN("errr %ld reading chunk map at offset %llu", + PTR_ERR(map), chunk_offset); return PTR_ERR(map); } @@ -3330,8 +3331,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) if (ret == -ENOSPC) { const u64 sys_flags = btrfs_system_alloc_profile(fs_info); struct btrfs_block_group *sys_bg; + struct btrfs_space_info *space_info; + + space_info = btrfs_find_space_info(fs_info, sys_flags); + if (!space_info) { + ret = -EINVAL; + btrfs_abort_transaction(trans, ret); + goto out; + } - sys_bg = btrfs_create_chunk(trans, sys_flags); + sys_bg = btrfs_create_chunk(trans, space_info, sys_flags); if (IS_ERR(sys_bg)) { ret = PTR_ERR(sys_bg); btrfs_abort_transaction(trans, ret); @@ -3791,26 +3800,25 @@ static void reset_balance_state(struct btrfs_fs_info *fs_info) * Balance filters. Return 1 if chunk should be filtered out * (should not be balanced). */ -static int chunk_profiles_filter(u64 chunk_type, - struct btrfs_balance_args *bargs) +static bool chunk_profiles_filter(u64 chunk_type, struct btrfs_balance_args *bargs) { chunk_type = chunk_to_extended(chunk_type) & BTRFS_EXTENDED_PROFILE_MASK; if (bargs->profiles & chunk_type) - return 0; + return false; - return 1; + return true; } -static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, - struct btrfs_balance_args *bargs) +static bool chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, + struct btrfs_balance_args *bargs) { struct btrfs_block_group *cache; u64 chunk_used; u64 user_thresh_min; u64 user_thresh_max; - int ret = 1; + bool ret = true; cache = btrfs_lookup_block_group(fs_info, chunk_offset); chunk_used = cache->used; @@ -3828,18 +3836,18 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off user_thresh_max = mult_perc(cache->length, bargs->usage_max); if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max) - ret = 0; + ret = false; btrfs_put_block_group(cache); return ret; } -static int chunk_usage_filter(struct btrfs_fs_info *fs_info, - u64 chunk_offset, struct btrfs_balance_args *bargs) +static bool chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, + struct btrfs_balance_args *bargs) { struct btrfs_block_group *cache; u64 chunk_used, user_thresh; - int ret = 1; + bool ret = true; cache = btrfs_lookup_block_group(fs_info, chunk_offset); chunk_used = cache->used; @@ -3852,15 +3860,14 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, user_thresh = mult_perc(cache->length, bargs->usage); if (chunk_used < user_thresh) - ret = 0; + ret = false; btrfs_put_block_group(cache); return ret; } -static int chunk_devid_filter(struct extent_buffer *leaf, - struct btrfs_chunk *chunk, - struct btrfs_balance_args *bargs) +static bool chunk_devid_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk, + struct btrfs_balance_args *bargs) { struct btrfs_stripe *stripe; int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); @@ -3869,10 +3876,10 @@ static int chunk_devid_filter(struct extent_buffer *leaf, for (i = 0; i < num_stripes; i++) { stripe = btrfs_stripe_nr(chunk, i); if (btrfs_stripe_devid(leaf, stripe) == bargs->devid) - return 0; + return false; } - return 1; + return true; } static u64 calc_data_stripes(u64 type, int num_stripes) @@ -3885,9 +3892,8 @@ static u64 calc_data_stripes(u64 type, int num_stripes) } /* [pstart, pend) */ 
-static int chunk_drange_filter(struct extent_buffer *leaf, - struct btrfs_chunk *chunk, - struct btrfs_balance_args *bargs) +static bool chunk_drange_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk, + struct btrfs_balance_args *bargs) { struct btrfs_stripe *stripe; int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); @@ -3898,7 +3904,7 @@ static int chunk_drange_filter(struct extent_buffer *leaf, int i; if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) - return 0; + return false; type = btrfs_chunk_type(leaf, chunk); factor = calc_data_stripes(type, num_stripes); @@ -3914,56 +3920,53 @@ static int chunk_drange_filter(struct extent_buffer *leaf, if (stripe_offset < bargs->pend && stripe_offset + stripe_length > bargs->pstart) - return 0; + return false; } - return 1; + return true; } /* [vstart, vend) */ -static int chunk_vrange_filter(struct extent_buffer *leaf, - struct btrfs_chunk *chunk, - u64 chunk_offset, - struct btrfs_balance_args *bargs) +static bool chunk_vrange_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk, + u64 chunk_offset, struct btrfs_balance_args *bargs) { if (chunk_offset < bargs->vend && chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) /* at least part of the chunk is inside this vrange */ - return 0; + return false; - return 1; + return true; } -static int chunk_stripes_range_filter(struct extent_buffer *leaf, - struct btrfs_chunk *chunk, - struct btrfs_balance_args *bargs) +static bool chunk_stripes_range_filter(struct extent_buffer *leaf, + struct btrfs_chunk *chunk, + struct btrfs_balance_args *bargs) { int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); if (bargs->stripes_min <= num_stripes && num_stripes <= bargs->stripes_max) - return 0; + return false; - return 1; + return true; } -static int chunk_soft_convert_filter(u64 chunk_type, - struct btrfs_balance_args *bargs) +static bool chunk_soft_convert_filter(u64 chunk_type, struct btrfs_balance_args *bargs) { if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) - return 0; + return false; chunk_type = chunk_to_extended(chunk_type) & BTRFS_EXTENDED_PROFILE_MASK; if (bargs->target == chunk_type) - return 1; + return true; - return 0; + return false; } -static int should_balance_chunk(struct extent_buffer *leaf, - struct btrfs_chunk *chunk, u64 chunk_offset) +static bool should_balance_chunk(struct extent_buffer *leaf, struct btrfs_chunk *chunk, + u64 chunk_offset) { struct btrfs_fs_info *fs_info = leaf->fs_info; struct btrfs_balance_control *bctl = fs_info->balance_ctl; @@ -3973,7 +3976,7 @@ static int should_balance_chunk(struct extent_buffer *leaf, /* type filter */ if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) & (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) { - return 0; + return false; } if (chunk_type & BTRFS_BLOCK_GROUP_DATA) @@ -3986,46 +3989,46 @@ static int should_balance_chunk(struct extent_buffer *leaf, /* profiles filter */ if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) && chunk_profiles_filter(chunk_type, bargs)) { - return 0; + return false; } /* usage filter */ if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) && chunk_usage_filter(fs_info, chunk_offset, bargs)) { - return 0; + return false; } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) && chunk_usage_range_filter(fs_info, chunk_offset, bargs)) { - return 0; + return false; } /* devid filter */ if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) && chunk_devid_filter(leaf, chunk, bargs)) { - return 0; + return false; } /* drange filter, makes sense only with devid filter */ if ((bargs->flags & 
BTRFS_BALANCE_ARGS_DRANGE) && chunk_drange_filter(leaf, chunk, bargs)) { - return 0; + return false; } /* vrange filter */ if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) && chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) { - return 0; + return false; } /* stripes filter */ if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) && chunk_stripes_range_filter(leaf, chunk, bargs)) { - return 0; + return false; } /* soft profile changing mode */ if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) && chunk_soft_convert_filter(chunk_type, bargs)) { - return 0; + return false; } /* @@ -4033,7 +4036,7 @@ static int should_balance_chunk(struct extent_buffer *leaf, */ if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) { if (bargs->limit == 0) - return 0; + return false; else bargs->limit--; } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) { @@ -4043,12 +4046,12 @@ static int should_balance_chunk(struct extent_buffer *leaf, * about the count of all chunks that satisfy the filters. */ if (bargs->limit_max == 0) - return 0; + return false; else bargs->limit_max--; } - return 1; + return true; } static int __btrfs_balance(struct btrfs_fs_info *fs_info) @@ -4663,7 +4666,8 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) } spin_lock(&fs_info->super_lock); - ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED); + ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED, + "exclusive_operation=%d", fs_info->exclusive_operation); fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE; spin_unlock(&fs_info->super_lock); /* @@ -4999,8 +5003,8 @@ again: mutex_lock(&fs_info->chunk_mutex); /* Clear all state bits beyond the shrunk device size */ - clear_extent_bits(&device->alloc_state, new_size, (u64)-1, - CHUNK_STATE_MASK); + btrfs_clear_extent_bits(&device->alloc_state, new_size, (u64)-1, + CHUNK_STATE_MASK); btrfs_device_set_disk_total_bytes(device, new_size); if (list_empty(&device->post_commit_list)) @@ -5127,6 +5131,8 @@ struct alloc_chunk_ctl { u64 stripe_size; u64 chunk_size; int ndevs; + /* Space_info the block group is going to belong. 
*/ + struct btrfs_space_info *space_info; }; static void init_alloc_chunk_ctl_policy_regular( @@ -5200,14 +5206,15 @@ static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices, ctl->ndevs = 0; switch (fs_devices->chunk_alloc_policy) { + default: + btrfs_warn_unknown_chunk_allocation(fs_devices->chunk_alloc_policy); + fallthrough; case BTRFS_CHUNK_ALLOC_REGULAR: init_alloc_chunk_ctl_policy_regular(fs_devices, ctl); break; case BTRFS_CHUNK_ALLOC_ZONED: init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl); break; - default: - BUG(); } } @@ -5346,7 +5353,9 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, * It should hold because: * dev_extent_min == dev_extent_want == zone_size * dev_stripes */ - ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min); + ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min, + "ndevs=%d max_avail=%llu dev_extent_min=%llu", ctl->ndevs, + devices_info[ctl->ndevs - 1].max_avail, ctl->dev_extent_min); ctl->stripe_size = zone_size; ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; @@ -5359,7 +5368,9 @@ static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl, ctl->dev_stripes); ctl->num_stripes = ctl->ndevs * ctl->dev_stripes; data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies; - ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size); + ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size, + "stripe_size=%llu data_stripes=%d max_chunk_size=%llu", + ctl->stripe_size, data_stripes, ctl->max_chunk_size); } ctl->chunk_size = ctl->stripe_size * data_stripes; @@ -5392,12 +5403,13 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices, ctl->ndevs = min(ctl->ndevs, ctl->devs_max); switch (fs_devices->chunk_alloc_policy) { + default: + btrfs_warn_unknown_chunk_allocation(fs_devices->chunk_alloc_policy); + fallthrough; case BTRFS_CHUNK_ALLOC_REGULAR: return decide_stripe_size_regular(ctl, devices_info); case BTRFS_CHUNK_ALLOC_ZONED: return decide_stripe_size_zoned(ctl, devices_info); - default: - BUG(); } } @@ -5407,9 +5419,9 @@ static void chunk_map_device_set_bits(struct btrfs_chunk_map *map, unsigned int struct btrfs_io_stripe *stripe = &map->stripes[i]; struct btrfs_device *device = stripe->dev; - set_extent_bit(&device->alloc_state, stripe->physical, - stripe->physical + map->stripe_size - 1, - bits | EXTENT_NOWAIT, NULL); + btrfs_set_extent_bit(&device->alloc_state, stripe->physical, + stripe->physical + map->stripe_size - 1, + bits | EXTENT_NOWAIT, NULL); } } @@ -5419,10 +5431,9 @@ static void chunk_map_device_clear_bits(struct btrfs_chunk_map *map, unsigned in struct btrfs_io_stripe *stripe = &map->stripes[i]; struct btrfs_device *device = stripe->dev; - __clear_extent_bit(&device->alloc_state, stripe->physical, - stripe->physical + map->stripe_size - 1, - bits | EXTENT_NOWAIT, - NULL, NULL); + btrfs_clear_extent_bits(&device->alloc_state, stripe->physical, + stripe->physical + map->stripe_size - 1, + bits | EXTENT_NOWAIT); } } @@ -5529,7 +5540,8 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, return ERR_PTR(ret); } - block_group = btrfs_make_block_group(trans, type, start, ctl->chunk_size); + block_group = btrfs_make_block_group(trans, ctl->space_info, type, start, + ctl->chunk_size); if (IS_ERR(block_group)) { btrfs_remove_chunk_map(info, map); return block_group; @@ -5555,7 +5567,8 @@ static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans, } struct btrfs_block_group *btrfs_create_chunk(struct 
btrfs_trans_handle *trans, - u64 type) + struct btrfs_space_info *space_info, + u64 type) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_fs_devices *fs_devices = info->fs_devices; @@ -5567,7 +5580,7 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, lockdep_assert_held(&info->chunk_mutex); if (!alloc_profile_is_valid(type, 0)) { - ASSERT(0); + DEBUG_WARN("invalid alloc profile for type %llu", type); return ERR_PTR(-EINVAL); } @@ -5579,12 +5592,13 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { btrfs_err(info, "invalid chunk type 0x%llx requested", type); - ASSERT(0); + DEBUG_WARN(); return ERR_PTR(-EINVAL); } ctl.start = find_next_chunk(info); ctl.type = type; + ctl.space_info = space_info; init_alloc_chunk_ctl(fs_devices, &ctl); devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), @@ -5728,7 +5742,9 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) struct btrfs_fs_info *fs_info = trans->fs_info; u64 alloc_profile; struct btrfs_block_group *meta_bg; + struct btrfs_space_info *meta_space_info; struct btrfs_block_group *sys_bg; + struct btrfs_space_info *sys_space_info; /* * When adding a new device for sprouting, the seed device is read-only @@ -5752,12 +5768,22 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans) */ alloc_profile = btrfs_metadata_alloc_profile(fs_info); - meta_bg = btrfs_create_chunk(trans, alloc_profile); + meta_space_info = btrfs_find_space_info(fs_info, alloc_profile); + if (!meta_space_info) { + DEBUG_WARN(); + return -EINVAL; + } + meta_bg = btrfs_create_chunk(trans, meta_space_info, alloc_profile); if (IS_ERR(meta_bg)) return PTR_ERR(meta_bg); alloc_profile = btrfs_system_alloc_profile(fs_info); - sys_bg = btrfs_create_chunk(trans, alloc_profile); + sys_space_info = btrfs_find_space_info(fs_info, alloc_profile); + if (!sys_space_info) { + DEBUG_WARN(); + return -EINVAL; + } + sys_bg = btrfs_create_chunk(trans, sys_space_info, alloc_profile); if (IS_ERR(sys_bg)) return PTR_ERR(sys_bg); @@ -5957,7 +5983,7 @@ static int btrfs_read_rr(const struct btrfs_chunk_map *map, int first, int num_s static int find_live_mirror(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map, int first, - int dev_replace_is_ongoing) + bool dev_replace_is_ongoing) { const enum btrfs_read_policy policy = READ_ONCE(fs_info->fs_devices->read_policy); int i; @@ -5966,8 +5992,8 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info, int tolerance; struct btrfs_device *srcdev; - ASSERT((map->type & - (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10))); + ASSERT((map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)), + "type=%llu", map->type); if (map->type & BTRFS_BLOCK_GROUP_RAID10) num_stripes = map->sub_stripes; @@ -6268,7 +6294,7 @@ static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc, } /* We can only have at most 2 extra nr_stripes (for DUP). */ - ASSERT(nr_extra_stripes <= 2); + ASSERT(nr_extra_stripes <= 2, "nr_extra_stripes=%d", nr_extra_stripes); /* * For GET_READ_MIRRORS, we can only return at most 1 extra stripe for * replace. @@ -6279,7 +6305,8 @@ static void handle_ops_on_dev_replace(struct btrfs_io_context *bioc, struct btrfs_io_stripe *second = &bioc->stripes[num_stripes + 1]; /* Only DUP can have two extra stripes. 
*/ - ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP); + ASSERT(bioc->map_type & BTRFS_BLOCK_GROUP_DUP, + "map_type=%llu", bioc->map_type); /* * Swap the last stripe stripes and reduce @nr_extra_stripes. @@ -6306,7 +6333,8 @@ static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset, */ io_geom->stripe_offset = offset & BTRFS_STRIPE_LEN_MASK; io_geom->stripe_nr = offset >> BTRFS_STRIPE_LEN_SHIFT; - ASSERT(io_geom->stripe_offset < U32_MAX); + ASSERT(io_geom->stripe_offset < U32_MAX, + "stripe_offset=%llu", io_geom->stripe_offset); if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { unsigned long full_stripe_len = @@ -6324,8 +6352,12 @@ static u64 btrfs_max_io_len(struct btrfs_chunk_map *map, u64 offset, io_geom->raid56_full_stripe_start = btrfs_stripe_nr_to_offset( rounddown(io_geom->stripe_nr, nr_data_stripes(map))); - ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset); - ASSERT(io_geom->raid56_full_stripe_start <= offset); + ASSERT(io_geom->raid56_full_stripe_start + full_stripe_len > offset, + "raid56_full_stripe_start=%llu full_stripe_len=%lu offset=%llu", + io_geom->raid56_full_stripe_start, full_stripe_len, offset); + ASSERT(io_geom->raid56_full_stripe_start <= offset, + "raid56_full_stripe_start=%llu offset=%llu", + io_geom->raid56_full_stripe_start, offset); /* * For writes to RAID56, allow to write a full stripe set, but * no straddling of stripe sets. @@ -6491,7 +6523,7 @@ static void map_blocks_raid56_read(struct btrfs_chunk_map *map, { int data_stripes = nr_data_stripes(map); - ASSERT(io_geom->mirror_num <= 1); + ASSERT(io_geom->mirror_num <= 1, "mirror_num=%d", io_geom->mirror_num); /* Just grab the data stripe directly. */ io_geom->stripe_index = io_geom->stripe_nr % data_stripes; io_geom->stripe_nr /= data_stripes; @@ -6559,7 +6591,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, int num_copies; struct btrfs_io_context *bioc = NULL; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; - int dev_replace_is_ongoing = 0; + bool dev_replace_is_ongoing = false; u16 num_alloc_stripes; u64 max_len; @@ -6864,7 +6896,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, atomic_set(&dev->dev_stats_ccnt, 0); btrfs_device_data_ordered_init(dev); - extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE); + btrfs_extent_io_tree_init(fs_info, &dev->alloc_state, IO_TREE_DEVICE_ALLOC_STATE); if (devid) tmp = *devid; @@ -7836,7 +7868,7 @@ void btrfs_commit_device_sizes(struct btrfs_transaction *trans) { struct btrfs_device *curr, *next; - ASSERT(trans->state == TRANS_STATE_COMMIT_DOING); + ASSERT(trans->state == TRANS_STATE_COMMIT_DOING, "state=%d" , trans->state); if (list_empty(&trans->dev_update_list)) return; @@ -8205,7 +8237,7 @@ static void map_raid56_repair_block(struct btrfs_io_context *bioc, logical < stripe_start + BTRFS_STRIPE_LEN) break; } - ASSERT(i < data_stripes); + ASSERT(i < data_stripes, "i=%d data_stripes=%d", i, data_stripes); smap->dev = bioc->stripes[i].dev; smap->physical = bioc->stripes[i].physical + ((logical - bioc->full_stripe_logical) & @@ -8234,7 +8266,7 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, int mirror_ret = mirror_num; int ret; - ASSERT(mirror_num > 0); + ASSERT(mirror_num > 0, "mirror_num=%d", mirror_num); ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length, &bioc, smap, &mirror_ret); @@ -8242,7 +8274,7 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, return ret; /* The map range should not cross stripe 
boundary. */ - ASSERT(map_length >= length); + ASSERT(map_length >= length, "map_length=%llu length=%u", map_length, length); /* Already mapped to single stripe. */ if (!bioc) @@ -8254,7 +8286,8 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info, goto out; } - ASSERT(mirror_num <= bioc->num_stripes); + ASSERT(mirror_num <= bioc->num_stripes, + "mirror_num=%d num_stripes=%d", mirror_num, bioc->num_stripes); smap->dev = bioc->stripes[mirror_num - 1].dev; smap->physical = bioc->stripes[mirror_num - 1].physical; out: diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index e247d551da67..137cc232f58e 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -473,7 +473,6 @@ struct btrfs_io_stripe { struct btrfs_device *dev; /* Block mapping. */ u64 physical; - u64 length; bool rst_search_commit_root; /* For the endio handler. */ struct btrfs_io_context *bioc; @@ -715,7 +714,8 @@ struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info, int btrfs_read_sys_array(struct btrfs_fs_info *fs_info); int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info); struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, - u64 type); + struct btrfs_space_info *space_info, + u64 type); void btrfs_mapping_tree_free(struct btrfs_fs_info *fs_info); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, blk_mode_t flags, void *holder); @@ -786,6 +786,8 @@ struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_inf struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, u64 logical, u64 length); void btrfs_remove_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map); +struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev, + int copy_num, bool drop_cache); void btrfs_release_disk_super(struct btrfs_super_block *super); static inline void btrfs_dev_stat_inc(struct btrfs_device *dev, @@ -847,6 +849,11 @@ static inline const char *btrfs_dev_name(const struct btrfs_device *device) return rcu_str_deref(device->name); } +static inline void btrfs_warn_unknown_chunk_allocation(enum btrfs_chunk_allocation_policy pol) +{ + WARN_ONCE(1, "unknown allocation policy %d, fallback to regular", pol); +} + void btrfs_commit_device_sizes(struct btrfs_transaction *trans); struct list_head * __attribute_const__ btrfs_get_fs_uuids(void); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 545f413d81fc..5292cd341f70 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -120,8 +120,6 @@ static int copy_data_into_buffer(struct address_space *mapping, ret = btrfs_compress_filemap_get_folio(mapping, cur, &folio); if (ret < 0) return ret; - /* No large folio support yet. 
*/ - ASSERT(!folio_test_large(folio)); offset = offset_in_folio(folio, cur); copy_length = min(folio_size(folio) - offset, @@ -205,7 +203,6 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping, workspace->strm.next_in = workspace->buf; workspace->strm.avail_in = copy_length; } else { - unsigned int pg_off; unsigned int cur_len; if (data_in) { @@ -217,9 +214,9 @@ int zlib_compress_folios(struct list_head *ws, struct address_space *mapping, start, &in_folio); if (ret < 0) goto out; - pg_off = offset_in_page(start); - cur_len = btrfs_calc_input_length(orig_end, start); - data_in = kmap_local_folio(in_folio, pg_off); + cur_len = btrfs_calc_input_length(in_folio, orig_end, start); + data_in = kmap_local_folio(in_folio, + offset_in_folio(in_folio, start)); start += cur_len; workspace->strm.next_in = data_in; workspace->strm.avail_in = cur_len; diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 4a3e02b49f29..b5b0156d5b95 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -989,7 +989,7 @@ int btrfs_advance_sb_log(struct btrfs_device *device, int mirror) } /* All the zones are FULL. Should not reach here. */ - ASSERT(0); + DEBUG_WARN("unexpected state, all zones full"); return -EIO; } @@ -1797,12 +1797,12 @@ static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered, ordered->disk_bytenr = logical; write_lock(&em_tree->lock); - em = search_extent_mapping(em_tree, ordered->file_offset, - ordered->num_bytes); + em = btrfs_search_extent_mapping(em_tree, ordered->file_offset, + ordered->num_bytes); /* The em should be a new COW extent, thus it should not have an offset. */ ASSERT(em->offset == 0); em->disk_bytenr = logical; - free_extent_map(em); + btrfs_free_extent_map(em); write_unlock(&em_tree->lock); } @@ -1812,8 +1812,8 @@ static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered, struct btrfs_ordered_extent *new; if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) && - split_extent_map(ordered->inode, ordered->file_offset, - ordered->num_bytes, len, logical)) + btrfs_split_extent_map(ordered->inode, ordered->file_offset, + ordered->num_bytes, len, logical)) return false; new = btrfs_split_ordered_extent(ordered, len); @@ -2171,27 +2171,15 @@ static void wait_eb_writebacks(struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; const u64 end = block_group->start + block_group->length; - struct radix_tree_iter iter; struct extent_buffer *eb; - void __rcu **slot; + unsigned long index, start = (block_group->start >> fs_info->sectorsize_bits); rcu_read_lock(); - radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, - block_group->start >> fs_info->sectorsize_bits) { - eb = radix_tree_deref_slot(slot); - if (!eb) - continue; - if (radix_tree_deref_retry(eb)) { - slot = radix_tree_iter_retry(&iter); - continue; - } - + xa_for_each_start(&fs_info->buffer_tree, index, eb, start) { if (eb->start < block_group->start) continue; if (eb->start >= end) break; - - slot = radix_tree_iter_resume(slot, &iter); rcu_read_unlock(); wait_on_extent_buffer_writeback(eb); rcu_read_lock(); diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 3541efa765c7..4a796a049b5a 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -24,7 +24,7 @@ #include "super.h" #define ZSTD_BTRFS_MAX_WINDOWLOG 17 -#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG) +#define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG) #define ZSTD_BTRFS_DEFAULT_LEVEL 3 #define ZSTD_BTRFS_MIN_LEVEL -15 #define 
ZSTD_BTRFS_MAX_LEVEL 15 @@ -426,8 +426,8 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping, ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio); if (ret < 0) goto out; - cur_len = btrfs_calc_input_length(orig_end, start); - workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_page(start)); + cur_len = btrfs_calc_input_length(in_folio, orig_end, start); + workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start)); workspace->in_buf.pos = 0; workspace->in_buf.size = cur_len; @@ -511,9 +511,9 @@ int zstd_compress_folios(struct list_head *ws, struct address_space *mapping, ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio); if (ret < 0) goto out; - cur_len = btrfs_calc_input_length(orig_end, start); + cur_len = btrfs_calc_input_length(in_folio, orig_end, start); workspace->in_buf.src = kmap_local_folio(in_folio, - offset_in_page(start)); + offset_in_folio(in_folio, start)); workspace->in_buf.pos = 0; workspace->in_buf.size = cur_len; } diff --git a/fs/buffer.c b/fs/buffer.c index 7ba1807145aa..8cf4a1dc481e 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -297,7 +297,6 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) still_busy: spin_unlock_irqrestore(&first->b_uptodate_lock, flags); - return; } struct postprocess_bh_ctx { @@ -422,7 +421,6 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate) still_busy: spin_unlock_irqrestore(&first->b_uptodate_lock, flags); - return; } /* @@ -1122,6 +1120,8 @@ static struct buffer_head * __getblk_slow(struct block_device *bdev, sector_t block, unsigned size, gfp_t gfp) { + bool blocking = gfpflags_allow_blocking(gfp); + /* Size must be multiple of hard sectorsize */ if (unlikely(size & (bdev_logical_block_size(bdev)-1) || (size < 512 || size > PAGE_SIZE))) { @@ -1137,12 +1137,15 @@ __getblk_slow(struct block_device *bdev, sector_t block, for (;;) { struct buffer_head *bh; - bh = __find_get_block(bdev, block, size); - if (bh) - return bh; - if (!grow_buffers(bdev, block, size, gfp)) return NULL; + + if (blocking) + bh = __find_get_block_nonatomic(bdev, block, size); + else + bh = __find_get_block(bdev, block, size); + if (bh) + return bh; } } @@ -1611,8 +1614,8 @@ static void discard_buffer(struct buffer_head * bh) bh->b_bdev = NULL; b_state = READ_ONCE(bh->b_state); do { - } while (!try_cmpxchg(&bh->b_state, &b_state, - b_state & ~BUFFER_FLAGS_DISCARD)); + } while (!try_cmpxchg_relaxed(&bh->b_state, &b_state, + b_state & ~BUFFER_FLAGS_DISCARD)); unlock_buffer(bh); } @@ -1677,7 +1680,6 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) filemap_release_folio(folio, 0); out: folio_clear_mappedtodisk(folio); - return; } EXPORT_SYMBOL(block_invalidate_folio); @@ -2728,7 +2730,7 @@ unlock: EXPORT_SYMBOL(block_truncate_page); /* - * The generic ->writepage function for buffer-backed address_spaces + * The generic write folio function for buffer-backed address_spaces */ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, void *get_block) @@ -2748,7 +2750,7 @@ int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, /* * The folio straddles i_size. It must be zeroed out on each and every - * writepage invocation because it may be mmapped. "A file is mapped + * writeback invocation because it may be mmapped. "A file is mapped * in multiples of the page size. 
For a file that is not a multiple of * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 38c236e38cef..b62cd3e9a18e 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -71,7 +71,6 @@ struct cachefiles_object { int debug_id; spinlock_t lock; refcount_t ref; - u8 d_name_len; /* Length of filename */ enum cachefiles_content content_info:8; /* Info about content presence */ unsigned long flags; #define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */ diff --git a/fs/cachefiles/key.c b/fs/cachefiles/key.c index b48525680e73..aae86af48ed5 100644 --- a/fs/cachefiles/key.c +++ b/fs/cachefiles/key.c @@ -132,7 +132,6 @@ bool cachefiles_cook_key(struct cachefiles_object *object) success: name[len] = 0; object->d_name = name; - object->d_name_len = len; _leave(" = %s", object->d_name); return true; } diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 14d0cc894000..aecfc5c37b49 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -98,7 +98,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, retry: ret = cachefiles_inject_read_error(); if (ret == 0) - subdir = lookup_one_len(dirname, dir, strlen(dirname)); + subdir = lookup_one(&nop_mnt_idmap, &QSTR(dirname), dir); else subdir = ERR_PTR(ret); trace_cachefiles_lookup(NULL, dir, subdir); @@ -338,7 +338,7 @@ try_again: return -EIO; } - grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer)); + grave = lookup_one(&nop_mnt_idmap, &QSTR(nbuffer), cache->graveyard); if (IS_ERR(grave)) { unlock_rename(cache->graveyard, dir); trace_cachefiles_vfs_error(object, d_inode(cache->graveyard), @@ -630,8 +630,8 @@ bool cachefiles_look_up_object(struct cachefiles_object *object) /* Look up path "cache/vol/fanout/file". 
*/ ret = cachefiles_inject_read_error(); if (ret == 0) - dentry = lookup_positive_unlocked(object->d_name, fan, - object->d_name_len); + dentry = lookup_one_positive_unlocked(&nop_mnt_idmap, + &QSTR(object->d_name), fan); else dentry = ERR_PTR(ret); trace_cachefiles_lookup(object, fan, dentry); @@ -683,7 +683,7 @@ bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache, inode_lock_nested(d_inode(fan), I_MUTEX_PARENT); ret = cachefiles_inject_read_error(); if (ret == 0) - dentry = lookup_one_len(object->d_name, fan, object->d_name_len); + dentry = lookup_one(&nop_mnt_idmap, &QSTR(object->d_name), fan); else dentry = ERR_PTR(ret); if (IS_ERR(dentry)) { @@ -702,7 +702,7 @@ bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache, dput(dentry); ret = cachefiles_inject_read_error(); if (ret == 0) - dentry = lookup_one_len(object->d_name, fan, object->d_name_len); + dentry = lookup_one(&nop_mnt_idmap, &QSTR(object->d_name), fan); else dentry = ERR_PTR(ret); if (IS_ERR(dentry)) { @@ -751,7 +751,7 @@ static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache, inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); - victim = lookup_one_len(filename, dir, strlen(filename)); + victim = lookup_one(&nop_mnt_idmap, &QSTR(filename), dir); if (IS_ERR(victim)) goto lookup_error; if (d_is_negative(victim)) diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 5568cb74b322..ebf32822e29b 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -619,7 +619,7 @@ static int populate_attrs(struct config_item *item) break; } } - if (t->ct_bin_attrs) { + if (!error && t->ct_bin_attrs) { for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) { if (ops && ops->is_bin_visible && !ops->is_bin_visible(item, bin_attr, i)) continue; @@ -970,7 +970,7 @@ static void configfs_dump_one(struct configfs_dirent *sd, int level) { pr_info("%*s\"%s\":\n", level, " ", configfs_get_name(sd)); -#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type); +#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type) type_print(CONFIGFS_ROOT); type_print(CONFIGFS_DIR); type_print(CONFIGFS_ITEM_ATTR); diff --git a/fs/configfs/item.c b/fs/configfs/item.c index 254170a82aa3..c378b5cbf87d 100644 --- a/fs/configfs/item.c +++ b/fs/configfs/item.c @@ -66,7 +66,7 @@ int config_item_set_name(struct config_item *item, const char *fmt, ...) name = kvasprintf(GFP_KERNEL, fmt, args); va_end(args); if (!name) - return -EFAULT; + return -ENOMEM; } /* Free the old name, if necessary. */ diff --git a/fs/coredump.c b/fs/coredump.c index c33c177a701b..f217ebf2b3b6 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -43,6 +43,14 @@ #include <linux/timekeeping.h> #include <linux/sysctl.h> #include <linux/elf.h> +#include <linux/pidfs.h> +#include <linux/net.h> +#include <linux/socket.h> +#include <net/af_unix.h> +#include <net/net_namespace.h> +#include <net/sock.h> +#include <uapi/linux/pidfd.h> +#include <uapi/linux/un.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> @@ -60,6 +68,12 @@ static void free_vma_snapshot(struct coredump_params *cprm); #define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024) /* Define a reasonable max cap */ #define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024) +/* + * File descriptor number for the pidfd for the thread-group leader of + * the coredumping task installed into the usermode helper's file + * descriptor table. 
+ */ +#define COREDUMP_PIDFD_NUMBER 3 static int core_uses_pid; static unsigned int core_pipe_limit; @@ -68,9 +82,16 @@ static char core_pattern[CORENAME_MAX_SIZE] = "core"; static int core_name_size = CORENAME_MAX_SIZE; unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT; +enum coredump_type_t { + COREDUMP_FILE = 1, + COREDUMP_PIPE = 2, + COREDUMP_SOCK = 3, +}; + struct core_name { char *corename; int used, size; + enum coredump_type_t core_type; }; static int expand_corename(struct core_name *cn, int size) @@ -210,18 +231,24 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, { const struct cred *cred = current_cred(); const char *pat_ptr = core_pattern; - int ispipe = (*pat_ptr == '|'); bool was_space = false; int pid_in_pattern = 0; int err = 0; cn->used = 0; cn->corename = NULL; + if (*pat_ptr == '|') + cn->core_type = COREDUMP_PIPE; + else if (*pat_ptr == '@') + cn->core_type = COREDUMP_SOCK; + else + cn->core_type = COREDUMP_FILE; if (expand_corename(cn, core_name_size)) return -ENOMEM; cn->corename[0] = '\0'; - if (ispipe) { + switch (cn->core_type) { + case COREDUMP_PIPE: { int argvs = sizeof(core_pattern) / 2; (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL); if (!(*argv)) @@ -230,6 +257,45 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, ++pat_ptr; if (!(*pat_ptr)) return -ENOMEM; + break; + } + case COREDUMP_SOCK: { + /* skip the @ */ + pat_ptr++; + if (!(*pat_ptr)) + return -ENOMEM; + + err = cn_printf(cn, "%s", pat_ptr); + if (err) + return err; + + /* Require absolute paths. */ + if (cn->corename[0] != '/') + return -EINVAL; + + /* + * Ensure we can uses spaces to indicate additional + * parameters in the future. + */ + if (strchr(cn->corename, ' ')) { + coredump_report_failure("Coredump socket may not %s contain spaces", cn->corename); + return -EINVAL; + } + + /* + * Currently no need to parse any other options. + * Relevant information can be retrieved from the peer + * pidfd retrievable via SO_PEERPIDFD by the receiver or + * via /proc/<pid>, using the SO_PEERPIDFD to guard + * against pid recycling when opening /proc/<pid>. + */ + return 0; + } + case COREDUMP_FILE: + break; + default: + WARN_ON_ONCE(true); + return -EINVAL; } /* Repeat as long as we have more pattern to process and more output @@ -239,7 +305,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, * Split on spaces before doing template expansion so that * %e and %E don't get split if they have spaces in them */ - if (ispipe) { + if (cn->core_type == COREDUMP_PIPE) { if (isspace(*pat_ptr)) { if (cn->used != 0) was_space = true; @@ -339,6 +405,27 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, case 'C': err = cn_printf(cn, "%d", cprm->cpu); break; + /* pidfd number */ + case 'F': { + /* + * Installing a pidfd only makes sense if + * we actually spawn a usermode helper. + */ + if (cn->core_type != COREDUMP_PIPE) + break; + + /* + * Note that we'll install a pidfd for the + * thread-group leader. We know that task + * linkage hasn't been removed yet and even if + * this @current isn't the actual thread-group + * leader we know that the thread-group leader + * cannot be reaped until @current has exited. 
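/*
 * Editor's sketch (userspace, not part of the patch): a pipe-mode helper for
 * a core_pattern such as "|/usr/local/sbin/core-catcher %F %p".  The helper
 * path and argument order are assumptions; what the patch guarantees is that
 * %F expands to COREDUMP_PIDFD_NUMBER (3) and that a pidfd for the crashing
 * task's thread-group leader is installed at that descriptor.
 */
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        int pidfd = argc > 1 ? atoi(argv[1]) : 3;       /* %F, currently always 3 */
        char buf[65536];
        ssize_t n;

        /* The core image still arrives on stdin, as with any pipe helper. */
        while ((n = read(STDIN_FILENO, buf, sizeof(buf))) > 0)
                ;       /* persist the data somewhere */

        /*
         * The pidfd can be used with the usual pidfd interfaces (for example
         * waitid(P_PIDFD, ...) or pidfd_getfd()) while the dump is handled.
         */
        close(pidfd);
        return n < 0 ? 1 : 0;
}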
+ */ + cprm->pid = task_tgid(current); + err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER); + break; + } default: break; } @@ -355,12 +442,10 @@ out: * If core_pattern does not include a %p (as is the default) * and core_uses_pid is set, then .%pid will be appended to * the filename. Do not do this for piped commands. */ - if (!ispipe && !pid_in_pattern && core_uses_pid) { - err = cn_printf(cn, ".%d", task_tgid_vnr(current)); - if (err) - return err; - } - return ispipe; + if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid) + return cn_printf(cn, ".%d", task_tgid_vnr(current)); + + return 0; } static int zap_process(struct signal_struct *signal, int exit_code) @@ -493,7 +578,7 @@ static void wait_for_dump_helpers(struct file *file) } /* - * umh_pipe_setup + * umh_coredump_setup * helper function to customize the process used * to collect the core in userspace. Specifically * it sets up a pipe and installs it as fd 0 (stdin) @@ -503,11 +588,34 @@ static void wait_for_dump_helpers(struct file *file) * is a special value that we use to trap recursive * core dumps */ -static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) +static int umh_coredump_setup(struct subprocess_info *info, struct cred *new) { struct file *files[2]; struct coredump_params *cp = (struct coredump_params *)info->data; - int err = create_pipe_files(files, 0); + int err; + + if (cp->pid) { + struct file *pidfs_file __free(fput) = NULL; + + pidfs_file = pidfs_alloc_file(cp->pid, 0); + if (IS_ERR(pidfs_file)) + return PTR_ERR(pidfs_file); + + pidfs_coredump(cp); + + /* + * Usermode helpers are childen of either + * system_unbound_wq or of kthreadd. So we know that + * we're starting off with a clean file descriptor + * table. So we should always be able to use + * COREDUMP_PIDFD_NUMBER as our file descriptor value. + */ + err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0); + if (err < 0) + return err; + } + + err = create_pipe_files(files, 0); if (err) return err; @@ -515,10 +623,13 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) err = replace_fd(0, files[0], 0); fput(files[0]); + if (err < 0) + return err; + /* and disallow core files too */ current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; - return err; + return 0; } void do_coredump(const kernel_siginfo_t *siginfo) @@ -530,7 +641,6 @@ void do_coredump(const kernel_siginfo_t *siginfo) const struct cred *old_cred; struct cred *cred; int retval = 0; - int ispipe; size_t *argv = NULL; int argc = 0; /* require nonrelative corefile path and be extra careful */ @@ -579,70 +689,14 @@ void do_coredump(const kernel_siginfo_t *siginfo) old_cred = override_creds(cred); - ispipe = format_corename(&cn, &cprm, &argv, &argc); - - if (ispipe) { - int argi; - int dump_count; - char **helper_argv; - struct subprocess_info *sub_info; - - if (ispipe < 0) { - coredump_report_failure("format_corename failed, aborting core"); - goto fail_unlock; - } - - if (cprm.limit == 1) { - /* See umh_pipe_setup() which sets RLIMIT_CORE = 1. - * - * Normally core limits are irrelevant to pipes, since - * we're not writing to the file system, but we use - * cprm.limit of 1 here as a special value, this is a - * consistent way to catch recursive crashes. - * We can still crash if the core_pattern binary sets - * RLIM_CORE = !1, but it runs as root, and can do - * lots of stupid things. - * - * Note that we use task_tgid_vnr here to grab the pid - * of the process group leader. 
That way we get the - * right pid if a thread in a multi-threaded - * core_pattern process dies. - */ - coredump_report_failure("RLIMIT_CORE is set to 1, aborting core"); - goto fail_unlock; - } - cprm.limit = RLIM_INFINITY; - - dump_count = atomic_inc_return(&core_dump_count); - if (core_pipe_limit && (core_pipe_limit < dump_count)) { - coredump_report_failure("over core_pipe_limit, skipping core dump"); - goto fail_dropcount; - } - - helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), - GFP_KERNEL); - if (!helper_argv) { - coredump_report_failure("%s failed to allocate memory", __func__); - goto fail_dropcount; - } - for (argi = 0; argi < argc; argi++) - helper_argv[argi] = cn.corename + argv[argi]; - helper_argv[argi] = NULL; - - retval = -ENOMEM; - sub_info = call_usermodehelper_setup(helper_argv[0], - helper_argv, NULL, GFP_KERNEL, - umh_pipe_setup, NULL, &cprm); - if (sub_info) - retval = call_usermodehelper_exec(sub_info, - UMH_WAIT_EXEC); + retval = format_corename(&cn, &cprm, &argv, &argc); + if (retval < 0) { + coredump_report_failure("format_corename failed, aborting core"); + goto fail_unlock; + } - kfree(helper_argv); - if (retval) { - coredump_report_failure("|%s pipe failed", cn.corename); - goto close_fail; - } - } else { + switch (cn.core_type) { + case COREDUMP_FILE: { struct mnt_idmap *idmap; struct inode *inode; int open_flags = O_CREAT | O_WRONLY | O_NOFOLLOW | @@ -736,6 +790,143 @@ void do_coredump(const kernel_siginfo_t *siginfo) if (do_truncate(idmap, cprm.file->f_path.dentry, 0, 0, cprm.file)) goto close_fail; + break; + } + case COREDUMP_PIPE: { + int argi; + int dump_count; + char **helper_argv; + struct subprocess_info *sub_info; + + if (cprm.limit == 1) { + /* See umh_coredump_setup() which sets RLIMIT_CORE = 1. + * + * Normally core limits are irrelevant to pipes, since + * we're not writing to the file system, but we use + * cprm.limit of 1 here as a special value, this is a + * consistent way to catch recursive crashes. + * We can still crash if the core_pattern binary sets + * RLIM_CORE = !1, but it runs as root, and can do + * lots of stupid things. + * + * Note that we use task_tgid_vnr here to grab the pid + * of the process group leader. That way we get the + * right pid if a thread in a multi-threaded + * core_pattern process dies. 
+ */ + coredump_report_failure("RLIMIT_CORE is set to 1, aborting core"); + goto fail_unlock; + } + cprm.limit = RLIM_INFINITY; + + dump_count = atomic_inc_return(&core_dump_count); + if (core_pipe_limit && (core_pipe_limit < dump_count)) { + coredump_report_failure("over core_pipe_limit, skipping core dump"); + goto fail_dropcount; + } + + helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv), + GFP_KERNEL); + if (!helper_argv) { + coredump_report_failure("%s failed to allocate memory", __func__); + goto fail_dropcount; + } + for (argi = 0; argi < argc; argi++) + helper_argv[argi] = cn.corename + argv[argi]; + helper_argv[argi] = NULL; + + retval = -ENOMEM; + sub_info = call_usermodehelper_setup(helper_argv[0], + helper_argv, NULL, GFP_KERNEL, + umh_coredump_setup, NULL, &cprm); + if (sub_info) + retval = call_usermodehelper_exec(sub_info, + UMH_WAIT_EXEC); + + kfree(helper_argv); + if (retval) { + coredump_report_failure("|%s pipe failed", cn.corename); + goto close_fail; + } + break; + } + case COREDUMP_SOCK: { +#ifdef CONFIG_UNIX + struct file *file __free(fput) = NULL; + struct sockaddr_un addr = { + .sun_family = AF_UNIX, + }; + ssize_t addr_len; + struct socket *socket; + + addr_len = strscpy(addr.sun_path, cn.corename); + if (addr_len < 0) + goto close_fail; + addr_len += offsetof(struct sockaddr_un, sun_path) + 1; + + /* + * It is possible that the userspace process which is + * supposed to handle the coredump and is listening on + * the AF_UNIX socket coredumps. Userspace should just + * mark itself non dumpable. + */ + + retval = sock_create_kern(&init_net, AF_UNIX, SOCK_STREAM, 0, &socket); + if (retval < 0) + goto close_fail; + + file = sock_alloc_file(socket, 0, NULL); + if (IS_ERR(file)) + goto close_fail; + + /* + * Set the thread-group leader pid which is used for the + * peer credentials during connect() below. Then + * immediately register it in pidfs... + */ + cprm.pid = task_tgid(current); + retval = pidfs_register_pid(cprm.pid); + if (retval) + goto close_fail; + + /* + * ... and set the coredump information so userspace + * has it available after connect()... + */ + pidfs_coredump(&cprm); + + retval = kernel_connect(socket, (struct sockaddr *)(&addr), + addr_len, O_NONBLOCK | SOCK_COREDUMP); + + /* + * ... Make sure to only put our reference after connect() took + * its own reference keeping the pidfs entry alive ... + */ + pidfs_put_pid(cprm.pid); + + if (retval) { + if (retval == -EAGAIN) + coredump_report_failure("Coredump socket %s receive queue full", addr.sun_path); + else + coredump_report_failure("Coredump socket connection %s failed %d", addr.sun_path, retval); + goto close_fail; + } + + /* ... and validate that @sk_peer_pid matches @cprm.pid. */ + if (WARN_ON_ONCE(unix_peer(socket->sk)->sk_peer_pid != cprm.pid)) + goto close_fail; + + cprm.limit = RLIM_INFINITY; + cprm.file = no_free_ptr(file); +#else + coredump_report_failure("Core dump socket support %s disabled", cn.corename); + goto close_fail; +#endif + break; + } + default: + WARN_ON_ONCE(true); + goto close_fail; } /* get us an unshared descriptor table; almost always a no-op */ @@ -770,13 +961,49 @@ void do_coredump(const kernel_siginfo_t *siginfo) file_end_write(cprm.file); free_vma_snapshot(&cprm); } - if (ispipe && core_pipe_limit) - wait_for_dump_helpers(cprm.file); + +#ifdef CONFIG_UNIX + /* Let userspace know we're done processing the coredump. 
*/ + if (sock_from_file(cprm.file)) + kernel_sock_shutdown(sock_from_file(cprm.file), SHUT_WR); +#endif + + /* + * When core_pipe_limit is set we wait for the coredump server + * or usermodehelper to finish before exiting so it can e.g., + * inspect /proc/<pid>. + */ + if (core_pipe_limit) { + switch (cn.core_type) { + case COREDUMP_PIPE: + wait_for_dump_helpers(cprm.file); + break; +#ifdef CONFIG_UNIX + case COREDUMP_SOCK: { + ssize_t n; + + /* + * We use a simple read to wait for the coredump + * processing to finish. Either the socket is + * closed or we get sent unexpected data. In + * both cases, we're done. + */ + n = __kernel_read(cprm.file, &(char){ 0 }, 1, NULL); + if (n != 0) + coredump_report_failure("Unexpected data on coredump socket"); + break; + } +#endif + default: + break; + } + } + close_fail: if (cprm.file) filp_close(cprm.file, NULL); fail_dropcount: - if (ispipe) + if (cn.core_type == COREDUMP_PIPE) atomic_dec(&core_dump_count); fail_unlock: kfree(argv); @@ -799,10 +1026,9 @@ static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr) struct file *file = cprm->file; loff_t pos = file->f_pos; ssize_t n; + if (cprm->written + nr > cprm->limit) return 0; - - if (dump_interrupted()) return 0; n = __kernel_write(file, addr, nr, &pos); @@ -819,20 +1045,21 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr) { static char zeroes[PAGE_SIZE]; struct file *file = cprm->file; + if (file->f_mode & FMODE_LSEEK) { - if (dump_interrupted() || - vfs_llseek(file, nr, SEEK_CUR) < 0) + if (dump_interrupted() || vfs_llseek(file, nr, SEEK_CUR) < 0) return 0; cprm->pos += nr; return 1; - } else { - while (nr > PAGE_SIZE) { - if (!__dump_emit(cprm, zeroes, PAGE_SIZE)) - return 0; - nr -= PAGE_SIZE; - } - return __dump_emit(cprm, zeroes, nr); } + + while (nr > PAGE_SIZE) { + if (!__dump_emit(cprm, zeroes, PAGE_SIZE)) + return 0; + nr -= PAGE_SIZE; + } + + return __dump_emit(cprm, zeroes, nr); } int dump_emit(struct coredump_params *cprm, const void *addr, int nr) @@ -1001,7 +1228,7 @@ EXPORT_SYMBOL(dump_align); void validate_coredump_safety(void) { if (suid_dumpable == SUID_DUMP_ROOT && - core_pattern[0] != '/' && core_pattern[0] != '|') { + core_pattern[0] != '/' && core_pattern[0] != '|' && core_pattern[0] != '@') { coredump_report_failure("Unsafe core_pattern used with fs.suid_dumpable=2: " "pipe handler or fully qualified core dump path required. " @@ -1009,18 +1236,55 @@ void validate_coredump_safety(void) } } +static inline bool check_coredump_socket(void) +{ + if (core_pattern[0] != '@') + return true; + + /* + * Coredump socket must be located in the initial mount + * namespace. Don't give the impression that anything else is + * supported right now. + */ + if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns) + return false; + + /* Must be an absolute path. 
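/*
 * Editor's sketch (userspace, not part of the patch): a minimal server for
 * the new socket mode, assuming core_pattern was set to "@/run/coredump.sock"
 * (the path and buffer size are made up).  It mirrors the kernel behaviour in
 * the hunks above: the kernel connects once per crash, streams the core,
 * shuts down its write side when done, and, when core_pipe_limit is set,
 * blocks in a read until the server closes the connection.
 * SO_PEERPIDFD requires Linux 6.5+ userspace headers.
 */
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
        struct sockaddr_un addr = { .sun_family = AF_UNIX,
                                    .sun_path   = "/run/coredump.sock" };
        int srv = socket(AF_UNIX, SOCK_STREAM, 0);
        char buf[65536];

        unlink(addr.sun_path);
        if (srv < 0 || bind(srv, (struct sockaddr *)&addr, sizeof(addr)) || listen(srv, 8))
                return 1;

        for (;;) {
                int conn = accept(srv, NULL, NULL);
                int peer_pidfd = -1;
                socklen_t len = sizeof(peer_pidfd);
                ssize_t n;

                if (conn < 0)
                        continue;
                /* Identify the crashing process without pid-reuse races. */
                getsockopt(conn, SOL_SOCKET, SO_PEERPIDFD, &peer_pidfd, &len);

                /* EOF means the kernel did kernel_sock_shutdown(SHUT_WR). */
                while ((n = read(conn, buf, sizeof(buf))) > 0)
                        ;       /* persist the core data */

                /* Closing the connection lets the dumping task exit. */
                close(conn);
                if (peer_pidfd >= 0)
                        close(peer_pidfd);
        }
}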
*/ + if (*(core_pattern + 1) != '/') + return false; + + return true; +} + static int proc_dostring_coredump(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { - int error = proc_dostring(table, write, buffer, lenp, ppos); + int error; + ssize_t retval; + char old_core_pattern[CORENAME_MAX_SIZE]; + + retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE); + + error = proc_dostring(table, write, buffer, lenp, ppos); + if (error) + return error; + if (!check_coredump_socket()) { + strscpy(core_pattern, old_core_pattern, retval + 1); + return -EINVAL; + } - if (!error) - validate_coredump_safety(); + validate_coredump_safety(); return error; } static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT; static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX; +static char core_modes[] = { + "file\npipe" +#ifdef CONFIG_UNIX + "\nsocket" +#endif +}; static const struct ctl_table coredump_sysctls[] = { { @@ -1064,6 +1328,13 @@ static const struct ctl_table coredump_sysctls[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "core_modes", + .data = core_modes, + .maxlen = sizeof(core_modes) - 1, + .mode = 0444, + .proc_handler = proc_dostring, + }, }; static int __init init_fs_coredump_sysctls(void) diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 8371e4e1f596..c1d92074b65c 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -12,6 +12,7 @@ #define _FSCRYPT_PRIVATE_H #include <linux/fscrypt.h> +#include <linux/minmax.h> #include <linux/siphash.h> #include <crypto/hash.h> #include <linux/blk-crypto.h> @@ -27,6 +28,23 @@ */ #define FSCRYPT_MIN_KEY_SIZE 16 +/* Maximum size of a raw fscrypt master key */ +#define FSCRYPT_MAX_RAW_KEY_SIZE 64 + +/* Maximum size of a hardware-wrapped fscrypt master key */ +#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE + +/* Maximum size of an fscrypt master key across both key types */ +#define FSCRYPT_MAX_ANY_KEY_SIZE \ + MAX(FSCRYPT_MAX_RAW_KEY_SIZE, FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE) + +/* + * FSCRYPT_MAX_KEY_SIZE is defined in the UAPI header, but the addition of + * hardware-wrapped keys has made it misleading as it's only for raw keys. + * Don't use it in kernel code; use one of the above constants instead. + */ +#undef FSCRYPT_MAX_KEY_SIZE + #define FSCRYPT_CONTEXT_V1 1 #define FSCRYPT_CONTEXT_V2 2 @@ -360,13 +378,15 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, * outputs are unique and cryptographically isolated, i.e. knowledge of one * output doesn't reveal another. 
*/ -#define HKDF_CONTEXT_KEY_IDENTIFIER 1 /* info=<empty> */ +#define HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY 1 /* info=<empty> */ #define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 /* info=file_nonce */ #define HKDF_CONTEXT_DIRECT_KEY 3 /* info=mode_num */ #define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 /* info=mode_num||fs_uuid */ #define HKDF_CONTEXT_DIRHASH_KEY 5 /* info=file_nonce */ #define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 /* info=mode_num||fs_uuid */ #define HKDF_CONTEXT_INODE_HASH_KEY 7 /* info=<empty> */ +#define HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY \ + 8 /* info=<empty> */ int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, @@ -376,7 +396,8 @@ void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); /* inline_crypt.c */ #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT -int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci); +int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci, + bool is_hw_wrapped_key); static inline bool fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci) @@ -385,12 +406,17 @@ fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci) } int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, + const u8 *key_bytes, size_t key_size, + bool is_hw_wrapped, const struct fscrypt_inode_info *ci); void fscrypt_destroy_inline_crypt_key(struct super_block *sb, struct fscrypt_prepared_key *prep_key); +int fscrypt_derive_sw_secret(struct super_block *sb, + const u8 *wrapped_key, size_t wrapped_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]); + /* * Check whether the crypto transform or blk-crypto key has been allocated in * @prep_key, depending on which encryption implementation the file will use. @@ -414,7 +440,8 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ -static inline int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci) +static inline int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci, + bool is_hw_wrapped_key) { return 0; } @@ -427,7 +454,8 @@ fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci) static inline int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, + const u8 *key_bytes, size_t key_size, + bool is_hw_wrapped, const struct fscrypt_inode_info *ci) { WARN_ON_ONCE(1); @@ -440,6 +468,15 @@ fscrypt_destroy_inline_crypt_key(struct super_block *sb, { } +static inline int +fscrypt_derive_sw_secret(struct super_block *sb, + const u8 *wrapped_key, size_t wrapped_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + fscrypt_warn(NULL, "kernel doesn't support hardware-wrapped keys"); + return -EOPNOTSUPP; +} + static inline bool fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, const struct fscrypt_inode_info *ci) @@ -456,20 +493,38 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, struct fscrypt_master_key_secret { /* - * For v2 policy keys: HKDF context keyed by this master key. - * For v1 policy keys: not set (hkdf.hmac_tfm == NULL). + * The KDF with which subkeys of this key can be derived. + * + * For v1 policy keys, this isn't applicable and won't be set. + * Otherwise, this KDF will be keyed by this master key if + * ->is_hw_wrapped=false, or by the "software secret" that hardware + * derived from this master key if ->is_hw_wrapped=true. */ struct fscrypt_hkdf hkdf; /* - * Size of the raw key in bytes. 
This remains set even if ->raw was + * True if this key is a hardware-wrapped key; false if this key is a + * raw key (i.e. a "software key"). For v1 policy keys this will always + * be false, as v1 policy support is a legacy feature which doesn't + * support newer functionality such as hardware-wrapped keys. + */ + bool is_hw_wrapped; + + /* + * Size of the key in bytes. This remains set even if ->bytes was * zeroized due to no longer being needed. I.e. we still remember the * size of the key even if we don't need to remember the key itself. */ u32 size; - /* For v1 policy keys: the raw key. Wiped for v2 policy keys. */ - u8 raw[FSCRYPT_MAX_KEY_SIZE]; + /* + * The bytes of the key, when still needed. This can be either a raw + * key or a hardware-wrapped key, as indicated by ->is_hw_wrapped. In + * the case of a raw, v2 policy key, there is no need to remember the + * actual key separately from ->hkdf so this field will be zeroized as + * soon as ->hkdf is initialized. + */ + u8 bytes[FSCRYPT_MAX_ANY_KEY_SIZE]; } __randomize_layout; diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c index 855a0f4b7318..0f3028adc9c7 100644 --- a/fs/crypto/hkdf.c +++ b/fs/crypto/hkdf.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * This is used to derive keys from the fscrypt master keys. + * This is used to derive keys from the fscrypt master keys (or from the + * "software secrets" which hardware derives from the fscrypt master keys, in + * the case that the fscrypt master keys are hardware-wrapped keys). * * Copyright 2019 Google LLC */ diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index 7fa53d30aec3..1d008c440cb6 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -89,7 +89,8 @@ static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode, } /* Enable inline encryption for this file if supported. */ -int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci) +int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci, + bool is_hw_wrapped_key) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; @@ -130,7 +131,8 @@ int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci) crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode; crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits; crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci); - crypto_cfg.key_type = BLK_CRYPTO_KEY_TYPE_RAW; + crypto_cfg.key_type = is_hw_wrapped_key ? + BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW; devs = fscrypt_get_devices(sb, &num_devs); if (IS_ERR(devs)) @@ -151,12 +153,15 @@ out_free_devs: } int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, + const u8 *key_bytes, size_t key_size, + bool is_hw_wrapped, const struct fscrypt_inode_info *ci) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + enum blk_crypto_key_type key_type = is_hw_wrapped ? 
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW; struct blk_crypto_key *blk_key; struct block_device **devs; unsigned int num_devs; @@ -167,9 +172,8 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, if (!blk_key) return -ENOMEM; - err = blk_crypto_init_key(blk_key, raw_key, ci->ci_mode->keysize, - BLK_CRYPTO_KEY_TYPE_RAW, crypto_mode, - fscrypt_get_dun_bytes(ci), + err = blk_crypto_init_key(blk_key, key_bytes, key_size, key_type, + crypto_mode, fscrypt_get_dun_bytes(ci), 1U << ci->ci_data_unit_bits); if (err) { fscrypt_err(inode, "error %d initializing blk-crypto key", err); @@ -228,6 +232,34 @@ void fscrypt_destroy_inline_crypt_key(struct super_block *sb, kfree_sensitive(blk_key); } +/* + * Ask the inline encryption hardware to derive the software secret from a + * hardware-wrapped key. Returns -EOPNOTSUPP if hardware-wrapped keys aren't + * supported on this filesystem or hardware. + */ +int fscrypt_derive_sw_secret(struct super_block *sb, + const u8 *wrapped_key, size_t wrapped_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + int err; + + /* The filesystem must be mounted with -o inlinecrypt. */ + if (!(sb->s_flags & SB_INLINECRYPT)) { + fscrypt_warn(NULL, + "%s: filesystem not mounted with inlinecrypt\n", + sb->s_id); + return -EOPNOTSUPP; + } + + err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key, + wrapped_key_size, sw_secret); + if (err == -EOPNOTSUPP) + fscrypt_warn(NULL, + "%s: block device doesn't support hardware-wrapped keys\n", + sb->s_id); + return err; +} + bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) { return inode->i_crypt_info->ci_inlinecrypt; diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 787e9c8938ba..ace369f13068 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -149,11 +149,11 @@ static int fscrypt_user_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { /* - * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for - * each key, regardless of the exact key size. The amount of memory + * We just charge FSCRYPT_MAX_RAW_KEY_SIZE bytes to the user's key quota + * for each key, regardless of the exact key size. The amount of memory * actually used is greater than the size of the raw key anyway. */ - return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE); + return key_payload_reserve(key, FSCRYPT_MAX_RAW_KEY_SIZE); } static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m) @@ -558,20 +558,45 @@ static int add_master_key(struct super_block *sb, int err; if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { - err = fscrypt_init_hkdf(&secret->hkdf, secret->raw, - secret->size); - if (err) - return err; + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]; + u8 *kdf_key = secret->bytes; + unsigned int kdf_key_size = secret->size; + u8 keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY; /* - * Now that the HKDF context is initialized, the raw key is no - * longer needed. + * For raw keys, the fscrypt master key is used directly as the + * fscrypt KDF key. For hardware-wrapped keys, we have to pass + * the master key to the hardware to derive the KDF key, which + * is then only used to derive non-file-contents subkeys. 
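/*
 * Editor's sketch (kernel-internal, assumes fscrypt_private.h scope; it
 * mirrors the add_master_key() handling above but is not the patch's code):
 * how a hardware-wrapped master key yields the fscrypt KDF key and the key
 * identifier.  Raw keys instead feed the master key straight into
 * fscrypt_init_hkdf() and use HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY.
 */
static int example_hw_wrapped_key_identifier(struct super_block *sb,
                                             const u8 *wrapped_key, size_t wrapped_key_size,
                                             u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
        u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];
        struct fscrypt_hkdf hkdf;
        int err;

        /* The inline-encryption hardware derives the software secret. */
        err = fscrypt_derive_sw_secret(sb, wrapped_key, wrapped_key_size, sw_secret);
        if (err)
                return err;

        /* The KDF is keyed by the software secret, never by the wrapped key. */
        err = fscrypt_init_hkdf(&hkdf, sw_secret, sizeof(sw_secret));
        memzero_explicit(sw_secret, sizeof(sw_secret));
        if (err)
                return err;

        /* A distinct HKDF context keeps HW-wrapped and raw identifiers apart. */
        err = fscrypt_hkdf_expand(&hkdf, HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY,
                                  NULL, 0, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);
        fscrypt_destroy_hkdf(&hkdf);
        return err;
}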
+ */ + if (secret->is_hw_wrapped) { + err = fscrypt_derive_sw_secret(sb, secret->bytes, + secret->size, sw_secret); + if (err) + return err; + kdf_key = sw_secret; + kdf_key_size = sizeof(sw_secret); + /* + * To avoid weird behavior if someone manages to + * determine sw_secret and add it as a raw key, ensure + * that hardware-wrapped keys and raw keys will have + * different key identifiers by deriving their key + * identifiers using different KDF contexts. + */ + keyid_kdf_ctx = + HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY; + } + err = fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size); + /* + * Now that the KDF context is initialized, the raw KDF key is + * no longer needed. */ - memzero_explicit(secret->raw, secret->size); + memzero_explicit(kdf_key, kdf_key_size); + if (err) + return err; /* Calculate the key identifier */ - err = fscrypt_hkdf_expand(&secret->hkdf, - HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0, + err = fscrypt_hkdf_expand(&secret->hkdf, keyid_kdf_ctx, NULL, 0, key_spec->u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); if (err) @@ -580,19 +605,36 @@ static int add_master_key(struct super_block *sb, return do_add_master_key(sb, secret, key_spec); } +/* + * Validate the size of an fscrypt master key being added. Note that this is + * just an initial check, as we don't know which ciphers will be used yet. + * There is a stricter size check later when the key is actually used by a file. + */ +static inline bool fscrypt_valid_key_size(size_t size, u32 add_key_flags) +{ + u32 max_size = (add_key_flags & FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ? + FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : + FSCRYPT_MAX_RAW_KEY_SIZE; + + return size >= FSCRYPT_MIN_KEY_SIZE && size <= max_size; +} + static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; - if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE || - prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE) + if (prep->datalen < sizeof(*payload)) + return -EINVAL; + + if (!fscrypt_valid_key_size(prep->datalen - sizeof(*payload), + payload->flags)) return -EINVAL; if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) return -EINVAL; - if (payload->__reserved) + if (payload->flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) return -EINVAL; prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL); @@ -636,21 +678,21 @@ static struct key_type key_type_fscrypt_provisioning = { }; /* - * Retrieve the raw key from the Linux keyring key specified by 'key_id', and - * store it into 'secret'. + * Retrieve the key from the Linux keyring key specified by 'key_id', and store + * it into 'secret'. * - * The key must be of type "fscrypt-provisioning" and must have the field - * fscrypt_provisioning_key_payload::type set to 'type', indicating that it's - * only usable with fscrypt with the particular KDF version identified by - * 'type'. We don't use the "logon" key type because there's no way to - * completely restrict the use of such keys; they can be used by any kernel API - * that accepts "logon" keys and doesn't require a specific service prefix. + * The key must be of type "fscrypt-provisioning" and must have the 'type' and + * 'flags' field of the payload set to the given values, indicating that the key + * is intended for use for the specified purpose. 
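/*
 * Editor's sketch (userspace, not part of the patch): passing a
 * hardware-wrapped key blob to FS_IOC_ADD_ENCRYPTION_KEY.  Only fields
 * visible in this diff are used (key_spec, raw_size, flags, raw); treat this
 * as an illustration of the new flag, not as UAPI documentation.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

static int add_hw_wrapped_key(int mnt_fd, const void *blob, size_t blob_size,
                              unsigned char id_out[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
        struct fscrypt_add_key_arg *arg;
        int ret;

        arg = calloc(1, sizeof(*arg) + blob_size);
        if (!arg)
                return -1;

        arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
        arg->raw_size = blob_size;
        arg->flags = FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
        memcpy(arg->raw, blob, blob_size);

        ret = ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
        if (ret == 0)
                /* The kernel derived this via the HW-wrapped HKDF context. */
                memcpy(id_out, arg->key_spec.u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE);

        /* A real program would wipe the key copy before freeing it. */
        free(arg);
        return ret;
}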
We don't use the "logon" key + * type because there's no way to completely restrict the use of such keys; they + * can be used by any kernel API that accepts "logon" keys and doesn't require a + * specific service prefix. * * The ability to specify the key via Linux keyring key is intended for cases * where userspace needs to re-add keys after the filesystem is unmounted and - * re-mounted. Most users should just provide the raw key directly instead. + * re-mounted. Most users should just provide the key directly instead. */ -static int get_keyring_key(u32 key_id, u32 type, +static int get_keyring_key(u32 key_id, u32 type, u32 flags, struct fscrypt_master_key_secret *secret) { key_ref_t ref; @@ -667,12 +709,16 @@ static int get_keyring_key(u32 key_id, u32 type, goto bad_key; payload = key->payload.data[0]; - /* Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. */ - if (payload->type != type) + /* + * Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. + * Similarly, don't allow hardware-wrapped keys to be used as + * non-hardware-wrapped keys and vice versa. + */ + if (payload->type != type || payload->flags != flags) goto bad_key; secret->size = key->datalen - sizeof(*payload); - memcpy(secret->raw, payload->raw, secret->size); + memcpy(secret->bytes, payload->raw, secret->size); err = 0; goto out_put; @@ -734,19 +780,28 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) return -EACCES; memset(&secret, 0, sizeof(secret)); + + if (arg.flags) { + if (arg.flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) + return -EINVAL; + if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) + return -EINVAL; + secret.is_hw_wrapped = true; + } + if (arg.key_id) { if (arg.raw_size != 0) return -EINVAL; - err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret); + err = get_keyring_key(arg.key_id, arg.key_spec.type, arg.flags, + &secret); if (err) goto out_wipe_secret; } else { - if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || - arg.raw_size > FSCRYPT_MAX_KEY_SIZE) + if (!fscrypt_valid_key_size(arg.raw_size, arg.flags)) return -EINVAL; secret.size = arg.raw_size; err = -EFAULT; - if (copy_from_user(secret.raw, uarg->raw, secret.size)) + if (copy_from_user(secret.bytes, uarg->raw, secret.size)) goto out_wipe_secret; } @@ -770,13 +825,13 @@ EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); static void fscrypt_get_test_dummy_secret(struct fscrypt_master_key_secret *secret) { - static u8 test_key[FSCRYPT_MAX_KEY_SIZE]; + static u8 test_key[FSCRYPT_MAX_RAW_KEY_SIZE]; - get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE); + get_random_once(test_key, sizeof(test_key)); memset(secret, 0, sizeof(*secret)); - secret->size = FSCRYPT_MAX_KEY_SIZE; - memcpy(secret->raw, test_key, FSCRYPT_MAX_KEY_SIZE); + secret->size = sizeof(test_key); + memcpy(secret->bytes, test_key, sizeof(test_key)); } int fscrypt_get_test_dummy_key_identifier( @@ -787,10 +842,11 @@ int fscrypt_get_test_dummy_key_identifier( fscrypt_get_test_dummy_secret(&secret); - err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); + err = fscrypt_init_hkdf(&secret.hkdf, secret.bytes, secret.size); if (err) goto out; - err = fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER, + err = fscrypt_hkdf_expand(&secret.hkdf, + HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY, NULL, 0, key_identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); out: diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index b4fe01ea4bd4..0d71843af946 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -153,7 +153,9 @@ int 
fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, struct crypto_skcipher *tfm; if (fscrypt_using_inline_encryption(ci)) - return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci); + return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, + ci->ci_mode->keysize, + false, ci); tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); if (IS_ERR(tfm)) @@ -195,14 +197,29 @@ static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci, struct fscrypt_mode *mode = ci->ci_mode; const u8 mode_num = mode - fscrypt_modes; struct fscrypt_prepared_key *prep_key; - u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; + u8 mode_key[FSCRYPT_MAX_RAW_KEY_SIZE]; u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; unsigned int hkdf_infolen = 0; + bool use_hw_wrapped_key = false; int err; if (WARN_ON_ONCE(mode_num > FSCRYPT_MODE_MAX)) return -EINVAL; + if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) { + /* Using a hardware-wrapped key for file contents encryption */ + if (!fscrypt_using_inline_encryption(ci)) { + if (sb->s_flags & SB_INLINECRYPT) + fscrypt_warn(ci->ci_inode, + "Hardware-wrapped key required, but no suitable inline encryption capabilities are available"); + else + fscrypt_warn(ci->ci_inode, + "Hardware-wrapped keys require inline encryption (-o inlinecrypt)"); + return -EINVAL; + } + use_hw_wrapped_key = true; + } + prep_key = &keys[mode_num]; if (fscrypt_is_key_prepared(prep_key, ci)) { ci->ci_enc_key = *prep_key; @@ -214,6 +231,16 @@ static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci, if (fscrypt_is_key_prepared(prep_key, ci)) goto done_unlock; + if (use_hw_wrapped_key) { + err = fscrypt_prepare_inline_crypt_key(prep_key, + mk->mk_secret.bytes, + mk->mk_secret.size, true, + ci); + if (err) + goto out_unlock; + goto done_unlock; + } + BUILD_BUG_ON(sizeof(mode_num) != 1); BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); BUILD_BUG_ON(sizeof(hkdf_info) != 17); @@ -336,6 +363,14 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci, { int err; + if (mk->mk_secret.is_hw_wrapped && + !(ci->ci_policy.v2.flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 | + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))) { + fscrypt_warn(ci->ci_inode, + "Hardware-wrapped keys are only supported with IV_INO_LBLK policies"); + return -EINVAL; + } + if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { /* * DIRECT_KEY: instead of deriving per-file encryption keys, the @@ -362,7 +397,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci, FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk); } else { - u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; + u8 derived_key[FSCRYPT_MAX_RAW_KEY_SIZE]; err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_PER_FILE_ENC_KEY, @@ -445,10 +480,6 @@ static int setup_file_encryption_key(struct fscrypt_inode_info *ci, struct fscrypt_master_key *mk; int err; - err = fscrypt_select_encryption_impl(ci); - if (err) - return err; - err = fscrypt_policy_to_key_spec(&ci->ci_policy, &mk_spec); if (err) return err; @@ -476,6 +507,10 @@ static int setup_file_encryption_key(struct fscrypt_inode_info *ci, if (ci->ci_policy.version != FSCRYPT_POLICY_V1) return -ENOKEY; + err = fscrypt_select_encryption_impl(ci, false); + if (err) + return err; + /* * As a legacy fallback for v1 policies, search for the key in * the current task's subscribed keyrings too. 
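The IV_INO_LBLK requirement enforced in fscrypt_setup_v2_file_key() above means a directory that is meant to use a hardware-wrapped master key needs a policy along these lines (hypothetical userspace sketch; the 16-byte identifier is the one returned when the master key was added):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

/* dir_fd: open fd on the directory to encrypt. */
static int set_iv_ino_lblk_policy(int dir_fd, const __u8 *key_identifier)
{
	struct fscrypt_policy_v2 policy = {
		.version = FSCRYPT_POLICY_V2,
		.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS,
		.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS,
		/* hardware-wrapped keys only work with IV_INO_LBLK policies */
		.flags = FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64,
	};

	memcpy(policy.master_key_identifier, key_identifier,
	       FSCRYPT_KEY_IDENTIFIER_SIZE);
	return ioctl(dir_fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}

The filesystem also has to be mounted with -o inlinecrypt so that fscrypt_select_encryption_impl() picks the inline path, per the warnings added above.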
Don't move this @@ -497,9 +532,21 @@ static int setup_file_encryption_key(struct fscrypt_inode_info *ci, goto out_release_key; } + err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped); + if (err) + goto out_release_key; + switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: - err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); + if (WARN_ON_ONCE(mk->mk_secret.is_hw_wrapped)) { + /* + * This should never happen, as adding a v1 policy key + * that is hardware-wrapped isn't allowed. + */ + err = -EINVAL; + goto out_release_key; + } + err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.bytes); break; case FSCRYPT_POLICY_V2: err = fscrypt_setup_v2_file_key(ci, mk, need_dirhash_key); diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index cf3b58ec32cc..b70521c55132 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -118,7 +118,7 @@ find_and_lock_process_key(const char *prefix, payload = (const struct fscrypt_key *)ukp->data; if (ukp->datalen != sizeof(struct fscrypt_key) || - payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) { + payload->size < 1 || payload->size > sizeof(payload->raw)) { fscrypt_warn(NULL, "key with description '%s' has invalid payload", key->description); @@ -149,7 +149,7 @@ struct fscrypt_direct_key { const struct fscrypt_mode *dk_mode; struct fscrypt_prepared_key dk_key; u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; - u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; + u8 dk_raw[FSCRYPT_MAX_RAW_KEY_SIZE]; }; static void free_direct_key(struct fscrypt_direct_key *dk) diff --git a/fs/dcache.c b/fs/dcache.c index bd5aa136153a..03d58b2d4fa3 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -74,10 +74,11 @@ * arbitrary, since it's serialized on rename_lock */ static int sysctl_vfs_cache_pressure __read_mostly = 100; +static int sysctl_vfs_cache_pressure_denom __read_mostly = 100; unsigned long vfs_pressure_ratio(unsigned long val) { - return mult_frac(val, sysctl_vfs_cache_pressure, 100); + return mult_frac(val, sysctl_vfs_cache_pressure, sysctl_vfs_cache_pressure_denom); } EXPORT_SYMBOL_GPL(vfs_pressure_ratio); @@ -225,6 +226,14 @@ static const struct ctl_table vm_dcache_sysctls[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, + { + .procname = "vfs_cache_pressure_denom", + .data = &sysctl_vfs_cache_pressure_denom, + .maxlen = sizeof(sysctl_vfs_cache_pressure_denom), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE_HUNDRED, + }, }; static int __init init_fs_dcache_sysctls(void) @@ -2412,7 +2421,6 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) } return d_lookup(dir, name); } -EXPORT_SYMBOL(d_hash_and_lookup); /* * When a file is deleted, we have two options: diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 75715d8877ee..30c4944e1862 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -346,7 +346,7 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent) if (!parent) parent = debugfs_mount->mnt_root; - dentry = lookup_positive_unlocked(name, parent, strlen(name)); + dentry = lookup_noperm_positive_unlocked(&QSTR(name), parent); if (IS_ERR(dentry)) return NULL; return dentry; @@ -388,7 +388,7 @@ static struct dentry *start_creating(const char *name, struct dentry *parent) if (unlikely(IS_DEADDIR(d_inode(parent)))) dentry = ERR_PTR(-ENOENT); else - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (!IS_ERR(dentry) && d_really_is_positive(dentry)) { if (d_is_dir(dentry)) 
pr_err("Directory '%s' with parent '%s' already present!\n", @@ -872,7 +872,7 @@ int __printf(2, 3) debugfs_change_name(struct dentry *dentry, const char *fmt, . } if (strcmp(old_name.name.name, new_name) == 0) goto out; - target = lookup_one_len(new_name, parent, strlen(new_name)); + target = lookup_noperm(&QSTR(new_name), parent); if (IS_ERR(target)) { error = PTR_ERR(target); goto out; diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 51a5c54eb740..493d7f194956 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -394,8 +394,8 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, char *encrypted_and_encoded_name = NULL; struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct dentry *lower_dir_dentry, *lower_dentry; - const char *name = ecryptfs_dentry->d_name.name; - size_t len = ecryptfs_dentry->d_name.len; + struct qstr qname = QSTR_INIT(ecryptfs_dentry->d_name.name, + ecryptfs_dentry->d_name.len); struct dentry *res; int rc = 0; @@ -404,23 +404,25 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { + size_t len = qname.len; rc = ecryptfs_encrypt_and_encode_filename( &encrypted_and_encoded_name, &len, - mount_crypt_stat, name, len); + mount_crypt_stat, qname.name, len); if (rc) { printk(KERN_ERR "%s: Error attempting to encrypt and encode " "filename; rc = [%d]\n", __func__, rc); return ERR_PTR(rc); } - name = encrypted_and_encoded_name; + qname.name = encrypted_and_encoded_name; + qname.len = len; } - lower_dentry = lookup_one_len_unlocked(name, lower_dir_dentry, len); + lower_dentry = lookup_noperm_unlocked(&qname, lower_dir_dentry); if (IS_ERR(lower_dentry)) { - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " + ecryptfs_printk(KERN_DEBUG, "%s: lookup_noperm() returned " "[%ld] on lower_dentry = [%s]\n", __func__, PTR_ERR(lower_dentry), - name); + qname.name); res = ERR_CAST(lower_dentry); } else { res = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry); diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h index ac6a1dd0a6a5..f913b6824289 100644 --- a/fs/efivarfs/internal.h +++ b/fs/efivarfs/internal.h @@ -17,7 +17,6 @@ struct efivarfs_fs_info { struct efivarfs_mount_opts mount_opts; struct super_block *sb; struct notifier_block nb; - struct notifier_block pm_nb; }; struct efi_variable { diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 0486e9b68bc6..c900d98bf494 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -18,8 +18,10 @@ #include <linux/statfs.h> #include <linux/notifier.h> #include <linux/printk.h> +#include <linux/namei.h> #include "internal.h" +#include "../internal.h" static int efivarfs_ops_notifier(struct notifier_block *nb, unsigned long event, void *data) @@ -119,12 +121,18 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } + +static int efivarfs_freeze_fs(struct super_block *sb); +static int efivarfs_unfreeze_fs(struct super_block *sb); + static const struct super_operations efivarfs_ops = { .statfs = efivarfs_statfs, .drop_inode = generic_delete_inode, .alloc_inode = efivarfs_alloc_inode, .free_inode = efivarfs_free_inode, .show_options = efivarfs_show_options, + .freeze_fs = efivarfs_freeze_fs, + .unfreeze_fs = efivarfs_unfreeze_fs, }; /* @@ -204,7 +212,6 @@ bool efivarfs_variable_is_present(efi_char16_t *variable_name, char *name = 
efivar_get_utf8name(variable_name, vendor); struct super_block *sb = data; struct dentry *dentry; - struct qstr qstr; if (!name) /* @@ -217,9 +224,7 @@ bool efivarfs_variable_is_present(efi_char16_t *variable_name, */ return true; - qstr.name = name; - qstr.len = strlen(name); - dentry = d_hash_and_lookup(sb->s_root, &qstr); + dentry = try_lookup_noperm(&QSTR(name), sb->s_root); kfree(name); if (!IS_ERR_OR_NULL(dentry)) dput(dentry); @@ -367,8 +372,6 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; - register_pm_notifier(&sfi->pm_nb); - return efivar_init(efivarfs_callback, sb, true); } @@ -393,55 +396,12 @@ static const struct fs_context_operations efivarfs_context_ops = { .reconfigure = efivarfs_reconfigure, }; -struct efivarfs_ctx { - struct dir_context ctx; - struct super_block *sb; - struct dentry *dentry; -}; - -static bool efivarfs_actor(struct dir_context *ctx, const char *name, int len, - loff_t offset, u64 ino, unsigned mode) -{ - unsigned long size; - struct efivarfs_ctx *ectx = container_of(ctx, struct efivarfs_ctx, ctx); - struct qstr qstr = { .name = name, .len = len }; - struct dentry *dentry = d_hash_and_lookup(ectx->sb->s_root, &qstr); - struct inode *inode; - struct efivar_entry *entry; - int err; - - if (IS_ERR_OR_NULL(dentry)) - return true; - - inode = d_inode(dentry); - entry = efivar_entry(inode); - - err = efivar_entry_size(entry, &size); - size += sizeof(__u32); /* attributes */ - if (err) - size = 0; - - inode_lock_nested(inode, I_MUTEX_CHILD); - i_size_write(inode, size); - inode_unlock(inode); - - if (!size) { - ectx->dentry = dentry; - return false; - } - - dput(dentry); - - return true; -} - static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor, unsigned long name_size, void *data) { char *name; struct super_block *sb = data; struct dentry *dentry; - struct qstr qstr; int err; if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) @@ -451,9 +411,7 @@ static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor, if (!name) return -ENOMEM; - qstr.name = name; - qstr.len = strlen(name); - dentry = d_hash_and_lookup(sb->s_root, &qstr); + dentry = try_lookup_noperm(&QSTR(name), sb->s_root); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out; @@ -474,111 +432,59 @@ static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor, return err; } -static void efivarfs_deactivate_super_work(struct work_struct *work) -{ - struct super_block *s = container_of(work, struct super_block, - destroy_work); - /* - * note: here s->destroy_work is free for reuse (which - * will happen in deactivate_super) - */ - deactivate_super(s); -} - static struct file_system_type efivarfs_type; -static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action, - void *ptr) +static int efivarfs_freeze_fs(struct super_block *sb) { - struct efivarfs_fs_info *sfi = container_of(nb, struct efivarfs_fs_info, - pm_nb); - struct path path; - struct efivarfs_ctx ectx = { - .ctx = { - .actor = efivarfs_actor, - }, - .sb = sfi->sb, - }; - struct file *file; - struct super_block *s = sfi->sb; - static bool rescan_done = true; - - if (action == PM_HIBERNATION_PREPARE) { - rescan_done = false; - return NOTIFY_OK; - } else if (action != PM_POST_HIBERNATION) { - return NOTIFY_DONE; - } - - if (rescan_done) - return NOTIFY_DONE; - - /* ensure single superblock is alive and pin it */ - if (!atomic_inc_not_zero(&s->s_active)) - return NOTIFY_DONE; - - pr_info("efivarfs: resyncing variable 
state\n"); - - path.dentry = sfi->sb->s_root; - - /* - * do not add SB_KERNMOUNT which a single superblock could - * expose to userspace and which also causes MNT_INTERNAL, see - * below - */ - path.mnt = vfs_kern_mount(&efivarfs_type, 0, - efivarfs_type.name, NULL); - if (IS_ERR(path.mnt)) { - pr_err("efivarfs: internal mount failed\n"); - /* - * We may be the last pinner of the superblock but - * calling efivarfs_kill_sb from within the notifier - * here would deadlock trying to unregister it - */ - INIT_WORK(&s->destroy_work, efivarfs_deactivate_super_work); - schedule_work(&s->destroy_work); - return PTR_ERR(path.mnt); - } - - /* path.mnt now has pin on superblock, so this must be above one */ - atomic_dec(&s->s_active); - - file = kernel_file_open(&path, O_RDONLY | O_DIRECTORY | O_NOATIME, - current_cred()); - /* - * safe even if last put because no MNT_INTERNAL means this - * will do delayed deactivate_super and not deadlock - */ - mntput(path.mnt); - if (IS_ERR(file)) - return NOTIFY_DONE; + /* Nothing for us to do. */ + return 0; +} - rescan_done = true; +static int efivarfs_unfreeze_fs(struct super_block *sb) +{ + struct dentry *child = NULL; /* - * First loop over the directory and verify each entry exists, - * removing it if it doesn't + * Unconditionally resync the variable state on a thaw request. + * Given the size of efivarfs it really doesn't matter to simply + * iterate through all of the entries and resync. Freeze/thaw + * requests are rare enough for that to not matter and the + * number of entries is pretty low too. So we really don't care. */ - file->f_pos = 2; /* skip . and .. */ - do { - ectx.dentry = NULL; - iterate_dir(file, &ectx.ctx); - if (ectx.dentry) { - pr_info("efivarfs: removing variable %pd\n", - ectx.dentry); - simple_recursive_removal(ectx.dentry, NULL); - dput(ectx.dentry); + pr_info("efivarfs: resyncing variable state\n"); + for (;;) { + int err; + unsigned long size = 0; + struct inode *inode; + struct efivar_entry *entry; + + child = find_next_child(sb->s_root, child); + if (!child) + break; + + inode = d_inode(child); + entry = efivar_entry(inode); + + err = efivar_entry_size(entry, &size); + if (err) + size = 0; + else + size += sizeof(__u32); + + inode_lock(inode); + i_size_write(inode, size); + inode_unlock(inode); + + /* The variable doesn't exist anymore, delete it. */ + if (!size) { + pr_info("efivarfs: removing variable %pd\n", child); + simple_recursive_removal(child, NULL); } - } while (ectx.dentry); - fput(file); - - /* - * then loop over variables, creating them if there's no matching - * dentry - */ - efivar_init(efivarfs_check_missing, sfi->sb, false); + } - return NOTIFY_OK; + efivar_init(efivarfs_check_missing, sb, false); + pr_info("efivarfs: finished resyncing variable state\n"); + return 0; } static int efivarfs_init_fs_context(struct fs_context *fc) @@ -598,9 +504,6 @@ static int efivarfs_init_fs_context(struct fs_context *fc) fc->s_fs_info = sfi; fc->ops = &efivarfs_context_ops; - sfi->pm_nb.notifier_call = efivarfs_pm_notify; - sfi->pm_nb.priority = 0; - return 0; } @@ -610,7 +513,6 @@ static void efivarfs_kill_sb(struct super_block *sb) blocking_notifier_chain_unregister(&efivar_ops_nh, &sfi->nb); kill_litter_super(sb); - unregister_pm_notifier(&sfi->pm_nb); kfree(sfi); } diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 8f68ec49ad89..6beeb7063871 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -144,6 +144,20 @@ config EROFS_FS_ZIP_ZSTD If unsure, say N. 
+config EROFS_FS_ZIP_ACCEL + bool "EROFS hardware decompression support" + depends on EROFS_FS_ZIP + help + Saying Y here includes hardware accelerator support for reading + EROFS file systems containing compressed data. It gives better + decompression speed than the software-implemented decompression, and + it costs lower CPU overhead. + + Hardware accelerator support is an experimental feature for now and + file systems are still readable without selecting this option. + + If unsure, say N. + config EROFS_FS_ONDEMAND bool "EROFS fscache-based on-demand read support (deprecated)" depends on EROFS_FS diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile index 4331d53c7109..549abc424763 100644 --- a/fs/erofs/Makefile +++ b/fs/erofs/Makefile @@ -7,5 +7,6 @@ erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o +erofs-$(CONFIG_EROFS_FS_ZIP_ACCEL) += decompressor_crypto.o erofs-$(CONFIG_EROFS_FS_BACKED_BY_FILE) += fileio.o erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 2704d7a592a5..510e922c5193 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -76,4 +76,14 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, unsigned int padbufsize); int __init z_erofs_init_decompressor(void); void z_erofs_exit_decompressor(void); +int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl); +int z_erofs_crypto_enable_engine(const char *name, int len); +#ifdef CONFIG_EROFS_FS_ZIP_ACCEL +void z_erofs_crypto_disable_all_engines(void); +int z_erofs_crypto_show_engines(char *buf, int size, char sep); +#else +static inline void z_erofs_crypto_disable_all_engines(void) {} +static inline int z_erofs_crypto_show_engines(char *buf, int size, char sep) { return 0; } +#endif #endif diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 2409d2ab0c28..6a329c329f43 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -27,7 +27,7 @@ void erofs_put_metabuf(struct erofs_buf *buf) void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap) { - pgoff_t index = offset >> PAGE_SHIFT; + pgoff_t index = (buf->off + offset) >> PAGE_SHIFT; struct folio *folio = NULL; if (buf->page) { @@ -54,6 +54,7 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) struct erofs_sb_info *sbi = EROFS_SB(sb); buf->file = NULL; + buf->off = sbi->dif0.fsoff; if (erofs_is_fileio_mode(sbi)) { buf->file = sbi->dif0.file; /* some fs like FUSE needs it */ buf->mapping = buf->file->f_mapping; @@ -299,7 +300,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, iomap->private = buf.base; } else { iomap->type = IOMAP_MAPPED; - iomap->addr = mdev.m_pa; + iomap->addr = mdev.m_dif->fsoff + mdev.m_pa; if (flags & IOMAP_DAX) iomap->addr += mdev.m_dif->dax_part_off; } diff --git a/fs/erofs/decompressor_crypto.c b/fs/erofs/decompressor_crypto.c new file mode 100644 index 000000000000..97b77ab64432 --- /dev/null +++ b/fs/erofs/decompressor_crypto.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/scatterlist.h> +#include <crypto/acompress.h> +#include "compress.h" + +static int __z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq, + struct crypto_acomp *tfm) +{ + struct sg_table st_src, st_dst; + struct acomp_req *req; + struct crypto_wait wait; + 
u8 *headpage; + int ret; + + headpage = kmap_local_page(*rq->in); + ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in, + min_t(unsigned int, rq->inputsize, + rq->sb->s_blocksize - rq->pageofs_in)); + kunmap_local(headpage); + if (ret) + return ret; + + req = acomp_request_alloc(tfm); + if (!req) + return -ENOMEM; + + ret = sg_alloc_table_from_pages_segment(&st_src, rq->in, rq->inpages, + rq->pageofs_in, rq->inputsize, UINT_MAX, GFP_KERNEL); + if (ret < 0) + goto failed_src_alloc; + + ret = sg_alloc_table_from_pages_segment(&st_dst, rq->out, rq->outpages, + rq->pageofs_out, rq->outputsize, UINT_MAX, GFP_KERNEL); + if (ret < 0) + goto failed_dst_alloc; + + acomp_request_set_params(req, st_src.sgl, + st_dst.sgl, rq->inputsize, rq->outputsize); + + crypto_init_wait(&wait); + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); + if (ret) { + erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]", + ret, rq->inputsize, rq->pageofs_in, rq->outputsize); + ret = -EIO; + } + + sg_free_table(&st_dst); +failed_dst_alloc: + sg_free_table(&st_src); +failed_src_alloc: + acomp_request_free(req); + return ret; +} + +struct z_erofs_crypto_engine { + char *crypto_name; + struct crypto_acomp *tfm; +}; + +struct z_erofs_crypto_engine *z_erofs_crypto[Z_EROFS_COMPRESSION_MAX] = { + [Z_EROFS_COMPRESSION_LZ4] = (struct z_erofs_crypto_engine[]) { + {}, + }, + [Z_EROFS_COMPRESSION_LZMA] = (struct z_erofs_crypto_engine[]) { + {}, + }, + [Z_EROFS_COMPRESSION_DEFLATE] = (struct z_erofs_crypto_engine[]) { + { .crypto_name = "qat_deflate", }, + {}, + }, + [Z_EROFS_COMPRESSION_ZSTD] = (struct z_erofs_crypto_engine[]) { + {}, + }, +}; +static DECLARE_RWSEM(z_erofs_crypto_rwsem); + +static struct crypto_acomp *z_erofs_crypto_get_engine(int alg) +{ + struct z_erofs_crypto_engine *e; + + for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) + if (e->tfm) + return e->tfm; + return NULL; +} + +int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) +{ + struct crypto_acomp *tfm; + int i, err; + + down_read(&z_erofs_crypto_rwsem); + tfm = z_erofs_crypto_get_engine(rq->alg); + if (!tfm) { + err = -EOPNOTSUPP; + goto out; + } + + for (i = 0; i < rq->outpages; i++) { + struct page *const page = rq->out[i]; + struct page *victim; + + if (!page) { + victim = __erofs_allocpage(pgpl, rq->gfp, true); + if (!victim) { + err = -ENOMEM; + goto out; + } + set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE); + rq->out[i] = victim; + } + } + err = __z_erofs_crypto_decompress(rq, tfm); +out: + up_read(&z_erofs_crypto_rwsem); + return err; +} + +int z_erofs_crypto_enable_engine(const char *name, int len) +{ + struct z_erofs_crypto_engine *e; + struct crypto_acomp *tfm; + int alg; + + down_write(&z_erofs_crypto_rwsem); + for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) { + for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) { + if (!strncmp(name, e->crypto_name, len)) { + if (e->tfm) + break; + tfm = crypto_alloc_acomp(e->crypto_name, 0, 0); + if (IS_ERR(tfm)) { + up_write(&z_erofs_crypto_rwsem); + return -EOPNOTSUPP; + } + e->tfm = tfm; + break; + } + } + } + up_write(&z_erofs_crypto_rwsem); + return 0; +} + +void z_erofs_crypto_disable_all_engines(void) +{ + struct z_erofs_crypto_engine *e; + int alg; + + down_write(&z_erofs_crypto_rwsem); + for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) { + for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) { + if (!e->tfm) + continue; + crypto_free_acomp(e->tfm); 
+ e->tfm = NULL; + } + } + up_write(&z_erofs_crypto_rwsem); +} + +int z_erofs_crypto_show_engines(char *buf, int size, char sep) +{ + struct z_erofs_crypto_engine *e; + int alg, len = 0; + + for (alg = 0; alg < Z_EROFS_COMPRESSION_MAX; ++alg) { + for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) { + if (!e->tfm) + continue; + len += scnprintf(buf + len, size - len, "%s%c", + e->crypto_name, sep); + } + } + return len; +} diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index c6908a487054..6909b2d529c7 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -97,8 +97,8 @@ failed: return -ENOMEM; } -static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pgpl) +static int __z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) { struct super_block *sb = rq->sb; struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 }; @@ -178,6 +178,22 @@ failed_zinit: return err; } +static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) +{ +#ifdef CONFIG_EROFS_FS_ZIP_ACCEL + int err; + + if (!rq->partial_decoding) { + err = z_erofs_crypto_decompress(rq, pgpl); + if (err != -EOPNOTSUPP) + return err; + + } +#endif + return __z_erofs_deflate_decompress(rq, pgpl); +} + const struct z_erofs_decompressor z_erofs_deflate_decomp = { .config = z_erofs_load_deflate_config, .decompress = z_erofs_deflate_decompress, diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index 60c7cc4c105c..7d81f504bff0 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -147,7 +147,8 @@ io_retry: if (err) break; io->rq = erofs_fileio_rq_alloc(&io->dev); - io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9; + io->rq->bio.bi_iter.bi_sector = + (io->dev.m_dif->fsoff + io->dev.m_pa) >> 9; attached = 0; } if (!bio_add_folio(&io->rq->bio, folio, len, cur)) @@ -180,7 +181,7 @@ static void erofs_fileio_readahead(struct readahead_control *rac) struct folio *folio; int err; - trace_erofs_readpages(inode, readahead_index(rac), + trace_erofs_readahead(inode, readahead_index(rac), readahead_count(rac), true); while ((folio = readahead_folio(rac))) { err = erofs_fileio_scan_folio(&io, folio); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 4ac188d5d894..a32c03a80c70 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -44,7 +44,7 @@ struct erofs_device_info { struct erofs_fscache *fscache; struct file *file; struct dax_device *dax_dev; - u64 dax_part_off; + u64 fsoff, dax_part_off; erofs_blk_t blocks; erofs_blk_t uniaddr; @@ -199,6 +199,7 @@ enum { struct erofs_buf { struct address_space *mapping; struct file *file; + u64 off; struct page *page; void *base; }; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index da6ee7c39290..e1e9f06e8342 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -165,8 +165,11 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0) : bdev_file_open_by_path(dif->path, BLK_OPEN_READ, sb->s_type, NULL); - if (IS_ERR(file)) + if (IS_ERR(file)) { + if (file == ERR_PTR(-ENOTBLK)) + return -EINVAL; return PTR_ERR(file); + } if (!erofs_is_fileio_mode(sbi)) { dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file), @@ -356,7 +359,7 @@ static void erofs_default_options(struct erofs_sb_info *sbi) enum { Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum, - Opt_device, Opt_fsid, Opt_domain_id, Opt_directio, + Opt_device, Opt_fsid, Opt_domain_id, 
Opt_directio, Opt_fsoffset, }; static const struct constant_table erofs_param_cache_strategy[] = { @@ -383,6 +386,7 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = { fsparam_string("fsid", Opt_fsid), fsparam_string("domain_id", Opt_domain_id), fsparam_flag_no("directio", Opt_directio), + fsparam_u64("fsoffset", Opt_fsoffset), {} }; @@ -506,28 +510,59 @@ static int erofs_fc_parse_param(struct fs_context *fc, errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); #endif break; + case Opt_fsoffset: + sbi->dif0.fsoff = result.uint_64; + break; } return 0; } -static struct inode *erofs_nfs_get_inode(struct super_block *sb, - u64 ino, u32 generation) +static int erofs_encode_fh(struct inode *inode, u32 *fh, int *max_len, + struct inode *parent) { - return erofs_iget(sb, ino); + erofs_nid_t nid = EROFS_I(inode)->nid; + int len = parent ? 6 : 3; + + if (*max_len < len) { + *max_len = len; + return FILEID_INVALID; + } + + fh[0] = (u32)(nid >> 32); + fh[1] = (u32)(nid & 0xffffffff); + fh[2] = inode->i_generation; + + if (parent) { + nid = EROFS_I(parent)->nid; + + fh[3] = (u32)(nid >> 32); + fh[4] = (u32)(nid & 0xffffffff); + fh[5] = parent->i_generation; + } + + *max_len = len; + return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN; } static struct dentry *erofs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { - return generic_fh_to_dentry(sb, fid, fh_len, fh_type, - erofs_nfs_get_inode); + if ((fh_type != FILEID_INO64_GEN && + fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3) + return NULL; + + return d_obtain_alias(erofs_iget(sb, + ((u64)fid->raw[0] << 32) | fid->raw[1])); } static struct dentry *erofs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { - return generic_fh_to_parent(sb, fid, fh_len, fh_type, - erofs_nfs_get_inode); + if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6) + return NULL; + + return d_obtain_alias(erofs_iget(sb, + ((u64)fid->raw[3] << 32) | fid->raw[4])); } static struct dentry *erofs_get_parent(struct dentry *child) @@ -543,7 +578,7 @@ static struct dentry *erofs_get_parent(struct dentry *child) } static const struct export_operations erofs_export_ops = { - .encode_fh = generic_encode_ino32_fh, + .encode_fh = erofs_encode_fh, .fh_to_dentry = erofs_fh_to_dentry, .fh_to_parent = erofs_fh_to_parent, .get_parent = erofs_get_parent, @@ -618,6 +653,14 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) } } + if (sbi->dif0.fsoff) { + if (sbi->dif0.fsoff & (sb->s_blocksize - 1)) + return invalfc(fc, "fsoffset %llu is not aligned to block size %lu", + sbi->dif0.fsoff, sb->s_blocksize); + if (erofs_is_fscache_mode(sb)) + return invalfc(fc, "cannot use fsoffset in fscache mode"); + } + if (test_opt(&sbi->opt, DAX_ALWAYS)) { if (!sbi->dif0.dax_dev) { errorfc(fc, "DAX unsupported by block device. 
Turning off DAX."); @@ -947,6 +990,8 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root) if (sbi->domain_id) seq_printf(seq, ",domain_id=%s", sbi->domain_id); #endif + if (sbi->dif0.fsoff) + seq_printf(seq, ",fsoffset=%llu", sbi->dif0.fsoff); return 0; } diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c index dad4e6c6c155..eed8797a193f 100644 --- a/fs/erofs/sysfs.c +++ b/fs/erofs/sysfs.c @@ -7,12 +7,14 @@ #include <linux/kobject.h> #include "internal.h" +#include "compress.h" enum { attr_feature, attr_drop_caches, attr_pointer_ui, attr_pointer_bool, + attr_accel, }; enum { @@ -60,14 +62,25 @@ static struct erofs_attr erofs_attr_##_name = { \ EROFS_ATTR_RW_UI(sync_decompress, erofs_mount_opts); EROFS_ATTR_FUNC(drop_caches, 0200); #endif +#ifdef CONFIG_EROFS_FS_ZIP_ACCEL +EROFS_ATTR_FUNC(accel, 0644); +#endif -static struct attribute *erofs_attrs[] = { +static struct attribute *erofs_sb_attrs[] = { #ifdef CONFIG_EROFS_FS_ZIP ATTR_LIST(sync_decompress), ATTR_LIST(drop_caches), #endif NULL, }; +ATTRIBUTE_GROUPS(erofs_sb); + +static struct attribute *erofs_attrs[] = { +#ifdef CONFIG_EROFS_FS_ZIP_ACCEL + ATTR_LIST(accel), +#endif + NULL, +}; ATTRIBUTE_GROUPS(erofs); /* Features this copy of erofs supports */ @@ -128,12 +141,14 @@ static ssize_t erofs_attr_show(struct kobject *kobj, if (!ptr) return 0; return sysfs_emit(buf, "%d\n", *(bool *)ptr); + case attr_accel: + return z_erofs_crypto_show_engines(buf, PAGE_SIZE, '\n'); } return 0; } static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t len) + const char *buf, size_t len) { struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info, s_kobj); @@ -182,6 +197,19 @@ static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr, invalidate_mapping_pages(MNGD_MAPPING(sbi), 0, -1); return len; #endif +#ifdef CONFIG_EROFS_FS_ZIP_ACCEL + case attr_accel: + buf = skip_spaces(buf); + z_erofs_crypto_disable_all_engines(); + while (*buf) { + t = strcspn(buf, "\n"); + ret = z_erofs_crypto_enable_engine(buf, t); + if (ret < 0) + return ret; + buf += buf[t] != '\0' ? 
t + 1 : t; + } + return len; +#endif } return 0; } @@ -199,12 +227,13 @@ static const struct sysfs_ops erofs_attr_ops = { }; static const struct kobj_type erofs_sb_ktype = { - .default_groups = erofs_groups, + .default_groups = erofs_sb_groups, .sysfs_ops = &erofs_attr_ops, .release = erofs_sb_release, }; static const struct kobj_type erofs_ktype = { + .default_groups = erofs_groups, .sysfs_ops = &erofs_attr_ops, }; @@ -248,6 +277,12 @@ void erofs_unregister_sysfs(struct super_block *sb) } } +void erofs_exit_sysfs(void) +{ + kobject_put(&erofs_feat); + kset_unregister(&erofs_root); +} + int __init erofs_init_sysfs(void) { int ret; @@ -255,24 +290,12 @@ int __init erofs_init_sysfs(void) kobject_set_name(&erofs_root.kobj, "erofs"); erofs_root.kobj.parent = fs_kobj; ret = kset_register(&erofs_root); - if (ret) - goto root_err; - - ret = kobject_init_and_add(&erofs_feat, &erofs_feat_ktype, - NULL, "features"); - if (ret) - goto feat_err; - return ret; - -feat_err: - kobject_put(&erofs_feat); - kset_unregister(&erofs_root); -root_err: + if (!ret) { + ret = kobject_init_and_add(&erofs_feat, &erofs_feat_ktype, + NULL, "features"); + if (!ret) + return 0; + erofs_exit_sysfs(); + } return ret; } - -void erofs_exit_sysfs(void) -{ - kobject_put(&erofs_feat); - kset_unregister(&erofs_root); -} diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index b8e6b76c23d5..fe8071844724 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -288,6 +288,7 @@ static struct workqueue_struct *z_erofs_workqueue __read_mostly; #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD static struct kthread_worker __rcu **z_erofs_pcpu_workers; +static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0); static void erofs_destroy_percpu_workers(void) { @@ -333,12 +334,8 @@ static int erofs_init_percpu_workers(void) } return 0; } -#else -static inline void erofs_destroy_percpu_workers(void) {} -static inline int erofs_init_percpu_workers(void) { return 0; } -#endif -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD) +#ifdef CONFIG_HOTPLUG_CPU static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock); static enum cpuhp_state erofs_cpuhp_state; @@ -395,17 +392,56 @@ static void erofs_cpu_hotplug_destroy(void) if (erofs_cpuhp_state) cpuhp_remove_state_nocalls(erofs_cpuhp_state); } -#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */ +#else /* !CONFIG_HOTPLUG_CPU */ static inline int erofs_cpu_hotplug_init(void) { return 0; } static inline void erofs_cpu_hotplug_destroy(void) {} -#endif +#endif/* CONFIG_HOTPLUG_CPU */ +static int z_erofs_init_pcpu_workers(struct super_block *sb) +{ + int err; -void z_erofs_exit_subsystem(void) + if (atomic_xchg(&erofs_percpu_workers_initialized, 1)) + return 0; + + err = erofs_init_percpu_workers(); + if (err) { + erofs_err(sb, "per-cpu workers: failed to allocate."); + goto err_init_percpu_workers; + } + + err = erofs_cpu_hotplug_init(); + if (err < 0) { + erofs_err(sb, "per-cpu workers: failed CPU hotplug init."); + goto err_cpuhp_init; + } + erofs_info(sb, "initialized per-cpu workers successfully."); + return err; + +err_cpuhp_init: + erofs_destroy_percpu_workers(); +err_init_percpu_workers: + atomic_set(&erofs_percpu_workers_initialized, 0); + return err; +} + +static void z_erofs_destroy_pcpu_workers(void) { + if (!atomic_xchg(&erofs_percpu_workers_initialized, 0)) + return; erofs_cpu_hotplug_destroy(); erofs_destroy_percpu_workers(); +} +#else /* !CONFIG_EROFS_FS_PCPU_KTHREAD */ +static inline int z_erofs_init_pcpu_workers(struct super_block *sb) { return 0; } +static inline 
void z_erofs_destroy_pcpu_workers(void) {} +#endif/* CONFIG_EROFS_FS_PCPU_KTHREAD */ + +void z_erofs_exit_subsystem(void) +{ + z_erofs_destroy_pcpu_workers(); destroy_workqueue(z_erofs_workqueue); z_erofs_destroy_pcluster_pool(); + z_erofs_crypto_disable_all_engines(); z_erofs_exit_decompressor(); } @@ -427,19 +463,8 @@ int __init z_erofs_init_subsystem(void) goto err_workqueue_init; } - err = erofs_init_percpu_workers(); - if (err) - goto err_pcpu_worker; - - err = erofs_cpu_hotplug_init(); - if (err < 0) - goto err_cpuhp_init; return err; -err_cpuhp_init: - erofs_destroy_percpu_workers(); -err_pcpu_worker: - destroy_workqueue(z_erofs_workqueue); err_workqueue_init: z_erofs_destroy_pcluster_pool(); err_pcluster_pool: @@ -641,8 +666,14 @@ static const struct address_space_operations z_erofs_cache_aops = { int z_erofs_init_super(struct super_block *sb) { - struct inode *const inode = new_inode(sb); + struct inode *inode; + int err; + err = z_erofs_init_pcpu_workers(sb); + if (err) + return err; + + inode = new_inode(sb); if (!inode) return -ENOMEM; set_nlink(inode, 1); @@ -1707,7 +1738,8 @@ drain_io: bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOIO); bio->bi_end_io = z_erofs_endio; - bio->bi_iter.bi_sector = cur >> 9; + bio->bi_iter.bi_sector = + (mdev.m_dif->fsoff + cur) >> 9; bio->bi_private = q[JQ_SUBMIT]; if (readahead) bio->bi_opf |= REQ_RAHEAD; @@ -1855,13 +1887,12 @@ static void z_erofs_readahead(struct readahead_control *rac) { struct inode *const inode = rac->mapping->host; Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac)); - struct folio *head = NULL, *folio; unsigned int nrpages = readahead_count(rac); + struct folio *head = NULL, *folio; int err; + trace_erofs_readahead(inode, readahead_index(rac), nrpages, false); z_erofs_pcluster_readmore(&f, rac, true); - nrpages = readahead_count(rac); - trace_erofs_readpages(inode, readahead_index(rac), nrpages, false); while ((folio = readahead_folio(rac))) { folio->private = head; head = folio; diff --git a/fs/exec.c b/fs/exec.c index 8e4ea5f1e64c..cfbb2b9ee3c9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -115,66 +115,6 @@ bool path_noexec(const struct path *path) (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC); } -#ifdef CONFIG_USELIB -/* - * Note that a shared library must be both readable and executable due to - * security reasons. - * - * Also note that we take the address to load from the file itself. - */ -SYSCALL_DEFINE1(uselib, const char __user *, library) -{ - struct linux_binfmt *fmt; - struct file *file; - struct filename *tmp = getname(library); - int error = PTR_ERR(tmp); - static const struct open_flags uselib_flags = { - .open_flag = O_LARGEFILE | O_RDONLY, - .acc_mode = MAY_READ | MAY_EXEC, - .intent = LOOKUP_OPEN, - .lookup_flags = LOOKUP_FOLLOW, - }; - - if (IS_ERR(tmp)) - goto out; - - file = do_filp_open(AT_FDCWD, tmp, &uselib_flags); - putname(tmp); - error = PTR_ERR(file); - if (IS_ERR(file)) - goto out; - - /* - * Check do_open_execat() for an explanation. 
- */ - error = -EACCES; - if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) || - path_noexec(&file->f_path)) - goto exit; - - error = -ENOEXEC; - - read_lock(&binfmt_lock); - list_for_each_entry(fmt, &formats, lh) { - if (!fmt->load_shlib) - continue; - if (!try_module_get(fmt->module)) - continue; - read_unlock(&binfmt_lock); - error = fmt->load_shlib(file); - read_lock(&binfmt_lock); - put_binfmt(fmt); - if (error != -ENOEXEC) - break; - } - read_unlock(&binfmt_lock); -exit: - fput(file); -out: - return error; -} -#endif /* #ifdef CONFIG_USELIB */ - #ifdef CONFIG_MMU /* * The nascent bprm->mm is not visible until exec_mmap() but it can diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 128dd092916b..cdefea17986a 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -143,7 +143,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt, if (err) goto out_err; dprintk("%s: found name: %s\n", __func__, nbuf); - tmp = lookup_one_unlocked(mnt_idmap(mnt), nbuf, parent, strlen(nbuf)); + tmp = lookup_one_unlocked(mnt_idmap(mnt), &QSTR(nbuf), parent); if (IS_ERR(tmp)) { dprintk("lookup failed: %ld\n", PTR_ERR(tmp)); err = PTR_ERR(tmp); @@ -284,6 +284,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child) }; struct getdents_callback buffer = { .ctx.actor = filldir_one, + .ctx.count = INT_MAX, .name = name, }; @@ -549,8 +550,7 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len, } inode_lock(target_dir->d_inode); - nresult = lookup_one(mnt_idmap(mnt), nbuf, - target_dir, strlen(nbuf)); + nresult = lookup_one(mnt_idmap(mnt), &QSTR(nbuf), target_dir); if (!IS_ERR(nresult)) { if (unlikely(nresult->d_inode != result->d_inode)) { dput(nresult); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 94c7d2d828a6..cdf01e60fa6d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5692,7 +5692,7 @@ int ext4_getattr(struct mnt_idmap *idmap, const struct path *path, awu_max = sbi->s_awu_max; } - generic_fill_statx_atomic_writes(stat, awu_min, awu_max); + generic_fill_statx_atomic_writes(stat, awu_min, awu_max, 0); } flags = ei->i_flags & EXT4_FL_USER_VISIBLE; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 2b8f9239bede..dd0ba0532e01 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -2271,12 +2271,12 @@ out_drop_write: if (err) return err; - err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE); + err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); if (err) return err; if (f2fs_readonly(sbi->sb)) { - err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE); + err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); if (err) return err; return -EROFS; @@ -2333,6 +2333,6 @@ recover_out: out_err: f2fs_up_write(&sbi->cp_global_sem); f2fs_up_write(&sbi->gc_lock); - thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE); + thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL); return err; } diff --git a/fs/file_table.c b/fs/file_table.c index c04ed94cdc4b..138114d64307 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(get_max_files); static int proc_nr_files(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { - files_stat.nr_files = get_nr_files(); + files_stat.nr_files = percpu_counter_sum_positive(&nr_files); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } diff --git a/fs/filesystems.c b/fs/filesystems.c index 58b9067b2391..95e5256821a5 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -156,15 +156,19 @@ static int fs_index(const char __user * __name) static int 
fs_name(unsigned int index, char __user * buf) { struct file_system_type * tmp; - int len, res; + int len, res = -EINVAL; read_lock(&file_systems_lock); - for (tmp = file_systems; tmp; tmp = tmp->next, index--) - if (index <= 0 && try_module_get(tmp->owner)) + for (tmp = file_systems; tmp; tmp = tmp->next, index--) { + if (index == 0) { + if (try_module_get(tmp->owner)) + res = 0; break; + } + } read_unlock(&file_systems_lock); - if (!tmp) - return -EINVAL; + if (res) + return res; /* OK, we got the reference, so we can safely block */ len = strlen(tmp->name) + 1; diff --git a/fs/fs_context.c b/fs/fs_context.c index 582d33e81117..666e61753aed 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -222,7 +222,7 @@ int vfs_parse_monolithic_sep(struct fs_context *fc, void *data, char *value = strchr(key, '='); if (value) { - if (value == key) + if (unlikely(value == key)) continue; *value++ = 0; v_len = strlen(value); @@ -449,6 +449,10 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, printk(KERN_ERR "%s%s%pV\n", prefix ? prefix : "", prefix ? ": " : "", &vaf); break; + case 'i': + printk(KERN_INFO "%s%s%pV\n", prefix ? prefix : "", + prefix ? ": " : "", &vaf); + break; default: printk(KERN_NOTICE "%s%s%pV\n", prefix ? prefix : "", prefix ? ": " : "", &vaf); diff --git a/fs/fs_parser.c b/fs/fs_parser.c index e635a81e17d9..c092a9f79e32 100644 --- a/fs/fs_parser.c +++ b/fs/fs_parser.c @@ -380,58 +380,9 @@ EXPORT_SYMBOL(fs_param_is_path); #ifdef CONFIG_VALIDATE_FS_PARSER /** - * validate_constant_table - Validate a constant table - * @tbl: The constant table to validate. - * @tbl_size: The size of the table. - * @low: The lowest permissible value. - * @high: The highest permissible value. - * @special: One special permissible value outside of the range. - */ -bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, - int low, int high, int special) -{ - size_t i; - bool good = true; - - if (tbl_size == 0) { - pr_warn("VALIDATE C-TBL: Empty\n"); - return true; - } - - for (i = 0; i < tbl_size; i++) { - if (!tbl[i].name) { - pr_err("VALIDATE C-TBL[%zu]: Null\n", i); - good = false; - } else if (i > 0 && tbl[i - 1].name) { - int c = strcmp(tbl[i-1].name, tbl[i].name); - - if (c == 0) { - pr_err("VALIDATE C-TBL[%zu]: Duplicate %s\n", - i, tbl[i].name); - good = false; - } - if (c > 0) { - pr_err("VALIDATE C-TBL[%zu]: Missorted %s>=%s\n", - i, tbl[i-1].name, tbl[i].name); - good = false; - } - } - - if (tbl[i].value != special && - (tbl[i].value < low || tbl[i].value > high)) { - pr_err("VALIDATE C-TBL[%zu]: %s->%d const out of range (%d-%d)\n", - i, tbl[i].name, tbl[i].value, low, high); - good = false; - } - } - - return good; -} - -/** - * fs_validate_description - Validate a parameter description - * @name: The parameter name to search for. - * @desc: The parameter description to validate. + * fs_validate_description - Validate a parameter specification array + * @name: Owner name of the parameter specification array + * @desc: The parameter specification array to validate. 
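For reference, the "parameter specification array" this validator walks is the same kind of table erofs extends earlier in this diff; a minimal, hypothetical one looks like:

#include <linux/fs_parser.h>

enum { Opt_ro_mode, Opt_blocksize, Opt_label };

/* Hypothetical table for an imaginary "examplefs"; the empty entry
 * terminates the array. */
static const struct fs_parameter_spec examplefs_parameters[] = {
	fsparam_flag("ro_mode",		Opt_ro_mode),
	fsparam_u32("blocksize",	Opt_blocksize),
	fsparam_string("label",		Opt_label),
	{}
};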
*/ bool fs_validate_description(const char *name, const struct fs_parameter_spec *desc) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 83ac192e7fdd..33b82529cb6e 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1676,7 +1676,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, goto out_err; } - set_delayed_call(callback, page_put_link, &folio->page); + set_delayed_call(callback, page_put_link, folio); return folio_address(folio); diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index 17ce9636a2b1..edcd6f18a8a8 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -120,7 +120,7 @@ static bool fuse_emit(struct file *file, struct dir_context *ctx, fuse_add_dirent_to_cache(file, dirent, ctx->pos); return dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino, - dirent->type); + dirent->type | FILLDIR_FLAG_NOINTR); } static int parse_dirfile(char *buf, size_t nbytes, struct file *file, @@ -419,7 +419,7 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff, if (ff->readdir.pos == ctx->pos) { res = FOUND_SOME; if (!dir_emit(ctx, dirent->name, dirent->namelen, - dirent->ino, dirent->type)) + dirent->ino, dirent->type | FILLDIR_FLAG_NOINTR)) return FOUND_ALL; ctx->pos = dirent->off; } diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 68fc8af14700..14f204cd5a82 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -37,27 +37,6 @@ #include "aops.h" -void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio, - size_t from, size_t len) -{ - struct buffer_head *head = folio_buffers(folio); - unsigned int bsize = head->b_size; - struct buffer_head *bh; - size_t to = from + len; - size_t start, end; - - for (bh = head, start = 0; bh != head || !start; - bh = bh->b_this_page, start = end) { - end = start + bsize; - if (end <= from) - continue; - if (start >= to) - break; - set_buffer_uptodate(bh); - gfs2_trans_add_data(ip->i_gl, bh); - } -} - /** * gfs2_get_block_noalloc - Fills in a buffer head with details about a block * @inode: The inode @@ -133,12 +112,43 @@ static int __gfs2_jdata_write_folio(struct folio *folio, inode->i_sb->s_blocksize, BIT(BH_Dirty)|BIT(BH_Uptodate)); } - gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio)); + gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio)); } return gfs2_write_jdata_folio(folio, wbc); } /** + * gfs2_jdata_writeback - Write jdata folios to the log + * @mapping: The mapping to write + * @wbc: The writeback control + * + * Returns: errno + */ +int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc) +{ + struct inode *inode = mapping->host; + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_sbd *sdp = GFS2_SB(mapping->host); + struct folio *folio = NULL; + int error; + + BUG_ON(current->journal_info); + if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE)) + return 0; + + while ((folio = writeback_iter(mapping, wbc, folio, &error))) { + if (folio_test_checked(folio)) { + folio_redirty_for_writepage(wbc, folio); + folio_unlock(folio); + continue; + } + error = __gfs2_jdata_write_folio(folio, wbc); + } + + return error; +} + +/** * gfs2_writepages - Write a bunch of dirty pages back to disk * @mapping: The mapping to write * @wbc: Write-back control @@ -228,24 +238,16 @@ continue_unlock: ret = __gfs2_jdata_write_folio(folio, wbc); if (unlikely(ret)) { - if (ret == AOP_WRITEPAGE_ACTIVATE) { - folio_unlock(folio); - ret = 0; - } else { - - /* - * done_index is set past this page, - * so media errors will not choke - * background 
writeout for the entire - * file. This has consequences for - * range_cyclic semantics (ie. it may - * not be suitable for data integrity - * writeout). - */ - *done_index = folio_next_index(folio); - ret = 1; - break; - } + /* + * done_index is set past this page, so media errors + * will not choke background writeout for the entire + * file. This has consequences for range_cyclic + * semantics (ie. it may not be suitable for data + * integrity writeout). + */ + *done_index = folio_next_index(folio); + ret = 1; + break; } /* @@ -540,7 +542,7 @@ out: gfs2_trans_end(sdp); } -static bool jdata_dirty_folio(struct address_space *mapping, +static bool gfs2_jdata_dirty_folio(struct address_space *mapping, struct folio *folio) { if (current->journal_info) @@ -722,7 +724,7 @@ static const struct address_space_operations gfs2_jdata_aops = { .writepages = gfs2_jdata_writepages, .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, - .dirty_folio = jdata_dirty_folio, + .dirty_folio = gfs2_jdata_dirty_folio, .bmap = gfs2_bmap, .migrate_folio = buffer_migrate_folio, .invalidate_folio = gfs2_invalidate_folio, diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h index a10c4334d248..bf002522a782 100644 --- a/fs/gfs2/aops.h +++ b/fs/gfs2/aops.h @@ -9,7 +9,6 @@ #include "incore.h" void adjust_fs_space(struct inode *inode); -void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio, - size_t from, size_t len); +int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc); #endif /* __AOPS_DOT_H__ */ diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 366516b98b3f..7703d0471139 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -988,7 +988,8 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos, struct gfs2_sbd *sdp = GFS2_SB(inode); if (!gfs2_is_stuffed(ip)) - gfs2_trans_add_databufs(ip, folio, offset_in_folio(folio, pos), + gfs2_trans_add_databufs(ip->i_gl, folio, + offset_in_folio(folio, pos), copied); folio_unlock(folio); @@ -1296,10 +1297,12 @@ int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock, * uses iomap write to perform its actions, which begin their own transactions * (iomap_begin, get_folio, etc.) 
*/ -static int gfs2_block_zero_range(struct inode *inode, loff_t from, - unsigned int length) +static int gfs2_block_zero_range(struct inode *inode, loff_t from, loff_t length) { BUG_ON(current->journal_info); + if (from >= inode->i_size) + return 0; + length = min(length, inode->i_size - from); return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops, NULL); } diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index d7220a6fe8f5..ba25b884169e 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1166,7 +1166,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, const struct gfs2_glock_operations *glops, int create, struct gfs2_glock **glp) { - struct super_block *s = sdp->sd_vfs; struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type, .ln_sbd = sdp }; @@ -1229,7 +1228,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, mapping = gfs2_glock2aspace(gl); if (mapping) { mapping->a_ops = &gfs2_meta_aops; - mapping->host = s->s_bdev->bd_mapping->host; + mapping->host = sdp->sd_inode; mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_NOFS); mapping->i_private_data = NULL; diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index eb4714f299ef..cebd66b22694 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -168,7 +168,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) static int gfs2_rgrp_metasync(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct address_space *metamapping = &sdp->sd_aspace; + struct address_space *metamapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK; @@ -225,7 +225,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl) static void rgrp_go_inval(struct gfs2_glock *gl, int flags) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct address_space *mapping = &sdp->sd_aspace; + struct address_space *mapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; loff_t start, end; @@ -601,14 +601,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl) if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); - error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); + error = gfs2_find_jhead(sdp->sd_jdesc, &head); if (gfs2_assert_withdraw_delayed(sdp, !error)) return error; if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) return -EIO; - sdp->sd_log_sequence = head.lh_sequence + 1; - gfs2_log_pointers_init(sdp, head.lh_blkno); + gfs2_log_pointers_init(sdp, &head); } return 0; } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 74abbd4970f8..0a41c4e76b32 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -795,7 +795,7 @@ struct gfs2_sbd { /* Log stuff */ - struct address_space sd_aspace; + struct inode *sd_inode; spinlock_t sd_log_lock; @@ -851,6 +851,13 @@ struct gfs2_sbd { unsigned long sd_glock_dqs_held; }; +#define GFS2_BAD_INO 1 + +static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp) +{ + return sdp->sd_inode->i_mapping; +} + static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) { gl->gl_stats.stats[which]++; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 198a8cbaf5e5..187d789a8f1e 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -439,6 +439,74 @@ out: return error; } +static void gfs2_final_release_pages(struct gfs2_inode *ip) +{ + struct inode *inode = &ip->i_inode; + struct gfs2_glock *gl = ip->i_gl; + + if 
(unlikely(!gl)) { + /* This can only happen during incomplete inode creation. */ + BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); + return; + } + + truncate_inode_pages(gfs2_glock2aspace(gl), 0); + truncate_inode_pages(&inode->i_data, 0); + + if (atomic_read(&gl->gl_revokes) == 0) { + clear_bit(GLF_LFLUSH, &gl->gl_flags); + clear_bit(GLF_DIRTY, &gl->gl_flags); + } +} + +int gfs2_dinode_dealloc(struct gfs2_inode *ip) +{ + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); + struct gfs2_rgrpd *rgd; + struct gfs2_holder gh; + int error; + + if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { + gfs2_consist_inode(ip); + return -EIO; + } + + gfs2_rindex_update(sdp); + + error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); + if (error) + return error; + + rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); + if (!rgd) { + gfs2_consist_inode(ip); + error = -EIO; + goto out_qs; + } + + error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, + LM_FLAG_NODE_SCOPE, &gh); + if (error) + goto out_qs; + + error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, + sdp->sd_jdesc->jd_blocks); + if (error) + goto out_rg_gunlock; + + gfs2_free_di(rgd, ip); + + gfs2_final_release_pages(ip); + + gfs2_trans_end(sdp); + +out_rg_gunlock: + gfs2_glock_dq_uninit(&gh); +out_qs: + gfs2_quota_unhold(ip); + return error; +} + static void gfs2_init_dir(struct buffer_head *dibh, const struct gfs2_inode *parent) { @@ -629,10 +697,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_glock *io_gl; - int error; + int error, dealloc_error; u32 aflags = 0; unsigned blocks = 1; struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, }; + bool xattr_initialized = false; if (!name->len || name->len > GFS2_FNAMESIZE) return -ENAMETOOLONG; @@ -659,7 +728,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (!IS_ERR(inode)) { if (S_ISDIR(inode->i_mode)) { iput(inode); - inode = ERR_PTR(-EISDIR); + inode = NULL; + error = -EISDIR; goto fail_gunlock; } d_instantiate(dentry, inode); @@ -744,11 +814,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (error) - goto fail_free_inode; + goto fail_dealloc_inode; error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) - goto fail_free_inode; + goto fail_dealloc_inode; gfs2_cancel_delete_work(io_gl); io_gl->gl_no_formal_ino = ip->i_no_formal_ino; @@ -767,13 +837,16 @@ retry: error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh); if (error) goto fail_gunlock3; + clear_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags); error = gfs2_trans_begin(sdp, blocks, 0); if (error) goto fail_gunlock3; - if (blocks > 1) + if (blocks > 1) { gfs2_init_xattr(ip); + xattr_initialized = true; + } init_dinode(dip, ip, symname); gfs2_trans_end(sdp); @@ -828,6 +901,18 @@ fail_gunlock3: gfs2_glock_dq_uninit(&ip->i_iopen_gh); fail_gunlock2: gfs2_glock_put(io_gl); +fail_dealloc_inode: + set_bit(GIF_ALLOC_FAILED, &ip->i_flags); + dealloc_error = 0; + if (ip->i_eattr) + dealloc_error = gfs2_ea_dealloc(ip, xattr_initialized); + clear_nlink(inode); + mark_inode_dirty(inode); + if (!dealloc_error) + dealloc_error = gfs2_dinode_dealloc(ip); + if (dealloc_error) + fs_warn(sdp, "%s: %d\n", __func__, dealloc_error); + ip->i_no_addr = 0; fail_free_inode: if (ip->i_gl) { gfs2_glock_put(ip->i_gl); @@ -842,10 
+927,6 @@ fail_gunlock: gfs2_dir_no_add(&da); gfs2_glock_dq_uninit(&d_gh); if (!IS_ERR_OR_NULL(inode)) { - set_bit(GIF_ALLOC_FAILED, &ip->i_flags); - clear_nlink(inode); - if (ip->i_no_addr) - mark_inode_dirty(inode); if (inode->i_state & I_NEW) iget_failed(inode); else diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 9e5e1622d50a..eafe123617e6 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -92,6 +92,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, u64 no_formal_ino, unsigned int blktype); +int gfs2_dinode_dealloc(struct gfs2_inode *ip); struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, int is_root); diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 58aeeae7ed8c..7cb9d216d8bb 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -328,6 +328,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; + uint32_t flags = 0; int error; BUG_ON(!__lockref_is_dead(&gl->gl_lockref)); @@ -352,7 +353,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl) * When the lockspace is released, all remaining glocks will be * unlocked automatically. This is more efficient than unlocking them * individually, but when the lock is held in DLM_LOCK_EX or - * DLM_LOCK_PW mode, the lock value block (LVB) will be lost. + * DLM_LOCK_PW mode, the lock value block (LVB) would be lost. */ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && @@ -361,8 +362,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl) return; } + if (gl->gl_lksb.sb_lvbptr) + flags |= DLM_LKF_VALBLK; + again: - error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, + error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags, NULL, gl); if (error == -EBUSY) { msleep(20); @@ -996,14 +1000,15 @@ locks_done: if (sdp->sd_args.ar_spectator) { fs_info(sdp, "Recovery is required. 
Waiting for a " "non-spectator to mount.\n"); + spin_unlock(&ls->ls_recover_spin); msleep_interruptible(1000); } else { fs_info(sdp, "control_mount wait1 block %u start %u " "mount %u lvb %u flags %lx\n", block_gen, start_gen, mount_gen, lvb_gen, ls->ls_recover_flags); + spin_unlock(&ls->ls_recover_spin); } - spin_unlock(&ls->ls_recover_spin); goto restart; } diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index f9c5089783d2..115c4ac457e9 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -31,6 +31,7 @@ #include "dir.h" #include "trace_gfs2.h" #include "trans.h" +#include "aops.h" static void gfs2_log_shutdown(struct gfs2_sbd *sdp); @@ -131,7 +132,11 @@ __acquires(&sdp->sd_ail_lock) if (!mapping) continue; spin_unlock(&sdp->sd_ail_lock); - ret = mapping->a_ops->writepages(mapping, wbc); + BUG_ON(GFS2_SB(mapping->host) != sdp); + if (gfs2_is_jdata(GFS2_I(mapping->host))) + ret = gfs2_jdata_writeback(mapping, wbc); + else + ret = mapping->a_ops->writepages(mapping, wbc); if (need_resched()) { blk_finish_plug(plug); cond_resched(); diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index c27b05099c1e..fc30ebdad83a 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -44,17 +44,6 @@ __releases(&sdp->sd_log_lock) spin_unlock(&sdp->sd_log_lock); } -static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp, - unsigned int value) -{ - if (++value == sdp->sd_jdesc->jd_blocks) { - value = 0; - } - sdp->sd_log_tail = value; - sdp->sd_log_flush_tail = value; - sdp->sd_log_head = value; -} - static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 0fd3b5ec7d8c..9c8c305a75c4 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -204,9 +204,11 @@ static void gfs2_end_log_write(struct bio *bio) struct bvec_iter_all iter_all; if (bio->bi_status) { - if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status)) + int err = blk_status_to_errno(bio->bi_status); + + if (!cmpxchg(&sdp->sd_log_error, 0, err)) fs_err(sdp, "Error %d writing to journal, jid=%u\n", - bio->bi_status, sdp->sd_jdesc->jd_jid); + err, sdp->sd_jdesc->jd_jid); gfs2_withdraw_delayed(sdp); /* prevent more writes to the journal */ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); @@ -449,7 +451,7 @@ static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd, * Find the folio with 'index' in the journal's mapping. Search the folio for * the journal head if requested (cleanup == false). Release refs on the * folio so the page cache can reclaim it. We grabbed a - * reference on this folio twice, first when we did a grab_cache_page() + * reference on this folio twice, first when we did a filemap_grab_folio() * to obtain the folio to add it to the bio and second when we do a * filemap_get_folio() here to get the folio to wait on while I/O on it is being * completed. 
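The two-reference scheme that this comment describes can be sketched in isolation as below. This is a stand-alone illustration of the pattern, not code from the patch: the function name demo_grab_then_wait and its error handling are made up, and only the page-cache calls named in the comment (filemap_grab_folio(), filemap_get_folio(), folio_put_refs()) are real kernel APIs.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Illustration only -- not part of the patch above.  filemap_grab_folio()
 * returns the folio locked and holding one reference; the read bio's
 * completion handler is expected to unlock it.  filemap_get_folio() later
 * takes a second reference so the caller can wait for that unlock, and
 * folio_put_refs() drops both references in a single call.
 */
static int demo_grab_then_wait(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	folio = filemap_grab_folio(mapping, index);	/* reference #1, locked */
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* ...add the folio to a read bio and submit it; bi_end_io unlocks it... */

	/*
	 * Reference #2.  The folio cannot have been reclaimed meanwhile because
	 * reference #1 is still held, so barring truncation this lookup succeeds.
	 */
	folio = filemap_get_folio(mapping, index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	folio_wait_locked(folio);	/* wait for the read to complete */

	/* ...inspect the folio contents here... */

	folio_put_refs(folio, 2);	/* drop both references at once */
	return 0;
}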
@@ -474,7 +476,7 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, if (!*done) *done = gfs2_jhead_folio_search(jd, head, folio); - /* filemap_get_folio() and the earlier grab_cache_page() */ + /* filemap_get_folio() and the earlier filemap_grab_folio() */ folio_put_refs(folio, 2); } @@ -494,15 +496,13 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) * gfs2_find_jhead - find the head of a log * @jd: The journal descriptor * @head: The log descriptor for the head of the log is returned here - * @keep_cache: If set inode pages will not be truncated * * Do a search of a journal by reading it in large chunks using bios and find * the valid log entry with the highest sequence number. (i.e. the log head) * * Returns: 0 on success, errno otherwise */ -int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, - bool keep_cache) +int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct address_space *mapping = jd->jd_inode->i_mapping; @@ -591,8 +591,7 @@ out: if (!ret) ret = filemap_check_wb_err(mapping, since); - if (!keep_cache) - truncate_inode_pages(mapping, 0); + truncate_inode_pages(mapping, 0); return ret; } diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 07890c7b145d..be740bf33666 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -20,7 +20,7 @@ void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf); void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); int gfs2_find_jhead(struct gfs2_jdesc *jd, - struct gfs2_log_header_host *head, bool keep_cache); + struct gfs2_log_header_host *head); void gfs2_drain_revokes(struct gfs2_sbd *sdp); static inline unsigned int buf_limit(struct gfs2_sbd *sdp) diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 198cc7056637..9dc8885c95d0 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -132,7 +132,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) unsigned int bufnum; if (mapping == NULL) - mapping = &sdp->sd_aspace; + mapping = gfs2_aspace(sdp); shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; index = blkno >> shift; /* convert block to page */ diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index 831d988c2ceb..b7c8a6684d02 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h @@ -44,9 +44,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) struct gfs2_glock_aspace *gla = container_of(mapping, struct gfs2_glock_aspace, mapping); return gla->glock.gl_name.ln_sbd; - } else if (mapping->a_ops == &gfs2_rgrp_aops) - return container_of(mapping, struct gfs2_sbd, sd_aspace); - else + } else return inode->i_sb->s_fs_info; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index e83d293c3614..653f0ff4b057 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -64,15 +64,13 @@ static void gfs2_tune_init(struct gfs2_tune *gt) void free_sbd(struct gfs2_sbd *sdp) { - if (sdp->sd_lkstats) - free_percpu(sdp->sd_lkstats); + free_percpu(sdp->sd_lkstats); kfree(sdp); } static struct gfs2_sbd *init_sbd(struct super_block *sb) { struct gfs2_sbd *sdp; - struct address_space *mapping; sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL); if (!sdp) @@ -109,16 +107,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) INIT_LIST_HEAD(&sdp->sd_sc_inodes_list); - mapping = &sdp->sd_aspace; - - address_space_init_once(mapping); - mapping->a_ops = 
&gfs2_rgrp_aops; - mapping->host = sb->s_bdev->bd_mapping->host; - mapping->flags = 0; - mapping_set_gfp_mask(mapping, GFP_NOFS); - mapping->i_private_data = NULL; - mapping->writeback_index = 0; - spin_lock_init(&sdp->sd_log_lock); atomic_set(&sdp->sd_log_pinned, 0); INIT_LIST_HEAD(&sdp->sd_log_revokes); @@ -226,28 +214,22 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const struct gfs2_sb *str) static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) { - struct super_block *sb = sdp->sd_vfs; - struct page *page; - struct bio_vec bvec; - struct bio bio; + struct gfs2_sb *sb; int err; - page = alloc_page(GFP_KERNEL); - if (unlikely(!page)) + sb = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (unlikely(!sb)) return -ENOMEM; - - bio_init(&bio, sb->s_bdev, &bvec, 1, REQ_OP_READ | REQ_META); - bio.bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); - __bio_add_page(&bio, page, PAGE_SIZE, 0); - - err = submit_bio_wait(&bio); + err = bdev_rw_virt(sdp->sd_vfs->s_bdev, + sector * (sdp->sd_vfs->s_blocksize >> 9), sb, PAGE_SIZE, + REQ_OP_READ | REQ_META); if (err) { pr_warn("error %d reading superblock\n", err); - __free_page(page); + kfree(sb); return err; } - gfs2_sb_in(sdp, page_address(page)); - __free_page(page); + gfs2_sb_in(sdp, sb); + kfree(sb); return gfs2_check_sb(sdp, silent); } @@ -500,7 +482,9 @@ static int init_sb(struct gfs2_sbd *sdp, int silent) sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE); goto out; } - sb_set_blocksize(sb, sdp->sd_sb.sb_bsize); + ret = -EINVAL; + if (!sb_set_blocksize(sb, sdp->sd_sb.sb_bsize)) + goto out; /* Get the root inode */ no_addr = sdp->sd_sb.sb_root_dir.no_addr; @@ -1135,6 +1119,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) int silent = fc->sb_flags & SB_SILENT; struct gfs2_sbd *sdp; struct gfs2_holder mount_gh; + struct address_space *mapping; int error; sdp = init_sbd(sb); @@ -1156,6 +1141,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_NOSEC; sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; + sb->s_d_op = &gfs2_dops; sb->s_export_op = &gfs2_export_ops; sb->s_qcop = &gfs2_quotactl_ops; @@ -1167,6 +1153,9 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) /* Set up the buffer cache and fill in some fake block size values to allow us to read-in the on-disk superblock. 
*/ sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, 512); + error = -EINVAL; + if (!sdp->sd_sb.sb_bsize) + goto fail_free; sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits; sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9; sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift); @@ -1181,9 +1170,21 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) sdp->sd_tune.gt_statfs_quantum = 30; } + /* Set up an address space for metadata writes */ + sdp->sd_inode = new_inode(sb); + error = -ENOMEM; + if (!sdp->sd_inode) + goto fail_free; + sdp->sd_inode->i_ino = GFS2_BAD_INO; + sdp->sd_inode->i_size = OFFSET_MAX; + + mapping = gfs2_aspace(sdp); + mapping->a_ops = &gfs2_rgrp_aops; + mapping_set_gfp_mask(mapping, GFP_NOFS); + error = init_names(sdp, silent); if (error) - goto fail_free; + goto fail_iput; snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name); @@ -1192,7 +1193,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0, sdp->sd_fsname); if (!sdp->sd_glock_wq) - goto fail_free; + goto fail_iput; sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname); @@ -1309,6 +1310,8 @@ fail_delete_wq: fail_glock_wq: if (sdp->sd_glock_wq) destroy_workqueue(sdp->sd_glock_wq); +fail_iput: + iput(sdp->sd_inode); fail_free: free_sbd(sdp); sb->s_fs_info = NULL; diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index f4fe7039f725..24250478b085 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -118,6 +118,7 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd) int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh, unsigned int blkno, struct gfs2_log_header_host *head) { + const u32 zero = 0; u32 hash, crc; if (lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) || @@ -126,7 +127,7 @@ int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh, return 1; hash = crc32(~0, lh, LH_V1_SIZE - 4); - hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */ + hash = ~crc32(hash, &zero, 4); /* assume lh_hash is zero */ if (be32_to_cpu(lh->lh_hash) != hash) return 1; @@ -263,16 +264,12 @@ static void clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); - u32 lblock = head->lh_blkno; - gfs2_replay_incr_blk(jd, &lblock); - gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock, + gfs2_replay_incr_blk(jd, &head->lh_blkno); + head->lh_sequence++; + gfs2_write_log_header(sdp, jd, head->lh_sequence, 0, head->lh_blkno, GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY, REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC); - if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) { - sdp->sd_log_flush_head = lblock; - gfs2_log_incr_head(sdp); - } } @@ -457,7 +454,7 @@ void gfs2_recover_func(struct work_struct *work) if (error) goto fail_gunlock_ji; - error = gfs2_find_jhead(jd, &head, true); + error = gfs2_find_jhead(jd, &head); if (error) goto fail_gunlock_ji; t_jhd = ktime_get(); @@ -533,6 +530,9 @@ void gfs2_recover_func(struct work_struct *work) ktime_ms_delta(t_rep, t_tlck)); } + if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) + gfs2_log_pointers_init(sdp, &head); + gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); if (jlocked) { @@ -580,3 +580,13 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait) return wait ? 
jd->jd_recover_error : 0; } +void gfs2_log_pointers_init(struct gfs2_sbd *sdp, + struct gfs2_log_header_host *head) +{ + sdp->sd_log_sequence = head->lh_sequence + 1; + gfs2_replay_incr_blk(sdp->sd_jdesc, &head->lh_blkno); + sdp->sd_log_tail = head->lh_blkno; + sdp->sd_log_flush_head = head->lh_blkno; + sdp->sd_log_flush_tail = head->lh_blkno; + sdp->sd_log_head = head->lh_blkno; +} diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 6a0fd42e1120..5a5ba72ecd75 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h @@ -29,6 +29,8 @@ void gfs2_recover_func(struct work_struct *work); int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh, unsigned int blkno, struct gfs2_log_header_host *head); +void gfs2_log_pointers_init(struct gfs2_sbd *sdp, + struct gfs2_log_header_host *head); #endif /* __RECOVERY_DOT_H__ */ diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 44e5658b896c..7c518c4ff638 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -134,28 +134,18 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) { struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; - struct gfs2_log_header_host head; int error; j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); if (gfs2_withdrawing_or_withdrawn(sdp)) return -EIO; - error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); - if (error) { - gfs2_consist(sdp); - return error; - } - - if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { - gfs2_consist(sdp); + if (sdp->sd_log_sequence == 0) { + fs_err(sdp, "unknown status of our own journal jid %d", + sdp->sd_lockstruct.ls_jid); return -EIO; } - /* Initialize some head of the log stuff */ - sdp->sd_log_sequence = head.lh_sequence + 1; - gfs2_log_pointers_init(sdp, head.lh_blkno); - error = gfs2_quota_init(sdp); if (!error && gfs2_withdrawing_or_withdrawn(sdp)) error = -EIO; @@ -370,7 +360,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp) error = gfs2_jdesc_check(jd); if (error) break; - error = gfs2_find_jhead(jd, &lh, false); + error = gfs2_find_jhead(jd, &lh); if (error) break; if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { @@ -648,7 +638,7 @@ restart: gfs2_jindex_free(sdp); /* Take apart glock structures and buffer lists */ gfs2_gl_hash_clear(sdp); - truncate_inode_pages_final(&sdp->sd_aspace); + iput(sdp->sd_inode); gfs2_delete_debugfs_file(sdp); gfs2_sys_fs_del(sdp); @@ -674,7 +664,7 @@ static int gfs2_sync_fs(struct super_block *sb, int wait) return sdp->sd_log_error; } -static int gfs2_do_thaw(struct gfs2_sbd *sdp) +static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner) { struct super_block *sb = sdp->sd_vfs; int error; @@ -682,7 +672,7 @@ static int gfs2_do_thaw(struct gfs2_sbd *sdp) error = gfs2_freeze_lock_shared(sdp); if (error) goto fail; - error = thaw_super(sb, FREEZE_HOLDER_USERSPACE); + error = thaw_super(sb, who, freeze_owner); if (!error) return 0; @@ -703,14 +693,14 @@ void gfs2_freeze_func(struct work_struct *work) if (test_bit(SDF_FROZEN, &sdp->sd_flags)) goto freeze_failed; - error = freeze_super(sb, FREEZE_HOLDER_USERSPACE); + error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL); if (error) goto freeze_failed; gfs2_freeze_unlock(sdp); set_bit(SDF_FROZEN, &sdp->sd_flags); - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL); if (error) goto out; @@ -728,10 +718,13 @@ out: /** * gfs2_freeze_super - prevent further writes to the filesystem * @sb: the VFS structure for the filesystem + * @who: freeze flags + * @freeze_owner: owner of the freeze 
* */ -static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) +static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) { struct gfs2_sbd *sdp = sb->s_fs_info; int error; @@ -744,7 +737,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) } for (;;) { - error = freeze_super(sb, FREEZE_HOLDER_USERSPACE); + error = freeze_super(sb, who, freeze_owner); if (error) { fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error); @@ -758,7 +751,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) break; } - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, who, freeze_owner); if (error) goto out; @@ -796,10 +789,13 @@ static int gfs2_freeze_fs(struct super_block *sb) /** * gfs2_thaw_super - reallow writes to the filesystem * @sb: the VFS structure for the filesystem + * @who: freeze flags + * @freeze_owner: owner of the freeze * */ -static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who) +static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) { struct gfs2_sbd *sdp = sb->s_fs_info; int error; @@ -814,7 +810,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who) atomic_inc(&sb->s_active); gfs2_freeze_unlock(sdp); - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, who, freeze_owner); if (!error) { clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags); @@ -1173,74 +1169,6 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) return 0; } -static void gfs2_final_release_pages(struct gfs2_inode *ip) -{ - struct inode *inode = &ip->i_inode; - struct gfs2_glock *gl = ip->i_gl; - - if (unlikely(!gl)) { - /* This can only happen during incomplete inode creation. 
*/ - BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); - return; - } - - truncate_inode_pages(gfs2_glock2aspace(gl), 0); - truncate_inode_pages(&inode->i_data, 0); - - if (atomic_read(&gl->gl_revokes) == 0) { - clear_bit(GLF_LFLUSH, &gl->gl_flags); - clear_bit(GLF_DIRTY, &gl->gl_flags); - } -} - -static int gfs2_dinode_dealloc(struct gfs2_inode *ip) -{ - struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct gfs2_rgrpd *rgd; - struct gfs2_holder gh; - int error; - - if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { - gfs2_consist_inode(ip); - return -EIO; - } - - gfs2_rindex_update(sdp); - - error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); - if (error) - return error; - - rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); - if (!rgd) { - gfs2_consist_inode(ip); - error = -EIO; - goto out_qs; - } - - error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, - LM_FLAG_NODE_SCOPE, &gh); - if (error) - goto out_qs; - - error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, - sdp->sd_jdesc->jd_blocks); - if (error) - goto out_rg_gunlock; - - gfs2_free_di(rgd, ip); - - gfs2_final_release_pages(ip); - - gfs2_trans_end(sdp); - -out_rg_gunlock: - gfs2_glock_dq_uninit(&gh); -out_qs: - gfs2_quota_unhold(ip); - return error; -} - /** * gfs2_glock_put_eventually * @gl: The glock to put @@ -1326,9 +1254,6 @@ static enum evict_behavior evict_should_delete(struct inode *inode, struct gfs2_sbd *sdp = sb->s_fs_info; int ret; - if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) - goto should_delete; - if (gfs2_holder_initialized(&ip->i_iopen_gh) && test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags)) return EVICT_SHOULD_DEFER_DELETE; @@ -1358,7 +1283,6 @@ static enum evict_behavior evict_should_delete(struct inode *inode, if (inode->i_nlink) return EVICT_SHOULD_SKIP_DELETE; -should_delete: if (gfs2_holder_initialized(&ip->i_iopen_gh) && test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) return gfs2_upgrade_iopen_glock(inode); @@ -1382,7 +1306,7 @@ static int evict_unlinked_inode(struct inode *inode) } if (ip->i_eattr) { - ret = gfs2_ea_dealloc(ip); + ret = gfs2_ea_dealloc(ip, true); if (ret) goto out; } diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index ecc699f8d9fc..748125653d6c 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -174,10 +174,10 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len) switch (n) { case 0: - error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE); + error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL); break; case 1: - error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE); + error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL); break; default: return -EINVAL; diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index f8ae2c666fd6..075f7e9abe47 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -226,6 +226,27 @@ out: unlock_buffer(bh); } +void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio, + size_t from, size_t len) +{ + struct buffer_head *head = folio_buffers(folio); + unsigned int bsize = head->b_size; + struct buffer_head *bh; + size_t to = from + len; + size_t start, end; + + for (bh = head, start = 0; bh != head || !start; + bh = bh->b_this_page, start = end) { + end = start + bsize; + if (end <= from) + continue; + if (start >= to) + break; + set_buffer_uptodate(bh); + gfs2_trans_add_data(gl, bh); + } +} + void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) { diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h index f8ce5302280d..790c55f59e61 100644 --- 
a/fs/gfs2/trans.h +++ b/fs/gfs2/trans.h @@ -42,6 +42,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, void gfs2_trans_end(struct gfs2_sbd *sdp); void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh); +void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio, + size_t from, size_t len); void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh); void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len); diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index 13be8d1d228b..d5a1e63fa257 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -73,7 +73,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, "mount.\n"); goto out_unlock; } - error = gfs2_find_jhead(jd, &head, false); + error = gfs2_find_jhead(jd, &head); if (error) { if (verbose) fs_err(sdp, "Error parsing journal for spectator " diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 17ae5070a90e..df9c93de94c7 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c @@ -1383,7 +1383,7 @@ out: return error; } -static int ea_dealloc_block(struct gfs2_inode *ip) +static int ea_dealloc_block(struct gfs2_inode *ip, bool initialized) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; @@ -1416,7 +1416,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip) ip->i_eattr = 0; gfs2_add_inode_blocks(&ip->i_inode, -1); - if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { + if (initialized) { error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { gfs2_trans_add_meta(ip->i_gl, dibh); @@ -1435,11 +1435,12 @@ out_gunlock: /** * gfs2_ea_dealloc - deallocate the extended attribute fork * @ip: the inode + * @initialized: xattrs have been initialized * * Returns: errno */ -int gfs2_ea_dealloc(struct gfs2_inode *ip) +int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized) { int error; @@ -1451,7 +1452,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) if (error) return error; - if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { + if (initialized) { error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); if (error) goto out_quota; @@ -1463,7 +1464,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) } } - error = ea_dealloc_block(ip); + error = ea_dealloc_block(ip, initialized); out_quota: gfs2_quota_unhold(ip); diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h index eb12eb7e37c1..3c9788e0e137 100644 --- a/fs/gfs2/xattr.h +++ b/fs/gfs2/xattr.h @@ -54,7 +54,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name, const void *value, size_t size, int flags, int type); ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size); -int gfs2_ea_dealloc(struct gfs2_inode *ip); +int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized); /* Exported to acl.c */ diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 74801911bc1c..30cf4fe78b3d 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -48,47 +48,19 @@ struct hfsplus_wd { int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf, void **data, blk_opf_t opf) { - const enum req_op op = opf & REQ_OP_MASK; - struct bio *bio; - int ret = 0; - u64 io_size; - loff_t start; - int offset; + u64 io_size = hfsplus_min_io_size(sb); + loff_t start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT; + int offset = start & (io_size - 1); + + if ((opf & REQ_OP_MASK) != REQ_OP_WRITE && data) + *data = (u8 *)buf + offset; /* - * Align sector to hardware sector size and find offset. 
We - * assume that io_size is a power of two, which _should_ - * be true. + * Align sector to hardware sector size and find offset. We assume that + * io_size is a power of two, which _should_ be true. */ - io_size = hfsplus_min_io_size(sb); - start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT; - offset = start & (io_size - 1); sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); - - bio = bio_alloc(sb->s_bdev, 1, opf, GFP_NOIO); - bio->bi_iter.bi_sector = sector; - - if (op != REQ_OP_WRITE && data) - *data = (u8 *)buf + offset; - - while (io_size > 0) { - unsigned int page_offset = offset_in_page(buf); - unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset, - io_size); - - ret = bio_add_page(bio, virt_to_page(buf), len, page_offset); - if (ret != len) { - ret = -EIO; - goto out; - } - io_size -= len; - buf = (u8 *)buf + len; - } - - ret = submit_bio_wait(bio); -out: - bio_put(bio); - return ret < 0 ? ret : 0; + return bdev_rw_virt(sb->s_bdev, sector, buf, io_size, opf); } static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd) diff --git a/fs/internal.h b/fs/internal.h index b9b3e29a73fd..393f6c5c24f6 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -66,6 +66,7 @@ int do_linkat(int olddfd, struct filename *old, int newdfd, int vfs_tmpfile(struct mnt_idmap *idmap, const struct path *parentpath, struct file *file, umode_t mode); +struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); /* * namespace.c @@ -343,3 +344,9 @@ static inline bool path_mounted(const struct path *path) void file_f_owner_release(struct file *file); bool file_seek_cur_needs_f_lock(struct file *file); int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map); +struct dentry *find_next_child(struct dentry *parent, struct dentry *prev); +int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path, + struct kstat *stat, u32 request_mask, + unsigned int query_flags); +int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + struct iattr *attr); diff --git a/fs/ioctl.c b/fs/ioctl.c index c91fd2b46a77..69107a245b4c 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -396,8 +396,8 @@ static int ioctl_fsfreeze(struct file *filp) /* Freeze */ if (sb->s_op->freeze_super) - return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE); - return freeze_super(sb, FREEZE_HOLDER_USERSPACE); + return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL); + return freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL); } static int ioctl_fsthaw(struct file *filp) @@ -409,8 +409,8 @@ static int ioctl_fsthaw(struct file *filp) /* Thaw */ if (sb->s_op->thaw_super) - return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE); - return thaw_super(sb, FREEZE_HOLDER_USERSPACE); + return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL); + return thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL); } static int ioctl_file_dedupe_range(struct file *file, @@ -821,7 +821,8 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, return ioctl_fioasync(fd, filp, argp); case FIOQSIZE: - if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) || + if (S_ISDIR(inode->i_mode) || + (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode)) || S_ISLNK(inode->i_mode)) { loff_t res = inode_get_bytes(inode); return copy_to_user(argp, &res, sizeof(res)) ? 
@@ -856,7 +857,7 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, return ioctl_file_dedupe_range(filp, argp); case FIONREAD: - if (!S_ISREG(inode->i_mode)) + if (!S_ISREG(inode->i_mode) || IS_ANON_FILE(inode)) return vfs_ioctl(filp, cmd, arg); return put_user(i_size_read(inode) - filp->f_pos, @@ -881,7 +882,7 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, return ioctl_get_fs_sysfs_path(filp, argp); default: - if (S_ISREG(inode->i_mode)) + if (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode)) return file_ioctl(filp, cmd, argp); break; } diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 5b08bd417b28..233abf598f65 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -679,11 +679,12 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, return submit_bio_wait(&bio); } -static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, - size_t len, struct folio *folio) +static int __iomap_write_begin(const struct iomap_iter *iter, size_t len, + struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); struct iomap_folio_state *ifs; + loff_t pos = iter->pos; loff_t block_size = i_blocksize(iter->inode); loff_t block_start = round_down(pos, block_size); loff_t block_end = round_up(pos + len, block_size); @@ -741,10 +742,13 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, return 0; } -static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos, - size_t len) +static struct folio *__iomap_get_folio(struct iomap_iter *iter, size_t len) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; + loff_t pos = iter->pos; + + if (!mapping_large_folio_support(iter->inode->i_mapping)) + len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); if (folio_ops && folio_ops->get_folio) return folio_ops->get_folio(iter, pos, len); @@ -752,10 +756,11 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos, return iomap_get_folio(iter, pos, len); } -static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret, +static void __iomap_put_folio(struct iomap_iter *iter, size_t ret, struct folio *folio) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; + loff_t pos = iter->pos; if (folio_ops && folio_ops->put_folio) { folio_ops->put_folio(iter->inode, pos, ret, folio); @@ -765,6 +770,22 @@ static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret, } } +/* trim pos and bytes to within a given folio */ +static loff_t iomap_trim_folio_range(struct iomap_iter *iter, + struct folio *folio, size_t *offset, u64 *bytes) +{ + loff_t pos = iter->pos; + size_t fsize = folio_size(folio); + + WARN_ON_ONCE(pos < folio_pos(folio)); + WARN_ON_ONCE(pos >= folio_pos(folio) + fsize); + + *offset = offset_in_folio(folio, pos); + *bytes = min(*bytes, fsize - *offset); + + return pos; +} + static int iomap_write_begin_inline(const struct iomap_iter *iter, struct folio *folio) { @@ -774,14 +795,22 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter, return iomap_read_inline_data(iter, folio); } -static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, - size_t len, struct folio **foliop) +/* + * Grab and prepare a folio for write based on iter state. Returns the folio, + * offset, and length. Callers can optionally pass a max length *plen, + * otherwise init to zero. 
+ */ +static int iomap_write_begin(struct iomap_iter *iter, struct folio **foliop, + size_t *poffset, u64 *plen) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; const struct iomap *srcmap = iomap_iter_srcmap(iter); + loff_t pos = iter->pos; + u64 len = min_t(u64, SIZE_MAX, iomap_length(iter)); struct folio *folio; int status = 0; + len = min_not_zero(len, *plen); BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length); if (srcmap != &iter->iomap) BUG_ON(pos + len > srcmap->offset + srcmap->length); @@ -789,10 +818,7 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, if (fatal_signal_pending(current)) return -EINTR; - if (!mapping_large_folio_support(iter->inode->i_mapping)) - len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); - - folio = __iomap_get_folio(iter, pos, len); + folio = __iomap_get_folio(iter, len); if (IS_ERR(folio)) return PTR_ERR(folio); @@ -816,24 +842,24 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, } } - if (pos + len > folio_pos(folio) + folio_size(folio)) - len = folio_pos(folio) + folio_size(folio) - pos; + pos = iomap_trim_folio_range(iter, folio, poffset, &len); if (srcmap->type == IOMAP_INLINE) status = iomap_write_begin_inline(iter, folio); else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) status = __block_write_begin_int(folio, pos, len, NULL, srcmap); else - status = __iomap_write_begin(iter, pos, len, folio); + status = __iomap_write_begin(iter, len, folio); if (unlikely(status)) goto out_unlock; *foliop = folio; + *plen = len; return 0; out_unlock: - __iomap_put_folio(iter, pos, 0, folio); + __iomap_put_folio(iter, 0, folio); return status; } @@ -883,10 +909,11 @@ static void iomap_write_end_inline(const struct iomap_iter *iter, * Returns true if all copied bytes have been written to the pagecache, * otherwise return false. */ -static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len, - size_t copied, struct folio *folio) +static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied, + struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); + loff_t pos = iter->pos; if (srcmap->type == IOMAP_INLINE) { iomap_write_end_inline(iter, folio, pos, copied); @@ -917,14 +944,14 @@ static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) struct folio *folio; loff_t old_size; size_t offset; /* Offset into folio */ - size_t bytes; /* Bytes to write to folio */ + u64 bytes; /* Bytes to write to folio */ size_t copied; /* Bytes copied from user */ u64 written; /* Bytes have been written */ - loff_t pos = iter->pos; + loff_t pos; bytes = iov_iter_count(i); retry: - offset = pos & (chunk - 1); + offset = iter->pos & (chunk - 1); bytes = min(chunk - offset, bytes); status = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags); @@ -949,23 +976,21 @@ retry: break; } - status = iomap_write_begin(iter, pos, bytes, &folio); + status = iomap_write_begin(iter, &folio, &offset, &bytes); if (unlikely(status)) { - iomap_write_failed(iter->inode, pos, bytes); + iomap_write_failed(iter->inode, iter->pos, bytes); break; } if (iter->iomap.flags & IOMAP_F_STALE) break; - offset = offset_in_folio(folio, pos); - if (bytes > folio_size(folio) - offset) - bytes = folio_size(folio) - offset; + pos = iter->pos; if (mapping_writably_mapped(mapping)) flush_dcache_folio(folio); copied = copy_folio_from_iter_atomic(folio, offset, bytes, i); - written = iomap_write_end(iter, pos, bytes, copied, folio) ? 
+ written = iomap_write_end(iter, bytes, copied, folio) ? copied : 0; /* @@ -980,7 +1005,7 @@ retry: i_size_write(iter->inode, pos + written); iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; } - __iomap_put_folio(iter, pos, written, folio); + __iomap_put_folio(iter, written, folio); if (old_size < pos) pagecache_isize_extended(iter->inode, old_size, pos); @@ -1276,22 +1301,17 @@ static int iomap_unshare_iter(struct iomap_iter *iter) do { struct folio *folio; size_t offset; - loff_t pos = iter->pos; bool ret; bytes = min_t(u64, SIZE_MAX, bytes); - status = iomap_write_begin(iter, pos, bytes, &folio); + status = iomap_write_begin(iter, &folio, &offset, &bytes); if (unlikely(status)) return status; if (iomap->flags & IOMAP_F_STALE) break; - offset = offset_in_folio(folio, pos); - if (bytes > folio_size(folio) - offset) - bytes = folio_size(folio) - offset; - - ret = iomap_write_end(iter, pos, bytes, bytes, folio); - __iomap_put_folio(iter, pos, bytes, folio); + ret = iomap_write_end(iter, bytes, bytes, folio); + __iomap_put_folio(iter, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; @@ -1351,11 +1371,10 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) do { struct folio *folio; size_t offset; - loff_t pos = iter->pos; bool ret; bytes = min_t(u64, SIZE_MAX, bytes); - status = iomap_write_begin(iter, pos, bytes, &folio); + status = iomap_write_begin(iter, &folio, &offset, &bytes); if (status) return status; if (iter->iomap.flags & IOMAP_F_STALE) @@ -1363,15 +1382,12 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) /* warn about zeroing folios beyond eof that won't write back */ WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size); - offset = offset_in_folio(folio, pos); - if (bytes > folio_size(folio) - offset) - bytes = folio_size(folio) - offset; folio_zero_range(folio, offset, bytes); folio_mark_accessed(folio); - ret = iomap_write_end(iter, pos, bytes, bytes, folio); - __iomap_put_folio(iter, pos, bytes, folio); + ret = iomap_write_end(iter, bytes, bytes, folio); + __iomap_put_folio(iter, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h index 9eab2c8ac3c5..455cc6f90be0 100644 --- a/fs/iomap/trace.h +++ b/fs/iomap/trace.h @@ -99,7 +99,11 @@ DEFINE_RANGE_EVENT(iomap_dio_rw_queued); { IOMAP_FAULT, "FAULT" }, \ { IOMAP_DIRECT, "DIRECT" }, \ { IOMAP_NOWAIT, "NOWAIT" }, \ - { IOMAP_ATOMIC, "ATOMIC" } + { IOMAP_OVERWRITE_ONLY, "OVERWRITE_ONLY" }, \ + { IOMAP_UNSHARE, "UNSHARE" }, \ + { IOMAP_DAX, "DAX" }, \ + { IOMAP_ATOMIC, "ATOMIC" }, \ + { IOMAP_DONTCACHE, "DONTCACHE" } #define IOMAP_F_FLAGS_STRINGS \ { IOMAP_F_NEW, "NEW" }, \ @@ -107,7 +111,14 @@ DEFINE_RANGE_EVENT(iomap_dio_rw_queued); { IOMAP_F_SHARED, "SHARED" }, \ { IOMAP_F_MERGED, "MERGED" }, \ { IOMAP_F_BUFFER_HEAD, "BH" }, \ - { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" } + { IOMAP_F_XATTR, "XATTR" }, \ + { IOMAP_F_BOUNDARY, "BOUNDARY" }, \ + { IOMAP_F_ANON_WRITE, "ANON_WRITE" }, \ + { IOMAP_F_ATOMIC_BIO, "ATOMIC_BIO" }, \ + { IOMAP_F_PRIVATE, "PRIVATE" }, \ + { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }, \ + { IOMAP_F_STALE, "STALE" } + #define IOMAP_DIO_STRINGS \ {IOMAP_DIO_FORCE_WAIT, "DIO_FORCE_WAIT" }, \ @@ -138,7 +149,7 @@ DECLARE_EVENT_CLASS(iomap_class, __entry->bdev = iomap->bdev ? 
iomap->bdev->bd_dev : 0; ), TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr 0x%llx offset 0x%llx " - "length 0x%llx type %s flags %s", + "length 0x%llx type %s (0x%x) flags %s (0x%x)", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, MAJOR(__entry->bdev), MINOR(__entry->bdev), @@ -146,7 +157,9 @@ DECLARE_EVENT_CLASS(iomap_class, __entry->offset, __entry->length, __print_symbolic(__entry->type, IOMAP_TYPE_STRINGS), - __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS)) + __entry->type, + __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS), + __entry->flags) ) #define DEFINE_IOMAP_EVENT(name) \ @@ -185,7 +198,7 @@ TRACE_EVENT(iomap_writepage_map, __entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0; ), TP_printk("dev %d:%d ino 0x%llx bdev %d:%d pos 0x%llx dirty len 0x%llx " - "addr 0x%llx offset 0x%llx length 0x%llx type %s flags %s", + "addr 0x%llx offset 0x%llx length 0x%llx type %s (0x%x) flags %s (0x%x)", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, MAJOR(__entry->bdev), MINOR(__entry->bdev), @@ -195,7 +208,9 @@ TRACE_EVENT(iomap_writepage_map, __entry->offset, __entry->length, __print_symbolic(__entry->type, IOMAP_TYPE_STRINGS), - __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS)) + __entry->type, + __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS), + __entry->flags) ); TRACE_EVENT(iomap_iter, diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index 5124e196c2bf..c1719b5778a1 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -62,6 +62,21 @@ const struct super_operations kernfs_sops = { .show_options = kernfs_sop_show_options, .show_path = kernfs_sop_show_path, + + /* + * sysfs is built on top of kernfs and sysfs provides the power + * management infrastructure to support suspend/hibernate by + * writing to various files in /sys/power/. As filesystems may + * be automatically frozen during suspend/hibernate implementing + * freeze/thaw support for kernfs generically will cause + * deadlocks as the suspending/hibernation initiating task will + * hold a VFS lock that it will then wait upon to be released. + * If freeze/thaw for kernfs is needed talk to the VFS. + */ + .freeze_fs = NULL, + .unfreeze_fs = NULL, + .freeze_super = NULL, + .thaw_super = NULL, }; static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len, @@ -255,7 +270,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn, dput(dentry); return ERR_PTR(-ENOMEM); } - dtmp = lookup_positive_unlocked(name, dentry, strlen(name)); + dtmp = lookup_noperm_positive_unlocked(&QSTR(name), dentry); dput(dentry); kfree(name); if (IS_ERR(dtmp)) diff --git a/fs/libfs.c b/fs/libfs.c index 6393d7c49ee6..9ea0ecc325a8 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -583,7 +583,7 @@ const struct file_operations simple_offset_dir_operations = { .fsync = noop_fsync, }; -static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev) +struct dentry *find_next_child(struct dentry *parent, struct dentry *prev) { struct dentry *child = NULL, *d; @@ -603,6 +603,7 @@ static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev dput(prev); return child; } +EXPORT_SYMBOL(find_next_child); void simple_recursive_removal(struct dentry *dentry, void (*callback)(struct dentry *)) @@ -1647,10 +1648,16 @@ struct inode *alloc_anon_inode(struct super_block *s) * that it already _is_ on the dirty list. 
*/ inode->i_state = I_DIRTY; - inode->i_mode = S_IRUSR | S_IWUSR; + /* + * Historically anonymous inodes didn't have a type at all and + * userspace has come to rely on this. Internally they're just + * regular files but S_IFREG is masked off when reporting + * information to userspace. + */ + inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); - inode->i_flags |= S_PRIVATE; + inode->i_flags |= S_PRIVATE | S_ANON_INODE; simple_inode_init_ts(inode); return inode; } diff --git a/fs/mpage.c b/fs/mpage.c index ad7844de87c3..c5fd821fd30e 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -445,10 +445,9 @@ static void clean_buffers(struct folio *folio, unsigned first_unmapped) try_to_free_buffers(folio); } -static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc, - void *data) +static int mpage_write_folio(struct writeback_control *wbc, struct folio *folio, + struct mpage_data *mpd) { - struct mpage_data *mpd = data; struct bio *bio = mpd->bio; struct address_space *mapping = folio->mapping; struct inode *inode = mapping->host; @@ -656,14 +655,16 @@ mpage_writepages(struct address_space *mapping, struct mpage_data mpd = { .get_block = get_block, }; + struct folio *folio = NULL; struct blk_plug plug; - int ret; + int error; blk_start_plug(&plug); - ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); + while ((folio = writeback_iter(mapping, wbc, folio, &error))) + error = mpage_write_folio(wbc, folio, &mpd); if (mpd.bio) mpage_bio_submit_write(mpd.bio); blk_finish_plug(&plug); - return ret; + return error; } EXPORT_SYMBOL(mpage_writepages); diff --git a/fs/namei.c b/fs/namei.c index 84a0e0b0111c..4bb889fc980b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -571,14 +571,14 @@ int inode_permission(struct mnt_idmap *idmap, int retval; retval = sb_permission(inode->i_sb, inode, mask); - if (retval) + if (unlikely(retval)) return retval; if (unlikely(mask & MAY_WRITE)) { /* * Nobody gets write access to an immutable file. */ - if (IS_IMMUTABLE(inode)) + if (unlikely(IS_IMMUTABLE(inode))) return -EPERM; /* @@ -586,16 +586,16 @@ int inode_permission(struct mnt_idmap *idmap, * written back improperly if their true value is unknown * to the vfs. 
*/ - if (HAS_UNMAPPED_ID(idmap, inode)) + if (unlikely(HAS_UNMAPPED_ID(idmap, inode))) return -EACCES; } retval = do_inode_permission(idmap, inode, mask); - if (retval) + if (unlikely(retval)) return retval; retval = devcgroup_inode_permission(inode, mask); - if (retval) + if (unlikely(retval)) return retval; return security_inode_permission(inode, mask); @@ -1915,13 +1915,13 @@ static const char *pick_link(struct nameidata *nd, struct path *link, unlikely(link->mnt->mnt_flags & MNT_NOSYMFOLLOW)) return ERR_PTR(-ELOOP); - if (!(nd->flags & LOOKUP_RCU)) { + if (unlikely(atime_needs_update(&last->link, inode))) { + if (nd->flags & LOOKUP_RCU) { + if (!try_to_unlazy(nd)) + return ERR_PTR(-ECHILD); + } touch_atime(&last->link); cond_resched(); - } else if (atime_needs_update(&last->link, inode)) { - if (!try_to_unlazy(nd)) - return ERR_PTR(-ECHILD); - touch_atime(&last->link); } error = security_inode_follow_link(link->dentry, inode, @@ -2434,9 +2434,12 @@ static int link_path_walk(const char *name, struct nameidata *nd) nd->flags |= LOOKUP_PARENT; if (IS_ERR(name)) return PTR_ERR(name); - while (*name=='/') - name++; - if (!*name) { + if (*name == '/') { + do { + name++; + } while (unlikely(*name == '/')); + } + if (unlikely(!*name)) { nd->dir_mode = 0; // short-circuit the 'hardening' idiocy return 0; } @@ -2449,7 +2452,7 @@ static int link_path_walk(const char *name, struct nameidata *nd) idmap = mnt_idmap(nd->path.mnt); err = may_lookup(idmap, nd); - if (err) + if (unlikely(err)) return err; nd->last.name = name; @@ -2869,13 +2872,12 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, } EXPORT_SYMBOL(vfs_path_lookup); -static int lookup_one_common(struct mnt_idmap *idmap, - const char *name, struct dentry *base, int len, - struct qstr *this) +static int lookup_noperm_common(struct qstr *qname, struct dentry *base) { - this->name = name; - this->len = len; - this->hash = full_name_hash(base, name, len); + const char *name = qname->name; + u32 len = qname->len; + + qname->hash = full_name_hash(base, name, len); if (!len) return -EACCES; @@ -2892,139 +2894,135 @@ static int lookup_one_common(struct mnt_idmap *idmap, * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { - int err = base->d_op->d_hash(base, this); + int err = base->d_op->d_hash(base, qname); if (err < 0) return err; } + return 0; +} +static int lookup_one_common(struct mnt_idmap *idmap, + struct qstr *qname, struct dentry *base) +{ + int err; + err = lookup_noperm_common(qname, base); + if (err < 0) + return err; return inode_permission(idmap, base->d_inode, MAY_EXEC); } /** - * try_lookup_one_len - filesystem helper to lookup single pathname component - * @name: pathname component to lookup + * try_lookup_noperm - filesystem helper to lookup single pathname component + * @name: qstr storing pathname component to lookup * @base: base directory to lookup from - * @len: maximum length @len should be interpreted to * * Look up a dentry by name in the dcache, returning NULL if it does not * currently exist. The function does not try to create a dentry. * * Note that this routine is purely a helper for filesystem usage and should - * not be called by generic code. + * not be called by generic code. It does no permission checking. * * No locks need be held - only a counted reference to @base is needed. 
* */ -struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len) +struct dentry *try_lookup_noperm(struct qstr *name, struct dentry *base) { - struct qstr this; int err; - err = lookup_one_common(&nop_mnt_idmap, name, base, len, &this); + err = lookup_noperm_common(name, base); if (err) return ERR_PTR(err); - return lookup_dcache(&this, base, 0); + return lookup_dcache(name, base, 0); } -EXPORT_SYMBOL(try_lookup_one_len); +EXPORT_SYMBOL(try_lookup_noperm); /** - * lookup_one_len - filesystem helper to lookup single pathname component - * @name: pathname component to lookup + * lookup_noperm - filesystem helper to lookup single pathname component + * @name: qstr storing pathname component to lookup * @base: base directory to lookup from - * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should - * not be called by generic code. + * not be called by generic code. It does no permission checking. * * The caller must hold base->i_mutex. */ -struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) +struct dentry *lookup_noperm(struct qstr *name, struct dentry *base) { struct dentry *dentry; - struct qstr this; int err; WARN_ON_ONCE(!inode_is_locked(base->d_inode)); - err = lookup_one_common(&nop_mnt_idmap, name, base, len, &this); + err = lookup_noperm_common(name, base); if (err) return ERR_PTR(err); - dentry = lookup_dcache(&this, base, 0); - return dentry ? dentry : __lookup_slow(&this, base, 0); + dentry = lookup_dcache(name, base, 0); + return dentry ? dentry : __lookup_slow(name, base, 0); } -EXPORT_SYMBOL(lookup_one_len); +EXPORT_SYMBOL(lookup_noperm); /** - * lookup_one - filesystem helper to lookup single pathname component + * lookup_one - lookup single pathname component * @idmap: idmap of the mount the lookup is performed from - * @name: pathname component to lookup + * @name: qstr holding pathname component to lookup * @base: base directory to lookup from - * @len: maximum length @len should be interpreted to * - * Note that this routine is purely a helper for filesystem usage and should - * not be called by generic code. + * This can be used for in-kernel filesystem clients such as file servers. * * The caller must hold base->i_mutex. */ -struct dentry *lookup_one(struct mnt_idmap *idmap, const char *name, - struct dentry *base, int len) +struct dentry *lookup_one(struct mnt_idmap *idmap, struct qstr *name, + struct dentry *base) { struct dentry *dentry; - struct qstr this; int err; WARN_ON_ONCE(!inode_is_locked(base->d_inode)); - err = lookup_one_common(idmap, name, base, len, &this); + err = lookup_one_common(idmap, name, base); if (err) return ERR_PTR(err); - dentry = lookup_dcache(&this, base, 0); - return dentry ? dentry : __lookup_slow(&this, base, 0); + dentry = lookup_dcache(name, base, 0); + return dentry ? dentry : __lookup_slow(name, base, 0); } EXPORT_SYMBOL(lookup_one); /** - * lookup_one_unlocked - filesystem helper to lookup single pathname component + * lookup_one_unlocked - lookup single pathname component * @idmap: idmap of the mount the lookup is performed from - * @name: pathname component to lookup + * @name: qstr olding pathname component to lookup * @base: base directory to lookup from - * @len: maximum length @len should be interpreted to * - * Note that this routine is purely a helper for filesystem usage and should - * not be called by generic code. + * This can be used for in-kernel filesystem clients such as file servers. 
* - * Unlike lookup_one_len, it should be called without the parent - * i_mutex held, and will take the i_mutex itself if necessary. + * Unlike lookup_one, it should be called without the parent + * i_rwsem held, and will take the i_rwsem itself if necessary. */ -struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap, - const char *name, struct dentry *base, - int len) +struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap, struct qstr *name, + struct dentry *base) { - struct qstr this; int err; struct dentry *ret; - err = lookup_one_common(idmap, name, base, len, &this); + err = lookup_one_common(idmap, name, base); if (err) return ERR_PTR(err); - ret = lookup_dcache(&this, base, 0); + ret = lookup_dcache(name, base, 0); if (!ret) - ret = lookup_slow(&this, base, 0); + ret = lookup_slow(name, base, 0); return ret; } EXPORT_SYMBOL(lookup_one_unlocked); /** - * lookup_one_positive_unlocked - filesystem helper to lookup single - * pathname component + * lookup_one_positive_unlocked - lookup single pathname component * @idmap: idmap of the mount the lookup is performed from - * @name: pathname component to lookup + * @name: qstr holding pathname component to lookup * @base: base directory to lookup from - * @len: maximum length @len should be interpreted to * * This helper will yield ERR_PTR(-ENOENT) on negatives. The helper returns * known positive or ERR_PTR(). This is what most of the users want. @@ -3033,16 +3031,15 @@ EXPORT_SYMBOL(lookup_one_unlocked); * time, so callers of lookup_one_unlocked() need to be very careful; pinned * positives have >d_inode stable, so this one avoids such problems. * - * Note that this routine is purely a helper for filesystem usage and should - * not be called by generic code. + * This can be used for in-kernel filesystem clients such as file servers. * - * The helper should be called without i_mutex held. + * The helper should be called without i_rwsem held. */ struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap, - const char *name, - struct dentry *base, int len) + struct qstr *name, + struct dentry *base) { - struct dentry *ret = lookup_one_unlocked(idmap, name, base, len); + struct dentry *ret = lookup_one_unlocked(idmap, name, base); if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) { dput(ret); @@ -3053,38 +3050,48 @@ struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap, EXPORT_SYMBOL(lookup_one_positive_unlocked); /** - * lookup_one_len_unlocked - filesystem helper to lookup single pathname component + * lookup_noperm_unlocked - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from - * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should - * not be called by generic code. + * not be called by generic code. It does no permission checking. * - * Unlike lookup_one_len, it should be called without the parent - * i_mutex held, and will take the i_mutex itself if necessary. + * Unlike lookup_noperm, it should be called without the parent + * i_rwsem held, and will take the i_rwsem itself if necessary. 
*/ -struct dentry *lookup_one_len_unlocked(const char *name, - struct dentry *base, int len) +struct dentry *lookup_noperm_unlocked(struct qstr *name, struct dentry *base) { - return lookup_one_unlocked(&nop_mnt_idmap, name, base, len); + struct dentry *ret; + + ret = try_lookup_noperm(name, base); + if (!ret) + ret = lookup_slow(name, base, 0); + return ret; } -EXPORT_SYMBOL(lookup_one_len_unlocked); +EXPORT_SYMBOL(lookup_noperm_unlocked); /* - * Like lookup_one_len_unlocked(), except that it yields ERR_PTR(-ENOENT) + * Like lookup_noperm_unlocked(), except that it yields ERR_PTR(-ENOENT) * on negatives. Returns known positive or ERR_PTR(); that's what * most of the users want. Note that pinned negative with unlocked parent - * _can_ become positive at any time, so callers of lookup_one_len_unlocked() + * _can_ become positive at any time, so callers of lookup_noperm_unlocked() * need to be very careful; pinned positives have ->d_inode stable, so * this one avoids such problems. */ -struct dentry *lookup_positive_unlocked(const char *name, - struct dentry *base, int len) +struct dentry *lookup_noperm_positive_unlocked(struct qstr *name, + struct dentry *base) { - return lookup_one_positive_unlocked(&nop_mnt_idmap, name, base, len); + struct dentry *ret; + + ret = lookup_noperm_unlocked(name, base); + if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) { + dput(ret); + ret = ERR_PTR(-ENOENT); + } + return ret; } -EXPORT_SYMBOL(lookup_positive_unlocked); +EXPORT_SYMBOL(lookup_noperm_positive_unlocked); #ifdef CONFIG_UNIX98_PTYS int path_pts(struct path *path) @@ -5403,25 +5410,25 @@ EXPORT_SYMBOL(vfs_get_link); static char *__page_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { - struct page *page; + struct folio *folio; struct address_space *mapping = inode->i_mapping; if (!dentry) { - page = find_get_page(mapping, 0); - if (!page) + folio = filemap_get_folio(mapping, 0); + if (IS_ERR(folio)) return ERR_PTR(-ECHILD); - if (!PageUptodate(page)) { - put_page(page); + if (!folio_test_uptodate(folio)) { + folio_put(folio); return ERR_PTR(-ECHILD); } } else { - page = read_mapping_page(mapping, 0, NULL); - if (IS_ERR(page)) - return (char*)page; + folio = read_mapping_folio(mapping, 0, NULL); + if (IS_ERR(folio)) + return ERR_CAST(folio); } - set_delayed_call(callback, page_put_link, page); + set_delayed_call(callback, page_put_link, folio); BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); - return page_address(page); + return folio_address(folio); } const char *page_get_link_raw(struct dentry *dentry, struct inode *inode, @@ -5431,6 +5438,17 @@ const char *page_get_link_raw(struct dentry *dentry, struct inode *inode, } EXPORT_SYMBOL_GPL(page_get_link_raw); +/** + * page_get_link() - An implementation of the get_link inode_operation. + * @dentry: The directory entry which is the symlink. + * @inode: The inode for the symlink. + * @callback: Used to drop the reference to the symlink. + * + * Filesystems which store their symlinks in the page cache should use + * this to implement the get_link() member of their inode_operations. + * + * Return: A pointer to the NUL-terminated symlink. 
+ */ const char *page_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { @@ -5440,12 +5458,25 @@ const char *page_get_link(struct dentry *dentry, struct inode *inode, nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); return kaddr; } - EXPORT_SYMBOL(page_get_link); +/** + * page_put_link() - Drop the reference to the symlink. + * @arg: The folio which contains the symlink. + * + * This is used internally by page_get_link(). It is exported for use + * by filesystems which need to implement a variant of page_get_link() + * themselves. Despite the apparent symmetry, filesystems which use + * page_get_link() do not need to call page_put_link(). + * + * The argument, while it has a void pointer type, must be a pointer to + * the folio which was retrieved from the page cache. The delayed_call + * infrastructure is used to drop the reference count once the caller + * is done with the symlink. + */ void page_put_link(void *arg) { - put_page(arg); + folio_put(arg); } EXPORT_SYMBOL(page_put_link); diff --git a/fs/namespace.c b/fs/namespace.c index 1b466c54a357..552ad7f4d18b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -355,12 +355,13 @@ static struct mount *alloc_vfsmnt(const char *name) if (err) goto out_free_cache; - if (name) { + if (name) mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL_ACCOUNT); - if (!mnt->mnt_devname) - goto out_free_id; - } + else + mnt->mnt_devname = "none"; + if (!mnt->mnt_devname) + goto out_free_id; #ifdef CONFIG_SMP mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); @@ -1264,7 +1265,7 @@ struct vfsmount *vfs_create_mount(struct fs_context *fc) if (!fc->root) return ERR_PTR(-EINVAL); - mnt = alloc_vfsmnt(fc->source ?: "none"); + mnt = alloc_vfsmnt(fc->source); if (!mnt) return ERR_PTR(-ENOMEM); @@ -5491,7 +5492,7 @@ static int statmount_sb_source(struct kstatmount *s, struct seq_file *seq) seq->buf[seq->count] = '\0'; seq->count = start; seq_commit(seq, string_unescape_inplace(seq->buf + start, UNESCAPE_OCTAL)); - } else if (r->mnt_devname) { + } else { seq_puts(seq, r->mnt_devname); } return 0; @@ -5804,7 +5805,9 @@ static int grab_requested_root(struct mnt_namespace *ns, struct path *root) STATMOUNT_SB_SOURCE | \ STATMOUNT_OPT_ARRAY | \ STATMOUNT_OPT_SEC_ARRAY | \ - STATMOUNT_SUPPORTED_MASK) + STATMOUNT_SUPPORTED_MASK | \ + STATMOUNT_MNT_UIDMAP | \ + STATMOUNT_MNT_GIDMAP) static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id, struct mnt_namespace *ns) @@ -5839,13 +5842,29 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id, return err; s->root = root; - s->idmap = mnt_idmap(s->mnt); - if (s->mask & STATMOUNT_SB_BASIC) - statmount_sb_basic(s); + /* + * Note that mount properties in mnt->mnt_flags, mnt->mnt_idmap + * can change concurrently as we only hold the read-side of the + * namespace semaphore and mount properties may change with only + * the mount lock held. + * + * We could sample the mount lock sequence counter to detect + * those changes and retry. But it's not worth it. Worst that + * happens is that the mnt->mnt_idmap pointer is already changed + * while mnt->mnt_flags isn't or vice versa. So what. + * + * Both mnt->mnt_flags and mnt->mnt_idmap are set and retrieved + * via READ_ONCE()/WRITE_ONCE() and guard against theoretical + * torn read/write. That's all we care about right now.
+ */ + s->idmap = mnt_idmap(s->mnt); if (s->mask & STATMOUNT_MNT_BASIC) statmount_mnt_basic(s); + if (s->mask & STATMOUNT_SB_BASIC) + statmount_sb_basic(s); + if (s->mask & STATMOUNT_PROPAGATE_FROM) statmount_propagate_from(s); @@ -6157,6 +6176,10 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, !ns_capable_noaudit(ns->user_ns, CAP_SYS_ADMIN)) return -ENOENT; + /* + * We only need to guard against mount topology changes as + * listmount() doesn't care about any mount properties. + */ scoped_guard(rwsem_read, &namespace_sem) ret = do_listmount(ns, kreq.mnt_id, last_mnt_id, kmnt_ids, nr_mnt_ids, (flags & LISTMOUNT_REVERSE)); diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index 1c62a5a9f51d..58146e935402 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c @@ -40,31 +40,31 @@ static const char *nfs_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { - struct page *page; + struct folio *folio; void *err; if (!dentry) { err = ERR_PTR(nfs_revalidate_mapping_rcu(inode)); if (err) return err; - page = find_get_page(inode->i_mapping, 0); - if (!page) + folio = filemap_get_folio(inode->i_mapping, 0); + if (IS_ERR(folio)) return ERR_PTR(-ECHILD); - if (!PageUptodate(page)) { - put_page(page); + if (!folio_test_uptodate(folio)) { + folio_put(folio); return ERR_PTR(-ECHILD); } } else { err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping)); if (err) return err; - page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler, + folio = read_cache_folio(&inode->i_data, 0, nfs_symlink_filler, NULL); - if (IS_ERR(page)) - return ERR_CAST(page); + if (IS_ERR(folio)) + return ERR_CAST(folio); } - set_delayed_call(done, page_put_link, page); - return page_address(page); + set_delayed_call(done, page_put_link, folio); + return folio_address(folio); } /* diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index bf77399696a7..b55467911648 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -464,18 +464,17 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry) sdentry = NULL; do { - int slen; dput(sdentry); sillycounter++; - slen = scnprintf(silly, sizeof(silly), - SILLYNAME_PREFIX "%0*llx%0*x", - SILLYNAME_FILEID_LEN, fileid, - SILLYNAME_COUNTER_LEN, sillycounter); + scnprintf(silly, sizeof(silly), + SILLYNAME_PREFIX "%0*llx%0*x", + SILLYNAME_FILEID_LEN, fileid, + SILLYNAME_COUNTER_LEN, sillycounter); dfprintk(VFS, "NFS: trying to rename %pd to %s\n", dentry, silly); - sdentry = lookup_one_len(silly, dentry->d_parent, slen); + sdentry = lookup_noperm(&QSTR(silly), dentry->d_parent); /* * N.B. Better to return EBUSY here ... it could be * dangerous to delete the file while it's in use. 
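The hunks above, and most of the nfsd, overlayfs and smb changes that follow, convert callers from the old (name, len) based lookup_one_len()/lookup_positive_unlocked() interface to the new qstr based helpers. A minimal sketch of that conversion pattern, assuming a hypothetical caller example_find_child() that is not part of this series, might look like this:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/namei.h>

/*
 * Hypothetical helper, for illustration only: find a child dentry by
 * name without permission checking, as an in-kernel filesystem might.
 * The qstr is built on the fly with QSTR_LEN() instead of passing a
 * separate (name, len) pair to the lookup helper.
 */
static struct dentry *example_find_child(struct dentry *dir,
					 const char *name, int len)
{
	struct dentry *child;

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
	/* Previously: child = lookup_one_len(name, dir, len); */
	child = lookup_noperm(&QSTR_LEN(name, len), dir);
	inode_unlock(d_inode(dir));

	return child;
}

Where the caller should honour the mount's idmapping and permission checks, lookup_one(mnt_idmap(mnt), &QSTR_LEN(name, len), dir) is the counterpart, as the nfsd conversions that follow show.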
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 372bdcf5e07a..ac1731eb34ab 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -284,7 +284,9 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp, inode_lock_nested(inode, I_MUTEX_PARENT); - child = lookup_one_len(argp->name, parent, argp->len); + child = lookup_one(&nop_mnt_idmap, + &QSTR_LEN(argp->name, argp->len), + parent); if (IS_ERR(child)) { status = nfserrno(PTR_ERR(child)); goto out; diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index a7a07470c1f8..ef4971d71ac4 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -1001,7 +1001,9 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, } else dchild = dget(dparent); } else - dchild = lookup_positive_unlocked(name, dparent, namlen); + dchild = lookup_one_positive_unlocked(&nop_mnt_idmap, + &QSTR_LEN(name, namlen), + dparent); if (IS_ERR(dchild)) return rv; if (d_mountpoint(dchild)) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index b397246dae7b..fd560dcf6059 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -266,7 +266,9 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp, inode_lock_nested(inode, I_MUTEX_PARENT); - child = lookup_one_len(open->op_fname, parent, open->op_fnamelen); + child = lookup_one(&nop_mnt_idmap, + &QSTR_LEN(open->op_fname, open->op_fnamelen), + parent); if (IS_ERR(child)) { status = nfserrno(PTR_ERR(child)); goto out; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index c1d9bd07285f..acde3edab733 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -218,7 +218,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) /* lock the parent */ inode_lock(d_inode(dir)); - dentry = lookup_one_len(dname, dir, HEXDIR_LEN-1); + dentry = lookup_one(&nop_mnt_idmap, &QSTR(dname), dir); if (IS_ERR(dentry)) { status = PTR_ERR(dentry); goto out_unlock; @@ -316,7 +316,8 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) list_for_each_entry_safe(entry, tmp, &ctx.names, list) { if (!status) { struct dentry *dentry; - dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1); + dentry = lookup_one(&nop_mnt_idmap, + &QSTR(entry->name), dir); if (IS_ERR(dentry)) { status = PTR_ERR(dentry); break; @@ -339,16 +340,16 @@ nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn) } static int -nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn) +nfsd4_unlink_clid_dir(char *name, struct nfsd_net *nn) { struct dentry *dir, *dentry; int status; - dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name); + dprintk("NFSD: nfsd4_unlink_clid_dir. 
name %s\n", name); dir = nn->rec_file->f_path.dentry; inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); - dentry = lookup_one_len(name, dir, namlen); + dentry = lookup_one(&nop_mnt_idmap, &QSTR(name), dir); if (IS_ERR(dentry)) { status = PTR_ERR(dentry); goto out_unlock; @@ -408,7 +409,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp) if (status < 0) goto out_drop_write; - status = nfsd4_unlink_clid_dir(dname, HEXDIR_LEN-1, nn); + status = nfsd4_unlink_clid_dir(dname, nn); nfs4_reset_creds(original_cred); if (status == 0) { vfs_fsync(nn->rec_file, 0); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index e67420729ecd..fe876395985a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3812,7 +3812,9 @@ nfsd4_encode_entry4_fattr(struct nfsd4_readdir *cd, const char *name, __be32 nfserr; int ignore_crossmnt = 0; - dentry = lookup_positive_unlocked(name, cd->rd_fhp->fh_dentry, namlen); + dentry = lookup_one_positive_unlocked(&nop_mnt_idmap, + &QSTR_LEN(name, namlen), + cd->rd_fhp->fh_dentry); if (IS_ERR(dentry)) return nfserrno(PTR_ERR(dentry)); diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 6dda081eb24c..6370ac0a85fd 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -312,7 +312,8 @@ nfsd_proc_create(struct svc_rqst *rqstp) } inode_lock_nested(dirfhp->fh_dentry->d_inode, I_MUTEX_PARENT); - dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len); + dchild = lookup_one(&nop_mnt_idmap, &QSTR_LEN(argp->name, argp->len), + dirfhp->fh_dentry); if (IS_ERR(dchild)) { resp->status = nfserrno(PTR_ERR(dchild)); goto out_unlock; @@ -331,7 +332,7 @@ nfsd_proc_create(struct svc_rqst *rqstp) */ resp->status = nfserr_acces; if (!newfhp->fh_dentry) { - printk(KERN_WARNING + printk(KERN_WARNING "nfsd_proc_create: file handle not verified\n"); goto out_unlock; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 9abdc4b75813..160a839af405 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -264,7 +264,8 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out_nfserr; } } else { - dentry = lookup_one_len_unlocked(name, dparent, len); + dentry = lookup_one_unlocked(&nop_mnt_idmap, + &QSTR_LEN(name, len), dparent); host_err = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_nfserr; @@ -922,7 +923,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, * directories, but we never have and it doesn't seem to have * caused anyone a problem. If we were to change this, note * also that our filldir callbacks would need a variant of - * lookup_one_len that doesn't check permissions. + * lookup_one_positive_unlocked() that doesn't check permissions. 
*/ if (type == S_IFREG) may_flags |= NFSD_MAY_OWNER_OVERRIDE; @@ -1554,7 +1555,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, return nfserrno(host_err); inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT); - dchild = lookup_one_len(fname, dentry, flen); + dchild = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), dentry); host_err = PTR_ERR(dchild); if (IS_ERR(dchild)) { err = nfserrno(host_err); @@ -1659,7 +1660,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, dentry = fhp->fh_dentry; inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT); - dnew = lookup_one_len(fname, dentry, flen); + dnew = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), dentry); if (IS_ERR(dnew)) { err = nfserrno(PTR_ERR(dnew)); inode_unlock(dentry->d_inode); @@ -1734,7 +1735,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, dirp = d_inode(ddir); inode_lock_nested(dirp, I_MUTEX_PARENT); - dnew = lookup_one_len(name, ddir, len); + dnew = lookup_one(&nop_mnt_idmap, &QSTR_LEN(name, len), ddir); if (IS_ERR(dnew)) { host_err = PTR_ERR(dnew); goto out_unlock; @@ -1867,7 +1868,7 @@ retry: if (err != nfs_ok) goto out_unlock; - odentry = lookup_one_len(fname, fdentry, flen); + odentry = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), fdentry); host_err = PTR_ERR(odentry); if (IS_ERR(odentry)) goto out_nfserr; @@ -1880,7 +1881,7 @@ retry: goto out_dput_old; type = d_inode(odentry)->i_mode & S_IFMT; - ndentry = lookup_one_len(tname, tdentry, tlen); + ndentry = lookup_one(&nop_mnt_idmap, &QSTR_LEN(tname, tlen), tdentry); host_err = PTR_ERR(ndentry); if (IS_ERR(ndentry)) goto out_dput_old; @@ -1998,7 +1999,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, dirp = d_inode(dentry); inode_lock_nested(dirp, I_MUTEX_PARENT); - rdentry = lookup_one_len(fname, dentry, flen); + rdentry = lookup_one(&nop_mnt_idmap, &QSTR_LEN(fname, flen), dentry); host_err = PTR_ERR(rdentry); if (IS_ERR(rdentry)) goto out_unlock; diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index d6cd81163030..135c49c5d848 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -9,12 +9,13 @@ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/cred.h> -#include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/vmalloc.h> #include <linux/writeback.h> #include <linux/seq_file.h> #include <linux/crc-itu-t.h> +#include <linux/fs_context.h> +#include <linux/fs_parser.h> #include "omfs.h" MODULE_AUTHOR("Bob Copeland <me@bobcopeland.com>"); @@ -384,79 +385,83 @@ nomem: return -ENOMEM; } +struct omfs_mount_options { + kuid_t s_uid; + kgid_t s_gid; + int s_dmask; + int s_fmask; +}; + enum { - Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err + Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, }; -static const match_table_t tokens = { - {Opt_uid, "uid=%u"}, - {Opt_gid, "gid=%u"}, - {Opt_umask, "umask=%o"}, - {Opt_dmask, "dmask=%o"}, - {Opt_fmask, "fmask=%o"}, - {Opt_err, NULL}, +static const struct fs_parameter_spec omfs_param_spec[] = { + fsparam_uid ("uid", Opt_uid), + fsparam_gid ("gid", Opt_gid), + fsparam_u32oct ("umask", Opt_umask), + fsparam_u32oct ("dmask", Opt_dmask), + fsparam_u32oct ("fmask", Opt_fmask), + {} }; -static int parse_options(char *options, struct omfs_sb_info *sbi) +static int +omfs_parse_param(struct fs_context *fc, struct fs_parameter *param) { - char *p; - substring_t args[MAX_OPT_ARGS]; - int option; - - if (!options) - return 1; - - while ((p = strsep(&options, ",")) != NULL) { - int token; - if (!*p) - continue; - - token = match_token(p, tokens, args); - switch 
(token) { - case Opt_uid: - if (match_int(&args[0], &option)) - return 0; - sbi->s_uid = make_kuid(current_user_ns(), option); - if (!uid_valid(sbi->s_uid)) - return 0; - break; - case Opt_gid: - if (match_int(&args[0], &option)) - return 0; - sbi->s_gid = make_kgid(current_user_ns(), option); - if (!gid_valid(sbi->s_gid)) - return 0; - break; - case Opt_umask: - if (match_octal(&args[0], &option)) - return 0; - sbi->s_fmask = sbi->s_dmask = option; - break; - case Opt_dmask: - if (match_octal(&args[0], &option)) - return 0; - sbi->s_dmask = option; - break; - case Opt_fmask: - if (match_octal(&args[0], &option)) - return 0; - sbi->s_fmask = option; - break; - default: - return 0; - } + struct omfs_mount_options *opts = fc->fs_private; + int token; + struct fs_parse_result result; + + /* All options are ignored on remount */ + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) + return 0; + + token = fs_parse(fc, omfs_param_spec, param, &result); + if (token < 0) + return token; + + switch (token) { + case Opt_uid: + opts->s_uid = result.uid; + break; + case Opt_gid: + opts->s_gid = result.gid; + break; + case Opt_umask: + opts->s_fmask = opts->s_dmask = result.uint_32; + break; + case Opt_dmask: + opts->s_dmask = result.uint_32; + break; + case Opt_fmask: + opts->s_fmask = result.uint_32; + break; + default: + return -EINVAL; } - return 1; + + return 0; } -static int omfs_fill_super(struct super_block *sb, void *data, int silent) +static void +omfs_set_options(struct omfs_sb_info *sbi, struct omfs_mount_options *opts) +{ + sbi->s_uid = opts->s_uid; + sbi->s_gid = opts->s_gid; + sbi->s_dmask = opts->s_dmask; + sbi->s_fmask = opts->s_fmask; +} + +static int omfs_fill_super(struct super_block *sb, struct fs_context *fc) { struct buffer_head *bh, *bh2; struct omfs_super_block *omfs_sb; struct omfs_root_block *omfs_rb; struct omfs_sb_info *sbi; struct inode *root; + struct omfs_mount_options *parsed_opts = fc->fs_private; int ret = -EINVAL; + int silent = fc->sb_flags & SB_SILENT; sbi = kzalloc(sizeof(struct omfs_sb_info), GFP_KERNEL); if (!sbi) @@ -464,12 +469,7 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_fs_info = sbi; - sbi->s_uid = current_uid(); - sbi->s_gid = current_gid(); - sbi->s_dmask = sbi->s_fmask = current_umask(); - - if (!parse_options((char *) data, sbi)) - goto end; + omfs_set_options(sbi, parsed_opts); sb->s_maxbytes = 0xffffffff; @@ -594,18 +594,50 @@ end: return ret; } -static struct dentry *omfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int omfs_get_tree(struct fs_context *fc) +{ + return get_tree_bdev(fc, omfs_fill_super); +} + +static void omfs_free_fc(struct fs_context *fc); + +static const struct fs_context_operations omfs_context_ops = { + .parse_param = omfs_parse_param, + .get_tree = omfs_get_tree, + .free = omfs_free_fc, +}; + +static int omfs_init_fs_context(struct fs_context *fc) +{ + struct omfs_mount_options *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return -ENOMEM; + + /* Set mount options defaults */ + opts->s_uid = current_uid(); + opts->s_gid = current_gid(); + opts->s_dmask = opts->s_fmask = current_umask(); + + fc->fs_private = opts; + fc->ops = &omfs_context_ops; + + return 0; +} + +static void omfs_free_fc(struct fs_context *fc) { - return mount_bdev(fs_type, flags, dev_name, data, omfs_fill_super); + kfree(fc->fs_private); } static struct file_system_type omfs_fs_type = { - .owner = THIS_MODULE, - .name = "omfs", - .mount = omfs_mount, - 
.kill_sb = kill_block_super, - .fs_flags = FS_REQUIRES_DEV, + .owner = THIS_MODULE, + .name = "omfs", + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, + .init_fs_context = omfs_init_fs_context, + .parameters = omfs_param_spec, }; MODULE_ALIAS_FS("omfs"); diff --git a/fs/open.c b/fs/open.c index a9063cca9911..7828234a7caa 100644 --- a/fs/open.c +++ b/fs/open.c @@ -60,7 +60,10 @@ int do_truncate(struct mnt_idmap *idmap, struct dentry *dentry, if (ret) newattrs.ia_valid |= ret | ATTR_FORCE; - inode_lock(dentry->d_inode); + ret = inode_lock_killable(dentry->d_inode); + if (ret) + return ret; + /* Note any delegations or leases have already been broken: */ ret = notify_change(idmap, dentry, &newattrs, NULL); inode_unlock(dentry->d_inode); @@ -635,7 +638,9 @@ int chmod_common(const struct path *path, umode_t mode) if (error) return error; retry_deleg: - inode_lock(inode); + error = inode_lock_killable(inode); + if (error) + goto out_mnt_unlock; error = security_path_chmod(path, mode); if (error) goto out_unlock; @@ -650,6 +655,7 @@ out_unlock: if (!error) goto retry_deleg; } +out_mnt_unlock: mnt_drop_write(path->mnt); return error; } @@ -769,7 +775,9 @@ retry_deleg: return -EINVAL; if ((group != (gid_t)-1) && !setattr_vfsgid(&newattrs, gid)) return -EINVAL; - inode_lock(inode); + error = inode_lock_killable(inode); + if (error) + return error; if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_PRIV | setattr_should_drop_sgid(idmap, inode); diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 5ac743c6bc2e..08a6f372a352 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -32,12 +32,13 @@ static int orangefs_writepage_locked(struct folio *folio, len = i_size_read(inode); if (folio->private) { wr = folio->private; - WARN_ON(wr->pos >= len); off = wr->pos; - if (off + wr->len > len) + if ((off + wr->len > len) && (off <= len)) wlen = len - off; else wlen = wr->len; + if (wlen == 0) + wlen = wr->len; } else { WARN_ON(1); off = folio_pos(folio); @@ -46,8 +47,6 @@ static int orangefs_writepage_locked(struct folio *folio, if (wlen > len - off) wlen = len - off; } - /* Should've been handled in orangefs_invalidate_folio. */ - WARN_ON(off == len || off + wlen > len); WARN_ON(wlen == 0); bvec_set_folio(&bv, folio, wlen, offset_in_folio(folio, off)); @@ -320,6 +319,8 @@ static int orangefs_write_begin(struct file *file, wr->len += len; goto okay; } else { + wr->pos = pos; + wr->len = len; ret = orangefs_launder_folio(folio); if (ret) return ret; diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index 444aeeccb6da..83f80fdb1567 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -385,11 +385,9 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected, */ take_dentry_name_snapshot(&name, real); /* - * No idmap handling here: it's an internal lookup. Could skip - * permission checking altogether, but for now just use non-idmap - * transformed ids. + * No idmap handling here: it's an internal lookup. 
*/ - this = lookup_one_len(name.name.name, connected, name.name.len); + this = lookup_noperm(&name.name, connected); release_dentry_name_snapshot(&name); err = PTR_ERR(this); if (IS_ERR(this)) { diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index be5c65d6f848..bf722daf19a9 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -205,8 +205,8 @@ static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d, struct dentry *base, int len, bool drop_negative) { - struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt), name, - base, len); + struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt), + &QSTR_LEN(name, len), base); if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) { if (drop_negative && ret->d_lockref.count == 1) { @@ -757,7 +757,7 @@ struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh) if (err) return ERR_PTR(err); - index = lookup_positive_unlocked(name.name, ofs->workdir, name.len); + index = lookup_noperm_positive_unlocked(&name, ofs->workdir); kfree(name.name); if (IS_ERR(index)) { if (PTR_ERR(index) == -ENOENT) @@ -789,8 +789,8 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper, if (err) return ERR_PTR(err); - index = lookup_one_positive_unlocked(ovl_upper_mnt_idmap(ofs), name.name, - ofs->workdir, name.len); + index = lookup_one_positive_unlocked(ovl_upper_mnt_idmap(ofs), &name, + ofs->workdir); if (IS_ERR(index)) { err = PTR_ERR(index); if (err == -ENOENT) { @@ -1371,7 +1371,7 @@ out: bool ovl_lower_positive(struct dentry *dentry) { struct ovl_entry *poe = OVL_E(dentry->d_parent); - const struct qstr *name = &dentry->d_name; + struct qstr *name = &dentry->d_name; const struct cred *old_cred; unsigned int i; bool positive = false; @@ -1396,7 +1396,7 @@ bool ovl_lower_positive(struct dentry *dentry) this = lookup_one_positive_unlocked( mnt_idmap(parentpath->layer->mnt), - name->name, parentpath->dentry, name->len); + name, parentpath->dentry); if (IS_ERR(this)) { switch (PTR_ERR(this)) { case -ENOENT: diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index aef942a758ce..8baaba0a3fe5 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -402,7 +402,7 @@ static inline struct dentry *ovl_lookup_upper(struct ovl_fs *ofs, const char *name, struct dentry *base, int len) { - return lookup_one(ovl_upper_mnt_idmap(ofs), name, base, len); + return lookup_one(ovl_upper_mnt_idmap(ofs), &QSTR_LEN(name, len), base); } static inline bool ovl_open_flags_need_copy_up(int flags) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 881ec5592da5..44e208da417c 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -271,7 +271,6 @@ static bool ovl_fill_merge(struct dir_context *ctx, const char *name, static int ovl_check_whiteouts(const struct path *path, struct ovl_readdir_data *rdd) { int err; - struct ovl_cache_entry *p; struct dentry *dentry, *dir = path->dentry; const struct cred *old_cred; @@ -280,9 +279,11 @@ static int ovl_check_whiteouts(const struct path *path, struct ovl_readdir_data err = down_write_killable(&dir->d_inode->i_rwsem); if (!err) { while (rdd->first_maybe_whiteout) { - p = rdd->first_maybe_whiteout; + struct ovl_cache_entry *p = + rdd->first_maybe_whiteout; rdd->first_maybe_whiteout = p->next_maybe_whiteout; - dentry = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len); + dentry = lookup_one(mnt_idmap(path->mnt), + &QSTR_LEN(p->name, p->len), dir); if (!IS_ERR(dentry)) { p->is_whiteout = 
ovl_is_whiteout(dentry); dput(dentry); @@ -351,6 +352,7 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list, struct path realpath; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_merge, + .ctx.count = INT_MAX, .dentry = dentry, .list = list, .root = root, @@ -492,7 +494,7 @@ static int ovl_cache_update(const struct path *path, struct ovl_cache_entry *p, } } /* This checks also for xwhiteouts */ - this = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len); + this = lookup_one(mnt_idmap(path->mnt), &QSTR_LEN(p->name, p->len), dir); if (IS_ERR_OR_NULL(this) || !this->d_inode) { /* Mark a stale entry */ p->is_whiteout = true; @@ -571,6 +573,7 @@ static int ovl_dir_read_impure(const struct path *path, struct list_head *list, struct ovl_cache_entry *p, *n; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, + .ctx.count = INT_MAX, .list = list, .root = root, }; @@ -672,6 +675,7 @@ static bool ovl_fill_real(struct dir_context *ctx, const char *name, struct ovl_readdir_translate *rdt = container_of(ctx, struct ovl_readdir_translate, ctx); struct dir_context *orig_ctx = rdt->orig_ctx; + bool res; if (rdt->parent_ino && strcmp(name, "..") == 0) { ino = rdt->parent_ino; @@ -686,7 +690,10 @@ static bool ovl_fill_real(struct dir_context *ctx, const char *name, name, namelen, rdt->xinowarn); } - return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type); + res = orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type); + ctx->count = orig_ctx->count; + + return res; } static bool ovl_is_impure_dir(struct file *file) @@ -713,6 +720,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx) const struct ovl_layer *lower_layer = ovl_layer_lower(dir); struct ovl_readdir_translate rdt = { .ctx.actor = ovl_fill_real, + .ctx.count = ctx->count, .orig_ctx = ctx, .xinobits = ovl_xino_bits(ofs), .xinowarn = ovl_xino_warn(ofs), @@ -1073,6 +1081,7 @@ int ovl_check_d_type_supported(const struct path *realpath) int err; struct ovl_readdir_data rdd = { .ctx.actor = ovl_check_d_type, + .ctx.count = INT_MAX, .d_type_supported = false, }; @@ -1094,6 +1103,7 @@ static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *pa struct ovl_cache_entry *p; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, + .ctx.count = INT_MAX, .list = &list, }; bool incompat = false; @@ -1178,6 +1188,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs) struct ovl_cache_entry *p; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, + .ctx.count = INT_MAX, .list = &list, }; diff --git a/fs/pidfs.c b/fs/pidfs.c index d64a4cbeb0da..c1f0a067be40 100644 --- a/fs/pidfs.c +++ b/fs/pidfs.c @@ -20,6 +20,7 @@ #include <linux/time_namespace.h> #include <linux/utsname.h> #include <net/net_namespace.h> +#include <linux/coredump.h> #include "internal.h" #include "mount.h" @@ -33,6 +34,7 @@ static struct kmem_cache *pidfs_cachep __ro_after_init; struct pidfs_exit_info { __u64 cgroupid; __s32 exit_code; + __u32 coredump_mask; }; struct pidfs_inode { @@ -240,6 +242,22 @@ static inline bool pid_in_current_pidns(const struct pid *pid) return false; } +static __u32 pidfs_coredump_mask(unsigned long mm_flags) +{ + switch (__get_dumpable(mm_flags)) { + case SUID_DUMP_USER: + return PIDFD_COREDUMP_USER; + case SUID_DUMP_ROOT: + return PIDFD_COREDUMP_ROOT; + case SUID_DUMP_DISABLE: + return PIDFD_COREDUMP_SKIP; + default: + WARN_ON_ONCE(true); + } + + return 0; +} + static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) { struct 
pidfd_info __user *uinfo = (struct pidfd_info __user *)arg; @@ -280,6 +298,11 @@ static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) } } + if (mask & PIDFD_INFO_COREDUMP) { + kinfo.mask |= PIDFD_INFO_COREDUMP; + kinfo.coredump_mask = READ_ONCE(pidfs_i(inode)->__pei.coredump_mask); + } + task = get_pid_task(pid, PIDTYPE_PID); if (!task) { /* @@ -296,6 +319,13 @@ static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg) if (!c) return -ESRCH; + if (!(kinfo.mask & PIDFD_INFO_COREDUMP)) { + task_lock(task); + if (task->mm) + kinfo.coredump_mask = pidfs_coredump_mask(task->mm->flags); + task_unlock(task); + } + /* Unconditionally return identifiers and credentials, the rest only on request */ user_ns = current_user_ns(); @@ -559,6 +589,31 @@ void pidfs_exit(struct task_struct *tsk) } } +#ifdef CONFIG_COREDUMP +void pidfs_coredump(const struct coredump_params *cprm) +{ + struct pid *pid = cprm->pid; + struct pidfs_exit_info *exit_info; + struct dentry *dentry; + struct inode *inode; + __u32 coredump_mask = 0; + + dentry = pid->stashed; + if (WARN_ON_ONCE(!dentry)) + return; + + inode = d_inode(dentry); + exit_info = &pidfs_i(inode)->__pei; + /* Note how we were coredumped. */ + coredump_mask = pidfs_coredump_mask(cprm->mm_flags); + /* Note that we actually did coredump. */ + coredump_mask |= PIDFD_COREDUMPED; + /* If coredumping is set to skip we should never end up here. */ + VFS_WARN_ON_ONCE(coredump_mask & PIDFD_COREDUMP_SKIP); + smp_store_release(&exit_info->coredump_mask, coredump_mask); +} +#endif + static struct vfsmount *pidfs_mnt __ro_after_init; /* @@ -569,36 +624,14 @@ static struct vfsmount *pidfs_mnt __ro_after_init; static int pidfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { - return -EOPNOTSUPP; + return anon_inode_setattr(idmap, dentry, attr); } - -/* - * User space expects pidfs inodes to have no file type in st_mode. - * - * In particular, 'lsof' has this legacy logic: - * - * type = s->st_mode & S_IFMT; - * switch (type) { - * ... - * case 0: - * if (!strcmp(p, "anon_inode")) - * Lf->ntype = Ntype = N_ANON_INODE; - * - * to detect our old anon_inode logic. - * - * Rather than mess with our internal sane inode data, just fix it - * up here in getattr() by masking off the format bits. - */ static int pidfs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { - struct inode *inode = d_inode(path->dentry); - - generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); - stat->mode &= ~S_IFMT; - return 0; + return anon_inode_getattr(idmap, path, stat, request_mask, query_flags); } static const struct inode_operations pidfs_inode_operations = { @@ -768,7 +801,7 @@ static inline bool pidfs_pid_valid(struct pid *pid, const struct path *path, { enum pid_type type; - if (flags & PIDFD_CLONE) + if (flags & PIDFD_STALE) return true; /* @@ -777,10 +810,14 @@ static inline bool pidfs_pid_valid(struct pid *pid, const struct path *path, * pidfd has been allocated perform another check that the pid * is still alive. If it is exit information is available even * if the task gets reaped before the pidfd is returned to - * userspace. The only exception is PIDFD_CLONE where no task - * linkage has been established for @pid yet and the kernel is - * in the middle of process creation so there's nothing for - * pidfs to miss. + * userspace. 
The only exceptions are indicated by PIDFD_STALE: + * + * (1) The kernel is in the middle of task creation and thus no + * task linkage has been established yet. + * (2) The caller knows @pid has been registered in pidfs at a + * time when the task was still alive. + * + * In both cases exit information will have been reported. */ if (flags & PIDFD_THREAD) type = PIDTYPE_PID; @@ -826,7 +863,7 @@ static int pidfs_init_inode(struct inode *inode, void *data) const struct pid *pid = data; inode->i_private = data; - inode->i_flags |= S_PRIVATE; + inode->i_flags |= S_PRIVATE | S_ANON_INODE; inode->i_mode |= S_IRWXU; inode->i_op = &pidfs_inode_operations; inode->i_fop = &pidfs_file_operations; @@ -874,11 +911,11 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags) int ret; /* - * Ensure that PIDFD_CLONE can be passed as a flag without + * Ensure that PIDFD_STALE can be passed as a flag without * overloading other uapi pidfd flags. */ - BUILD_BUG_ON(PIDFD_CLONE == PIDFD_THREAD); - BUILD_BUG_ON(PIDFD_CLONE == PIDFD_NONBLOCK); + BUILD_BUG_ON(PIDFD_STALE == PIDFD_THREAD); + BUILD_BUG_ON(PIDFD_STALE == PIDFD_NONBLOCK); ret = path_from_stashed(&pid->stashed, pidfs_mnt, get_pid(pid), &path); if (ret < 0) @@ -887,7 +924,8 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags) if (!pidfs_pid_valid(pid, &path, flags)) return ERR_PTR(-ESRCH); - flags &= ~PIDFD_CLONE; + flags &= ~PIDFD_STALE; + flags |= O_RDWR; pidfd_file = dentry_open(&path, flags, current_cred()); /* Raise PIDFD_THREAD explicitly as do_dentry_open() strips it. */ if (!IS_ERR(pidfd_file)) @@ -896,6 +934,65 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags) return pidfd_file; } +/** + * pidfs_register_pid - register a struct pid in pidfs + * @pid: pid to pin + * + * Register a struct pid in pidfs. Needs to be paired with + * pidfs_put_pid() to not risk leaking the pidfs dentry and inode. + * + * Return: On success zero, on error a negative error code is returned. + */ +int pidfs_register_pid(struct pid *pid) +{ + struct path path __free(path_put) = {}; + int ret; + + might_sleep(); + + if (!pid) + return 0; + + ret = path_from_stashed(&pid->stashed, pidfs_mnt, get_pid(pid), &path); + if (unlikely(ret)) + return ret; + /* Keep the dentry and only put the reference to the mount. */ + path.dentry = NULL; + return 0; +} + +/** + * pidfs_get_pid - pin a struct pid through pidfs + * @pid: pid to pin + * + * Similar to pidfs_register_pid() but only valid if the caller knows + * there's a reference to the @pid through a dentry already that can't + * go away. + */ +void pidfs_get_pid(struct pid *pid) +{ + if (!pid) + return; + WARN_ON_ONCE(!stashed_dentry_get(&pid->stashed)); +} + +/** + * pidfs_put_pid - drop a pidfs reference + * @pid: pid to drop + * + * Drop a reference to @pid via pidfs. This is only safe if the + * reference has been taken via pidfs_get_pid().
+ */ +void pidfs_put_pid(struct pid *pid) +{ + might_sleep(); + + if (!pid) + return; + VFS_WARN_ON_ONCE(!pid->stashed); + dput(pid->stashed); +} + static void pidfs_inode_init_once(void *data) { struct pidfs_inode *pi = data; diff --git a/fs/proc/base.c b/fs/proc/base.c index b0d4e1908b22..fe33a5843fbd 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2121,7 +2121,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, unsigned type = DT_UNKNOWN; ino_t ino = 1; - child = d_hash_and_lookup(dir, &qname); + child = try_lookup_noperm(&qname, dir); if (!child) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); child = d_alloc_parallel(dir, &qname, &wq); diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 83be312159c9..bc2bc60c36cc 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -120,8 +120,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) global_node_page_state(NR_SECONDARY_PAGETABLE)); show_val_kb(m, "NFS_Unstable: ", 0); - show_val_kb(m, "Bounce: ", - global_zone_page_state(NR_BOUNCE)); + show_val_kb(m, "Bounce: ", 0); show_val_kb(m, "WritebackTmp: ", global_node_page_state(NR_WRITEBACK_TEMP)); show_val_kb(m, "CommitLimit: ", vm_commit_limit()); diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index e133b507ddf3..5c555db68aa2 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -111,7 +111,7 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) if (err) goto out; } else { - mangle(m, r->mnt_devname ? r->mnt_devname : "none"); + mangle(m, r->mnt_devname); } seq_putc(m, ' '); /* mountpoints outside of chroot jail will give SEQ_SKIP on this */ @@ -177,7 +177,7 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt) if (err) goto out; } else { - mangle(m, r->mnt_devname ? r->mnt_devname : "none"); + mangle(m, r->mnt_devname); } seq_puts(m, sb_rdonly(sb) ? " ro" : " rw"); err = show_sb_opts(m, sb); @@ -199,17 +199,13 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt) int err; /* device */ + seq_puts(m, "device "); if (sb->s_op->show_devname) { - seq_puts(m, "device "); err = sb->s_op->show_devname(m, mnt_path.dentry); if (err) goto out; } else { - if (r->mnt_devname) { - seq_puts(m, "device "); - mangle(m, r->mnt_devname); - } else - seq_puts(m, "no device"); + mangle(m, r->mnt_devname); } /* mount point */ diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 825c5c2e0962..df4a9b348769 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2560,7 +2560,7 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name, struct dentry *dentry; int error; - dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name)); + dentry = lookup_noperm_positive_unlocked(&QSTR(qf_name), sb->s_root); if (IS_ERR(dentry)) return PTR_ERR(dentry); diff --git a/fs/read_write.c b/fs/read_write.c index bb0ed26a0b3a..0ef70e128c4a 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -332,7 +332,9 @@ loff_t default_llseek(struct file *file, loff_t offset, int whence) struct inode *inode = file_inode(file); loff_t retval; - inode_lock(inode); + retval = inode_lock_killable(inode); + if (retval) + return retval; switch (whence) { case SEEK_END: offset += i_size_read(inode); diff --git a/fs/readdir.c b/fs/readdir.c index 0038efda417b..7764b8638978 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -222,6 +222,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, CLASS(fd_pos, f)(fd); struct readdir_callback buf = { .ctx.actor = fillonedir, + .ctx.count = 1, /* Hint to fs: just one entry. 
*/ .dirent = dirent }; @@ -252,7 +253,6 @@ struct getdents_callback { struct dir_context ctx; struct linux_dirent __user * current_dir; int prev_reclen; - int count; int error; }; @@ -266,12 +266,16 @@ static bool filldir(struct dir_context *ctx, const char *name, int namlen, int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2, sizeof(long)); int prev_reclen; + unsigned int flags = d_type; + + BUILD_BUG_ON(FILLDIR_FLAG_NOINTR & S_DT_MASK); + d_type &= S_DT_MASK; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) return false; buf->error = -EINVAL; /* only used if we fail.. */ - if (reclen > buf->count) + if (reclen > ctx->count) return false; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { @@ -279,7 +283,7 @@ static bool filldir(struct dir_context *ctx, const char *name, int namlen, return false; } prev_reclen = buf->prev_reclen; - if (prev_reclen && signal_pending(current)) + if (!(flags & FILLDIR_FLAG_NOINTR) && prev_reclen && signal_pending(current)) return false; dirent = buf->current_dir; prev = (void __user *) dirent - prev_reclen; @@ -296,7 +300,7 @@ static bool filldir(struct dir_context *ctx, const char *name, int namlen, buf->current_dir = (void __user *)dirent + reclen; buf->prev_reclen = reclen; - buf->count -= reclen; + ctx->count -= reclen; return true; efault_end: user_write_access_end(); @@ -311,7 +315,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, CLASS(fd_pos, f)(fd); struct getdents_callback buf = { .ctx.actor = filldir, - .count = count, + .ctx.count = count, .current_dir = dirent }; int error; @@ -329,7 +333,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else - error = count - buf.count; + error = count - buf.ctx.count; } return error; } @@ -338,7 +342,6 @@ struct getdents_callback64 { struct dir_context ctx; struct linux_dirent64 __user * current_dir; int prev_reclen; - int count; int error; }; @@ -351,15 +354,19 @@ static bool filldir64(struct dir_context *ctx, const char *name, int namlen, int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1, sizeof(u64)); int prev_reclen; + unsigned int flags = d_type; + + BUILD_BUG_ON(FILLDIR_FLAG_NOINTR & S_DT_MASK); + d_type &= S_DT_MASK; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) return false; buf->error = -EINVAL; /* only used if we fail.. 
*/ - if (reclen > buf->count) + if (reclen > ctx->count) return false; prev_reclen = buf->prev_reclen; - if (prev_reclen && signal_pending(current)) + if (!(flags & FILLDIR_FLAG_NOINTR) && prev_reclen && signal_pending(current)) return false; dirent = buf->current_dir; prev = (void __user *)dirent - prev_reclen; @@ -376,7 +383,7 @@ static bool filldir64(struct dir_context *ctx, const char *name, int namlen, buf->prev_reclen = reclen; buf->current_dir = (void __user *)dirent + reclen; - buf->count -= reclen; + ctx->count -= reclen; return true; efault_end: @@ -392,7 +399,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, CLASS(fd_pos, f)(fd); struct getdents_callback64 buf = { .ctx.actor = filldir64, - .count = count, + .ctx.count = count, .current_dir = dirent }; int error; @@ -411,7 +418,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, if (put_user(d_off, &lastdirent->d_off)) error = -EFAULT; else - error = count - buf.count; + error = count - buf.ctx.count; } return error; } @@ -475,6 +482,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd, CLASS(fd_pos, f)(fd); struct compat_readdir_callback buf = { .ctx.actor = compat_fillonedir, + .ctx.count = 1, /* Hint to fs: just one entry. */ .dirent = dirent }; @@ -499,7 +507,6 @@ struct compat_getdents_callback { struct dir_context ctx; struct compat_linux_dirent __user *current_dir; int prev_reclen; - int count; int error; }; @@ -513,12 +520,16 @@ static bool compat_filldir(struct dir_context *ctx, const char *name, int namlen int reclen = ALIGN(offsetof(struct compat_linux_dirent, d_name) + namlen + 2, sizeof(compat_long_t)); int prev_reclen; + unsigned int flags = d_type; + + BUILD_BUG_ON(FILLDIR_FLAG_NOINTR & S_DT_MASK); + d_type &= S_DT_MASK; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) return false; buf->error = -EINVAL; /* only used if we fail.. 
*/ - if (reclen > buf->count) + if (reclen > ctx->count) return false; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { @@ -526,7 +537,7 @@ static bool compat_filldir(struct dir_context *ctx, const char *name, int namlen return false; } prev_reclen = buf->prev_reclen; - if (prev_reclen && signal_pending(current)) + if (!(flags & FILLDIR_FLAG_NOINTR) && prev_reclen && signal_pending(current)) return false; dirent = buf->current_dir; prev = (void __user *) dirent - prev_reclen; @@ -542,7 +553,7 @@ static bool compat_filldir(struct dir_context *ctx, const char *name, int namlen buf->prev_reclen = reclen; buf->current_dir = (void __user *)dirent + reclen; - buf->count -= reclen; + ctx->count -= reclen; return true; efault_end: user_write_access_end(); @@ -557,8 +568,8 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, CLASS(fd_pos, f)(fd); struct compat_getdents_callback buf = { .ctx.actor = compat_filldir, + .ctx.count = count, .current_dir = dirent, - .count = count }; int error; @@ -575,7 +586,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else - error = count - buf.count; + error = count - buf.ctx.count; } return error; } diff --git a/fs/select.c b/fs/select.c index 7da531b1cf6b..9fb650d03d52 100644 --- a/fs/select.c +++ b/fs/select.c @@ -630,7 +630,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; ret = -EINVAL; - if (n < 0) + if (unlikely(n < 0)) goto out_nofds; /* max_fds can increase, so grab it once to avoid race */ @@ -857,7 +857,7 @@ static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait, int fd = pollfd->fd; __poll_t mask, filter; - if (fd < 0) + if (unlikely(fd < 0)) return 0; CLASS(fd, f)(fd); diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c index 240d82c6f908..89d2dbbb742c 100644 --- a/fs/smb/client/cached_dir.c +++ b/fs/smb/client/cached_dir.c @@ -102,7 +102,8 @@ path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path) while (*s && *s != sep) s++; - child = lookup_positive_unlocked(p, dentry, s - p); + child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p), + dentry); dput(dentry); dentry = child; } while (!IS_ERR(dentry)); @@ -201,7 +202,7 @@ replay_again: spin_unlock(&cfids->cfid_list_lock); /* - * Skip any prefix paths in @path as lookup_positive_unlocked() ends up + * Skip any prefix paths in @path as lookup_noperm_positive_unlocked() ends up * calling ->lookup() which already adds those through * build_path_from_dentry(). 
Also, do it earlier as we might reconnect * below when trying to send compounded request and then potentially diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index a08c42363ffc..fb04e263611c 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -929,7 +929,8 @@ cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb) while (*s && *s != sep) s++; - child = lookup_positive_unlocked(p, dentry, s - p); + child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p), + dentry); dput(dentry); dentry = child; } while (!IS_ERR(dentry)); diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c index 50f96259d9ad..f9f11cbf89be 100644 --- a/fs/smb/client/readdir.c +++ b/fs/smb/client/readdir.c @@ -9,6 +9,7 @@ * */ #include <linux/fs.h> +#include <linux/namei.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/stat.h> @@ -78,7 +79,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); - dentry = d_hash_and_lookup(parent, name); + dentry = try_lookup_noperm(name, parent); if (!dentry) { /* * If we know that the inode will need to be revalidated @@ -733,7 +734,10 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, else cifs_buf_release(cfile->srch_inf. ntwrk_buf_start); + /* Reset all pointers to the network buffer to prevent stale references */ cfile->srch_inf.ntwrk_buf_start = NULL; + cfile->srch_inf.srch_entries_start = NULL; + cfile->srch_inf.last_entry = NULL; } rc = initiate_cifs_search(xid, file, full_path); if (rc) { @@ -756,11 +760,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, rc = server->ops->query_dir_next(xid, tcon, &cfile->fid, search_flags, &cfile->srch_inf); + if (rc) + return -ENOENT; /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cfile->srch_inf.last_entry) cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); - if (rc) - return -ENOENT; } if (index_to_find < cfile->srch_inf.index_of_last_entry) { /* we found the buffer that contains the entry */ diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 03f606afad93..d7a8a580d013 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -146,12 +146,9 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci) { struct oplock_info *opinfo; - if (list_empty(&ci->m_op_list)) - return NULL; - down_read(&ci->m_lock); - opinfo = list_first_entry(&ci->m_op_list, struct oplock_info, - op_entry); + opinfo = list_first_entry_or_null(&ci->m_op_list, struct oplock_info, + op_entry); if (opinfo) { if (opinfo->conn == NULL || !atomic_inc_not_zero(&opinfo->refcount)) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index f2a2be8467c6..8d414239b3fe 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -4120,9 +4120,10 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv) return -EINVAL; lock_dir(priv->dir_fp); - dent = lookup_one(idmap, priv->d_info->name, - priv->dir_fp->filp->f_path.dentry, - priv->d_info->name_len); + dent = lookup_one(idmap, + &QSTR_LEN(priv->d_info->name, + priv->d_info->name_len), + priv->dir_fp->filp->f_path.dentry); unlock_dir(priv->dir_fp); if (IS_ERR(dent)) { diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 482eba0f4dc1..baf0d3031a44 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -409,10 +409,15 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, ksmbd_debug(VFS, "write stream data 
pos : %llu, count : %zd\n", *pos, count); + if (*pos >= XATTR_SIZE_MAX) { + pr_err("stream write position %lld is out of bounds\n", *pos); + return -EINVAL; + } + size = *pos + count; if (size > XATTR_SIZE_MAX) { size = XATTR_SIZE_MAX; - count = (*pos + count) - XATTR_SIZE_MAX; + count = XATTR_SIZE_MAX - *pos; } v_len = ksmbd_vfs_getcasexattr(idmap, @@ -426,13 +431,6 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, goto out; } - if (v_len <= *pos) { - pr_err("stream write position %lld is out of bounds (stream length: %zd)\n", - *pos, v_len); - err = -EINVAL; - goto out; - } - if (v_len < size) { wbuf = kvzalloc(size, KSMBD_DEFAULT_GFP); if (!wbuf) { @@ -684,7 +682,7 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path, struct ksmbd_file *parent_fp; int new_type; int err, lookup_flags = LOOKUP_NO_SYMLINKS; - int target_lookup_flags = LOOKUP_RENAME_TARGET; + int target_lookup_flags = LOOKUP_RENAME_TARGET | LOOKUP_CREATE; if (ksmbd_override_fsids(work)) return -ENOMEM; diff --git a/fs/stat.c b/fs/stat.c index 3d9222807214..f95c1dc3eaa4 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -136,13 +136,15 @@ EXPORT_SYMBOL(generic_fill_statx_attr); * @stat: Where to fill in the attribute flags * @unit_min: Minimum supported atomic write length in bytes * @unit_max: Maximum supported atomic write length in bytes + * @unit_max_opt: Optimised maximum supported atomic write length in bytes * * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from * atomic write unit_min and unit_max values. */ void generic_fill_statx_atomic_writes(struct kstat *stat, unsigned int unit_min, - unsigned int unit_max) + unsigned int unit_max, + unsigned int unit_max_opt) { /* Confirm that the request type is known */ stat->result_mask |= STATX_WRITE_ATOMIC; @@ -153,6 +155,7 @@ void generic_fill_statx_atomic_writes(struct kstat *stat, if (unit_min) { stat->atomic_write_unit_min = unit_min; stat->atomic_write_unit_max = unit_max; + stat->atomic_write_unit_max_opt = unit_max_opt; /* Initially only allow 1x segment */ stat->atomic_write_segments_max = 1; @@ -254,7 +257,7 @@ int vfs_getattr(const struct path *path, struct kstat *stat, int retval; retval = security_inode_getattr(path); - if (retval) + if (unlikely(retval)) return retval; return vfs_getattr_nosec(path, stat, request_mask, query_flags); } @@ -425,7 +428,7 @@ SYSCALL_DEFINE2(stat, const char __user *, filename, int error; error = vfs_stat(filename, &stat); - if (error) + if (unlikely(error)) return error; return cp_old_stat(&stat, statbuf); @@ -438,7 +441,7 @@ SYSCALL_DEFINE2(lstat, const char __user *, filename, int error; error = vfs_lstat(filename, &stat); - if (error) + if (unlikely(error)) return error; return cp_old_stat(&stat, statbuf); @@ -447,12 +450,13 @@ SYSCALL_DEFINE2(lstat, const char __user *, filename, SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf) { struct kstat stat; - int error = vfs_fstat(fd, &stat); + int error; - if (!error) - error = cp_old_stat(&stat, statbuf); + error = vfs_fstat(fd, &stat); + if (unlikely(error)) + return error; - return error; + return cp_old_stat(&stat, statbuf); } #endif /* __ARCH_WANT_OLD_STAT */ @@ -506,10 +510,12 @@ SYSCALL_DEFINE2(newstat, const char __user *, filename, struct stat __user *, statbuf) { struct kstat stat; - int error = vfs_stat(filename, &stat); + int error; - if (error) + error = vfs_stat(filename, &stat); + if (unlikely(error)) return error; + return cp_new_stat(&stat, statbuf); } @@ -520,7 
+526,7 @@ SYSCALL_DEFINE2(newlstat, const char __user *, filename, int error; error = vfs_lstat(filename, &stat); - if (error) + if (unlikely(error)) return error; return cp_new_stat(&stat, statbuf); @@ -534,8 +540,9 @@ SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename, int error; error = vfs_fstatat(dfd, filename, &stat, flag); - if (error) + if (unlikely(error)) return error; + return cp_new_stat(&stat, statbuf); } #endif @@ -543,12 +550,13 @@ SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename, SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf) { struct kstat stat; - int error = vfs_fstat(fd, &stat); + int error; - if (!error) - error = cp_new_stat(&stat, statbuf); + error = vfs_fstat(fd, &stat); + if (unlikely(error)) + return error; - return error; + return cp_new_stat(&stat, statbuf); } #endif @@ -736,6 +744,7 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min; tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max; tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max; + tmp.stx_atomic_write_unit_max_opt = stat->atomic_write_unit_max_opt; return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } diff --git a/fs/super.c b/fs/super.c index 97a17f9d9023..bcc4e87123c8 100644 --- a/fs/super.c +++ b/fs/super.c @@ -39,7 +39,8 @@ #include <uapi/linux/mount.h> #include "internal.h" -static int thaw_super_locked(struct super_block *sb, enum freeze_holder who); +static int thaw_super_locked(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner); static LIST_HEAD(super_blocks); static DEFINE_SPINLOCK(sb_lock); @@ -201,7 +202,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink, inodes = list_lru_shrink_count(&sb->s_inode_lru, sc); dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc); - total_objects = dentries + inodes + fs_objects + 1; + total_objects = dentries + inodes + fs_objects; if (!total_objects) total_objects = 1; @@ -887,52 +888,48 @@ void drop_super_exclusive(struct super_block *sb) } EXPORT_SYMBOL(drop_super_exclusive); -static void __iterate_supers(void (*f)(struct super_block *)) -{ - struct super_block *sb, *p = NULL; - - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - if (super_flags(sb, SB_DYING)) - continue; - sb->s_count++; - spin_unlock(&sb_lock); +enum super_iter_flags_t { + SUPER_ITER_EXCL = (1U << 0), + SUPER_ITER_UNLOCKED = (1U << 1), + SUPER_ITER_REVERSE = (1U << 2), +}; - f(sb); +static inline struct super_block *first_super(enum super_iter_flags_t flags) +{ + if (flags & SUPER_ITER_REVERSE) + return list_last_entry(&super_blocks, struct super_block, s_list); + return list_first_entry(&super_blocks, struct super_block, s_list); +} - spin_lock(&sb_lock); - if (p) - __put_super(p); - p = sb; - } - if (p) - __put_super(p); - spin_unlock(&sb_lock); +static inline struct super_block *next_super(struct super_block *sb, + enum super_iter_flags_t flags) +{ + if (flags & SUPER_ITER_REVERSE) + return list_prev_entry(sb, s_list); + return list_next_entry(sb, s_list); } -/** - * iterate_supers - call function for all active superblocks - * @f: function to call - * @arg: argument to pass to it - * - * Scans the superblock list and calls given function, passing it - * locked superblock and given argument. 
- */ -void iterate_supers(void (*f)(struct super_block *, void *), void *arg) + +static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg, + enum super_iter_flags_t flags) { struct super_block *sb, *p = NULL; + bool excl = flags & SUPER_ITER_EXCL; - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - bool locked; + guard(spinlock)(&sb_lock); + for (sb = first_super(flags); + !list_entry_is_head(sb, &super_blocks, s_list); + sb = next_super(sb, flags)) { + if (super_flags(sb, SB_DYING)) + continue; sb->s_count++; spin_unlock(&sb_lock); - locked = super_lock_shared(sb); - if (locked) { - if (sb->s_root) - f(sb, arg); - super_unlock_shared(sb); + if (flags & SUPER_ITER_UNLOCKED) { + f(sb, arg); + } else if (super_lock(sb, excl)) { + f(sb, arg); + super_unlock(sb, excl); } spin_lock(&sb_lock); @@ -942,7 +939,11 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg) } if (p) __put_super(p); - spin_unlock(&sb_lock); +} + +void iterate_supers(void (*f)(struct super_block *, void *), void *arg) +{ + __iterate_supers(f, arg, 0); } /** @@ -963,15 +964,15 @@ void iterate_supers_type(struct file_system_type *type, hlist_for_each_entry(sb, &type->fs_supers, s_instances) { bool locked; + if (super_flags(sb, SB_DYING)) + continue; + sb->s_count++; spin_unlock(&sb_lock); locked = super_lock_shared(sb); - if (locked) { - if (sb->s_root) - f(sb, arg); - super_unlock_shared(sb); - } + if (locked) + f(sb, arg); spin_lock(&sb_lock); if (p) @@ -991,23 +992,21 @@ struct super_block *user_get_super(dev_t dev, bool excl) spin_lock(&sb_lock); list_for_each_entry(sb, &super_blocks, s_list) { - if (sb->s_dev == dev) { - bool locked; - - sb->s_count++; - spin_unlock(&sb_lock); - /* still alive? */ - locked = super_lock(sb, excl); - if (locked) { - if (sb->s_root) - return sb; - super_unlock(sb, excl); - } - /* nope, got unmounted */ - spin_lock(&sb_lock); - __put_super(sb); - break; - } + bool locked; + + if (sb->s_dev != dev) + continue; + + sb->s_count++; + spin_unlock(&sb_lock); + + locked = super_lock(sb, excl); + if (locked) + return sb; + + spin_lock(&sb_lock); + __put_super(sb); + break; } spin_unlock(&sb_lock); return NULL; @@ -1111,11 +1110,9 @@ cancel_readonly: return retval; } -static void do_emergency_remount_callback(struct super_block *sb) +static void do_emergency_remount_callback(struct super_block *sb, void *unused) { - bool locked = super_lock_excl(sb); - - if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) { + if (sb->s_bdev && !sb_rdonly(sb)) { struct fs_context *fc; fc = fs_context_for_reconfigure(sb->s_root, @@ -1126,13 +1123,12 @@ static void do_emergency_remount_callback(struct super_block *sb) put_fs_context(fc); } } - if (locked) - super_unlock_excl(sb); } static void do_emergency_remount(struct work_struct *work) { - __iterate_supers(do_emergency_remount_callback); + __iterate_supers(do_emergency_remount_callback, NULL, + SUPER_ITER_EXCL | SUPER_ITER_REVERSE); kfree(work); printk("Emergency Remount complete\n"); } @@ -1148,24 +1144,18 @@ void emergency_remount(void) } } -static void do_thaw_all_callback(struct super_block *sb) +static void do_thaw_all_callback(struct super_block *sb, void *unused) { - bool locked = super_lock_excl(sb); - - if (locked && sb->s_root) { - if (IS_ENABLED(CONFIG_BLOCK)) - while (sb->s_bdev && !bdev_thaw(sb->s_bdev)) - pr_warn("Emergency Thaw on %pg\n", sb->s_bdev); - thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE); - return; - } - if (locked) - super_unlock_excl(sb); + if 
(IS_ENABLED(CONFIG_BLOCK)) + while (sb->s_bdev && !bdev_thaw(sb->s_bdev)) + pr_warn("Emergency Thaw on %pg\n", sb->s_bdev); + thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL); + return; } static void do_thaw_all(struct work_struct *work) { - __iterate_supers(do_thaw_all_callback); + __iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL); kfree(work); printk(KERN_WARNING "Emergency Thaw complete\n"); } @@ -1186,6 +1176,66 @@ void emergency_thaw_all(void) } } +static inline bool get_active_super(struct super_block *sb) +{ + bool active = false; + + if (super_lock_excl(sb)) { + active = atomic_inc_not_zero(&sb->s_active); + super_unlock_excl(sb); + } + return active; +} + +static const char *filesystems_freeze_ptr = "filesystems_freeze"; + +static void filesystems_freeze_callback(struct super_block *sb, void *unused) +{ + if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super) + return; + + if (!get_active_super(sb)) + return; + + if (sb->s_op->freeze_super) + sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL, + filesystems_freeze_ptr); + else + freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL, + filesystems_freeze_ptr); + + deactivate_super(sb); +} + +void filesystems_freeze(void) +{ + __iterate_supers(filesystems_freeze_callback, NULL, + SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE); +} + +static void filesystems_thaw_callback(struct super_block *sb, void *unused) +{ + if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super) + return; + + if (!get_active_super(sb)) + return; + + if (sb->s_op->thaw_super) + sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL, + filesystems_freeze_ptr); + else + thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL, + filesystems_freeze_ptr); + + deactivate_super(sb); +} + +void filesystems_thaw(void) +{ + __iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED); +} + static DEFINE_IDA(unnamed_dev_ida); /** @@ -1479,10 +1529,10 @@ static int fs_bdev_freeze(struct block_device *bdev) if (sb->s_op->freeze_super) error = sb->s_op->freeze_super(sb, - FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE); + FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL); else error = freeze_super(sb, - FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE); + FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL); if (!error) error = sync_blockdev(bdev); deactivate_super(sb); @@ -1528,10 +1578,10 @@ static int fs_bdev_thaw(struct block_device *bdev) if (sb->s_op->thaw_super) error = sb->s_op->thaw_super(sb, - FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE); + FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL); else error = thaw_super(sb, - FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE); + FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL); deactivate_super(sb); return error; } @@ -1903,7 +1953,7 @@ static int wait_for_partially_frozen(struct super_block *sb) } #define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE) -#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST) +#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL) static inline int freeze_inc(struct super_block *sb, enum freeze_holder who) { @@ -1929,11 +1979,34 @@ static inline int freeze_dec(struct super_block *sb, enum freeze_holder who) return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount; } -static inline bool may_freeze(struct super_block *sb, enum freeze_holder who) +static inline bool may_freeze(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) { + lockdep_assert_held(&sb->s_umount); + WARN_ON_ONCE((who & ~FREEZE_FLAGS)); WARN_ON_ONCE(hweight32(who & 
FREEZE_HOLDERS) > 1); + if (who & FREEZE_EXCL) { + if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL))) + return false; + if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL))) + return false; + if (WARN_ON_ONCE(!freeze_owner)) + return false; + /* This freeze already has a specific owner. */ + if (sb->s_writers.freeze_owner) + return false; + /* + * This is already frozen multiple times so we're just + * going to take a reference count and mark the freeze as + * being owned by the caller. + */ + if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) + sb->s_writers.freeze_owner = freeze_owner; + return true; + } + if (who & FREEZE_HOLDER_KERNEL) return (who & FREEZE_MAY_NEST) || sb->s_writers.freeze_kcount == 0; @@ -1943,10 +2016,61 @@ static inline bool may_freeze(struct super_block *sb, enum freeze_holder who) return false; } +static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) +{ + lockdep_assert_held(&sb->s_umount); + + WARN_ON_ONCE((who & ~FREEZE_FLAGS)); + WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); + + if (who & FREEZE_EXCL) { + if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL))) + return false; + if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL))) + return false; + if (WARN_ON_ONCE(!freeze_owner)) + return false; + if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0)) + return false; + /* This isn't exclusively frozen. */ + if (!sb->s_writers.freeze_owner) + return false; + /* This isn't exclusively frozen by us. */ + if (sb->s_writers.freeze_owner != freeze_owner) + return false; + /* + * This is still frozen multiple times so we're just + * going to drop our reference count and undo our + * exclusive freeze. + */ + if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1) + sb->s_writers.freeze_owner = NULL; + return true; + } + + if (who & FREEZE_HOLDER_KERNEL) { + /* + * Someone's trying to steal the reference belonging to + * @sb->s_writers.freeze_owner. + */ + if (sb->s_writers.freeze_kcount == 1 && + sb->s_writers.freeze_owner) + return false; + return sb->s_writers.freeze_kcount > 0; + } + + if (who & FREEZE_HOLDER_USERSPACE) + return sb->s_writers.freeze_ucount > 0; + + return false; +} + /** * freeze_super - lock the filesystem and force it into a consistent state * @sb: the super to lock * @who: context that wants to freeze + * @freeze_owner: owner of the freeze * * Syncs the super to make sure the filesystem is consistent and calls the fs's * freeze_fs. Subsequent calls to this without first thawing the fs may return @@ -1998,7 +2122,7 @@ static inline bool may_freeze(struct super_block *sb, enum freeze_holder who) * Return: If the freeze was successful zero is returned. If the freeze * failed a negative error code is returned. */ -int freeze_super(struct super_block *sb, enum freeze_holder who) +int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner) { int ret; @@ -2010,7 +2134,7 @@ int freeze_super(struct super_block *sb, enum freeze_holder who) retry: if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) { - if (may_freeze(sb, who)) + if (may_freeze(sb, who, freeze_owner)) ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1); else ret = -EBUSY; @@ -2032,6 +2156,7 @@ retry: if (sb_rdonly(sb)) { /* Nothing to do really... 
*/ WARN_ON_ONCE(freeze_inc(sb, who) > 1); + sb->s_writers.freeze_owner = freeze_owner; sb->s_writers.frozen = SB_FREEZE_COMPLETE; wake_up_var(&sb->s_writers.frozen); super_unlock_excl(sb); @@ -2079,6 +2204,7 @@ retry: * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super(). */ WARN_ON_ONCE(freeze_inc(sb, who) > 1); + sb->s_writers.freeze_owner = freeze_owner; sb->s_writers.frozen = SB_FREEZE_COMPLETE; wake_up_var(&sb->s_writers.frozen); lockdep_sb_freeze_release(sb); @@ -2093,13 +2219,17 @@ EXPORT_SYMBOL(freeze_super); * removes that state without releasing the other state or unlocking the * filesystem. */ -static int thaw_super_locked(struct super_block *sb, enum freeze_holder who) +static int thaw_super_locked(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) { int error = -EINVAL; if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) goto out_unlock; + if (!may_unfreeze(sb, who, freeze_owner)) + goto out_unlock; + /* * All freezers share a single active reference. * So just unlock in case there are any left. @@ -2109,6 +2239,7 @@ static int thaw_super_locked(struct super_block *sb, enum freeze_holder who) if (sb_rdonly(sb)) { sb->s_writers.frozen = SB_UNFROZEN; + sb->s_writers.freeze_owner = NULL; wake_up_var(&sb->s_writers.frozen); goto out_deactivate; } @@ -2126,6 +2257,7 @@ static int thaw_super_locked(struct super_block *sb, enum freeze_holder who) } sb->s_writers.frozen = SB_UNFROZEN; + sb->s_writers.freeze_owner = NULL; wake_up_var(&sb->s_writers.frozen); sb_freeze_unlock(sb, SB_FREEZE_FS); out_deactivate: @@ -2141,6 +2273,7 @@ out_unlock: * thaw_super -- unlock filesystem * @sb: the super to thaw * @who: context that wants to freeze + * @freeze_owner: owner of the freeze * * Unlocks the filesystem and marks it writeable again after freeze_super() * if there are no remaining freezes on the filesystem. @@ -2154,13 +2287,14 @@ out_unlock: * have been frozen through the block layer via multiple block devices. * The filesystem remains frozen until all block devices are unfrozen. 
*/ -int thaw_super(struct super_block *sb, enum freeze_holder who) +int thaw_super(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) { if (!super_lock_excl(sb)) { WARN_ON_ONCE("Dying superblock while thawing!"); return -EINVAL; } - return thaw_super_locked(sb, who); + return thaw_super_locked(sb, who, freeze_owner); } EXPORT_SYMBOL(thaw_super); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index cb1af30b49f5..a3fd3cc591bd 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -555,7 +555,7 @@ struct dentry *tracefs_start_creating(const char *name, struct dentry *parent) if (unlikely(IS_DEADDIR(d_inode(parent)))) dentry = ERR_PTR(-ENOENT); else - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (!IS_ERR(dentry) && d_inode(dentry)) { dput(dentry); dentry = ERR_PTR(-EEXIST); diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index ea6f06adcd43..059a02691edd 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -19,6 +19,11 @@ #include <linux/highmem.h> #include "ubifs.h" +union ubifs_in_ptr { + const void *buf; + struct folio *folio; +}; + /* Fake description object for the "none" compressor */ static struct ubifs_compressor none_compr = { .compr_type = UBIFS_COMPR_NONE, @@ -68,28 +73,61 @@ static struct ubifs_compressor zstd_compr = { /* All UBIFS compressors */ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; -static int ubifs_compress_req(const struct ubifs_info *c, - struct acomp_req *req, - void *out_buf, int *out_len, - const char *compr_name) +static void ubifs_compress_common(int *compr_type, union ubifs_in_ptr in_ptr, + size_t in_offset, int in_len, bool in_folio, + void *out_buf, int *out_len) { - struct crypto_wait wait; - int in_len = req->slen; + struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; int dlen = *out_len; int err; + if (*compr_type == UBIFS_COMPR_NONE) + goto no_compr; + + /* If the input data is small, do not even try to compress it */ + if (in_len < UBIFS_MIN_COMPR_LEN) + goto no_compr; + dlen = min(dlen, in_len - UBIFS_MIN_COMPRESS_DIFF); - crypto_init_wait(&wait); - acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); - acomp_request_set_dst_dma(req, out_buf, dlen); - err = crypto_acomp_compress(req); - err = crypto_wait_req(err, &wait); - *out_len = req->dlen; - acomp_request_free(req); + do { + ACOMP_REQUEST_ON_STACK(req, compr->cc); + DECLARE_CRYPTO_WAIT(wait); + + acomp_request_set_callback(req, 0, NULL, NULL); + if (in_folio) + acomp_request_set_src_folio(req, in_ptr.folio, + in_offset, in_len); + else + acomp_request_set_src_dma(req, in_ptr.buf, in_len); + acomp_request_set_dst_dma(req, out_buf, dlen); + err = crypto_acomp_compress(req); + dlen = req->dlen; + if (err != -EAGAIN) + break; + + req = ACOMP_REQUEST_CLONE(req, GFP_NOFS | __GFP_NOWARN); + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + err = crypto_acomp_compress(req); + err = crypto_wait_req(err, &wait); + dlen = req->dlen; + acomp_request_free(req); + } while (0); + + *out_len = dlen; + if (err) + goto no_compr; - return err; + return; + +no_compr: + if (in_folio) + memcpy_from_folio(out_buf, in_ptr.folio, in_offset, in_len); + else + memcpy(out_buf, in_ptr.buf, in_len); + *out_len = in_len; + *compr_type = UBIFS_COMPR_NONE; } /** @@ -114,32 +152,10 @@ static int ubifs_compress_req(const struct ubifs_info *c, void ubifs_compress(const struct ubifs_info *c, const void *in_buf, int in_len, 
void *out_buf, int *out_len, int *compr_type) { - int err; - struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; - - if (*compr_type == UBIFS_COMPR_NONE) - goto no_compr; + union ubifs_in_ptr in_ptr = { .buf = in_buf }; - /* If the input data is small, do not even try to compress it */ - if (in_len < UBIFS_MIN_COMPR_LEN) - goto no_compr; - - { - ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); - - acomp_request_set_src_dma(req, in_buf, in_len); - err = ubifs_compress_req(c, req, out_buf, out_len, compr->name); - } - - if (err) - goto no_compr; - - return; - -no_compr: - memcpy(out_buf, in_buf, in_len); - *out_len = in_len; - *compr_type = UBIFS_COMPR_NONE; + ubifs_compress_common(compr_type, in_ptr, 0, in_len, false, + out_buf, out_len); } /** @@ -166,55 +182,71 @@ void ubifs_compress_folio(const struct ubifs_info *c, struct folio *in_folio, size_t in_offset, int in_len, void *out_buf, int *out_len, int *compr_type) { - int err; - struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; + union ubifs_in_ptr in_ptr = { .folio = in_folio }; - if (*compr_type == UBIFS_COMPR_NONE) - goto no_compr; - - /* If the input data is small, do not even try to compress it */ - if (in_len < UBIFS_MIN_COMPR_LEN) - goto no_compr; + ubifs_compress_common(compr_type, in_ptr, in_offset, in_len, true, + out_buf, out_len); +} - { - ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); +static int ubifs_decompress_common(const struct ubifs_info *c, + const void *in_buf, int in_len, + void *out_ptr, size_t out_offset, + int *out_len, bool out_folio, + int compr_type) +{ + struct ubifs_compressor *compr; + int dlen = *out_len; + int err; - acomp_request_set_src_folio(req, in_folio, in_offset, in_len); - err = ubifs_compress_req(c, req, out_buf, out_len, compr->name); + if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { + ubifs_err(c, "invalid compression type %d", compr_type); + return -EINVAL; } - if (err) - goto no_compr; - - return; + compr = ubifs_compressors[compr_type]; -no_compr: - memcpy_from_folio(out_buf, in_folio, in_offset, in_len); - *out_len = in_len; - *compr_type = UBIFS_COMPR_NONE; -} + if (unlikely(!compr->capi_name)) { + ubifs_err(c, "%s compression is not compiled in", compr->name); + return -EINVAL; + } -static int ubifs_decompress_req(const struct ubifs_info *c, - struct acomp_req *req, - const void *in_buf, int in_len, int *out_len, - const char *compr_name) -{ - struct crypto_wait wait; - int err; + if (compr_type == UBIFS_COMPR_NONE) { + if (out_folio) + memcpy_to_folio(out_ptr, out_offset, in_buf, in_len); + else + memcpy(out_ptr, in_buf, in_len); + *out_len = in_len; + return 0; + } - crypto_init_wait(&wait); - acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); - acomp_request_set_src_dma(req, in_buf, in_len); - err = crypto_acomp_decompress(req); - err = crypto_wait_req(err, &wait); - *out_len = req->dlen; + do { + ACOMP_REQUEST_ON_STACK(req, compr->cc); + DECLARE_CRYPTO_WAIT(wait); + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + acomp_request_set_src_dma(req, in_buf, in_len); + if (out_folio) + acomp_request_set_dst_folio(req, out_ptr, out_offset, + dlen); + else + acomp_request_set_dst_dma(req, out_ptr, dlen); + err = crypto_acomp_decompress(req); + dlen = req->dlen; + if (err != -EAGAIN) + break; + + req = ACOMP_REQUEST_CLONE(req, GFP_NOFS | __GFP_NOWARN); + err = crypto_acomp_decompress(req); + err = crypto_wait_req(err, &wait); + dlen = 
req->dlen; + acomp_request_free(req); + } while (0); + + *out_len = dlen; if (err) ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d", - in_len, compr_name, err); - - acomp_request_free(req); + in_len, compr->name, err); return err; } @@ -235,33 +267,8 @@ static int ubifs_decompress_req(const struct ubifs_info *c, int ubifs_decompress(const struct ubifs_info *c, const void *in_buf, int in_len, void *out_buf, int *out_len, int compr_type) { - struct ubifs_compressor *compr; - - if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { - ubifs_err(c, "invalid compression type %d", compr_type); - return -EINVAL; - } - - compr = ubifs_compressors[compr_type]; - - if (unlikely(!compr->capi_name)) { - ubifs_err(c, "%s compression is not compiled in", compr->name); - return -EINVAL; - } - - if (compr_type == UBIFS_COMPR_NONE) { - memcpy(out_buf, in_buf, in_len); - *out_len = in_len; - return 0; - } - - { - ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); - - acomp_request_set_dst_dma(req, out_buf, *out_len); - return ubifs_decompress_req(c, req, in_buf, in_len, out_len, - compr->name); - } + return ubifs_decompress_common(c, in_buf, in_len, out_buf, 0, out_len, + false, compr_type); } /** @@ -283,34 +290,8 @@ int ubifs_decompress_folio(const struct ubifs_info *c, const void *in_buf, int in_len, struct folio *out_folio, size_t out_offset, int *out_len, int compr_type) { - struct ubifs_compressor *compr; - - if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { - ubifs_err(c, "invalid compression type %d", compr_type); - return -EINVAL; - } - - compr = ubifs_compressors[compr_type]; - - if (unlikely(!compr->capi_name)) { - ubifs_err(c, "%s compression is not compiled in", compr->name); - return -EINVAL; - } - - if (compr_type == UBIFS_COMPR_NONE) { - memcpy_to_folio(out_folio, out_offset, in_buf, in_len); - *out_len = in_len; - return 0; - } - - { - ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); - - acomp_request_set_dst_folio(req, out_folio, out_offset, - *out_len); - return ubifs_decompress_req(c, req, in_buf, in_len, out_len, - compr->name); - } + return ubifs_decompress_common(c, in_buf, in_len, out_folio, + out_offset, out_len, true, compr_type); } /** diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c index b780deb81b02..b492794f8e9a 100644 --- a/fs/vboxsf/file.c +++ b/fs/vboxsf/file.c @@ -262,40 +262,42 @@ static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i) return sf_handle; } -static int vboxsf_writepage(struct page *page, struct writeback_control *wbc) +static int vboxsf_writepages(struct address_space *mapping, + struct writeback_control *wbc) { - struct inode *inode = page->mapping->host; + struct inode *inode = mapping->host; + struct folio *folio = NULL; struct vboxsf_inode *sf_i = VBOXSF_I(inode); struct vboxsf_handle *sf_handle; - loff_t off = page_offset(page); loff_t size = i_size_read(inode); - u32 nwrite = PAGE_SIZE; - u8 *buf; - int err; - - if (off + PAGE_SIZE > size) - nwrite = size & ~PAGE_MASK; + int error; sf_handle = vboxsf_get_write_handle(sf_i); if (!sf_handle) return -EBADF; - buf = kmap(page); - err = vboxsf_write(sf_handle->root, sf_handle->handle, - off, &nwrite, buf); - kunmap(page); + while ((folio = writeback_iter(mapping, wbc, folio, &error))) { + loff_t off = folio_pos(folio); + u32 nwrite = folio_size(folio); + u8 *buf; - kref_put(&sf_handle->refcount, vboxsf_handle_release); + if (nwrite > size - off) + nwrite = size - off; - if (err == 0) { - /* mtime changed 
*/ - sf_i->force_restat = 1; - } else { - ClearPageUptodate(page); + buf = kmap_local_folio(folio, 0); + error = vboxsf_write(sf_handle->root, sf_handle->handle, + off, &nwrite, buf); + kunmap_local(buf); + + folio_unlock(folio); } - unlock_page(page); - return err; + kref_put(&sf_handle->refcount, vboxsf_handle_release); + + /* mtime changed */ + if (error == 0) + sf_i->force_restat = 1; + return error; } static int vboxsf_write_end(struct file *file, struct address_space *mapping, @@ -347,10 +349,11 @@ out: */ const struct address_space_operations vboxsf_reg_aops = { .read_folio = vboxsf_read_folio, - .writepage = vboxsf_writepage, + .writepages = vboxsf_writepages, .dirty_folio = filemap_dirty_folio, .write_begin = simple_write_begin, .write_end = vboxsf_write_end, + .migrate_folio = filemap_migrate_folio, }; static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode, diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 63255820b58a..d954f9b8071f 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3312,6 +3312,11 @@ xfs_bmap_compute_alignments( align = xfs_get_cowextsz_hint(ap->ip); else if (ap->datatype & XFS_ALLOC_USERDATA) align = xfs_get_extsz_hint(ap->ip); + + /* Try to align start block to any minimum allocation alignment */ + if (align > 1 && (ap->flags & XFS_BMAPI_EXTSZALIGN)) + args->alignment = align; + if (align) { if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0, ap->eof, 0, ap->conv, &ap->offset, diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index b4d9c6e0f3f9..d5f2729305fa 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -87,6 +87,9 @@ struct xfs_bmalloca { /* Do not update the rmap btree. Used for reconstructing bmbt from rmapbt. 
*/ #define XFS_BMAPI_NORMAP (1u << 10) +/* Try to align allocations to the extent size hint */ +#define XFS_BMAPI_EXTSZALIGN (1u << 11) + #define XFS_BMAPI_FLAGS \ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ { XFS_BMAPI_METADATA, "METADATA" }, \ @@ -98,7 +101,8 @@ struct xfs_bmalloca { { XFS_BMAPI_REMAP, "REMAP" }, \ { XFS_BMAPI_COWFORK, "COWFORK" }, \ { XFS_BMAPI_NODISCARD, "NODISCARD" }, \ - { XFS_BMAPI_NORMAP, "NORMAP" } + { XFS_BMAPI_NORMAP, "NORMAP" },\ + { XFS_BMAPI_EXTSZALIGN, "EXTSZALIGN" } static inline int xfs_bmapi_aflag(int w) diff --git a/fs/xfs/libxfs/xfs_log_rlimit.c b/fs/xfs/libxfs/xfs_log_rlimit.c index d3bd6a86c8fe..34bba96d30ca 100644 --- a/fs/xfs/libxfs/xfs_log_rlimit.c +++ b/fs/xfs/libxfs/xfs_log_rlimit.c @@ -91,6 +91,7 @@ xfs_log_calc_trans_resv_for_minlogblocks( */ if (xfs_want_minlogsize_fixes(&mp->m_sb)) { xfs_trans_resv_calc(mp, resv); + resv->tr_atomic_ioend = M_RES(mp)->tr_atomic_ioend; return; } @@ -107,6 +108,9 @@ xfs_log_calc_trans_resv_for_minlogblocks( xfs_trans_resv_calc(mp, resv); + /* Copy the dynamic transaction reservation types from the running fs */ + resv->tr_atomic_ioend = M_RES(mp)->tr_atomic_ioend; + if (xfs_has_reflink(mp)) { /* * In the early days of reflink, typical log operation counts diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index 13d00c7166e1..86a111d0f2fc 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c @@ -22,6 +22,12 @@ #include "xfs_rtbitmap.h" #include "xfs_attr_item.h" #include "xfs_log.h" +#include "xfs_defer.h" +#include "xfs_bmap_item.h" +#include "xfs_extfree_item.h" +#include "xfs_rmap_item.h" +#include "xfs_refcount_item.h" +#include "xfs_trace.h" #define _ALLOC true #define _FREE false @@ -264,6 +270,42 @@ xfs_rtalloc_block_count( */ /* + * Finishing a data device refcount updates (t1): + * the agfs of the ags containing the blocks: nr_ops * sector size + * the refcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size + */ +inline unsigned int +xfs_calc_finish_cui_reservation( + struct xfs_mount *mp, + unsigned int nr_ops) +{ + if (!xfs_has_reflink(mp)) + return 0; + + return xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), + mp->m_sb.sb_blocksize); +} + +/* + * Realtime refcount updates (t2); + * the rt refcount inode + * the rtrefcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size + */ +inline unsigned int +xfs_calc_finish_rt_cui_reservation( + struct xfs_mount *mp, + unsigned int nr_ops) +{ + if (!xfs_has_rtreflink(mp)) + return 0; + + return xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(xfs_rtrefcountbt_block_count(mp, nr_ops), + mp->m_sb.sb_blocksize); +} + +/* * Compute the log reservation required to handle the refcount update * transaction. Refcount updates are always done via deferred log items. 
* @@ -280,19 +322,10 @@ xfs_calc_refcountbt_reservation( struct xfs_mount *mp, unsigned int nr_ops) { - unsigned int blksz = XFS_FSB_TO_B(mp, 1); - unsigned int t1, t2 = 0; + unsigned int t1, t2; - if (!xfs_has_reflink(mp)) - return 0; - - t1 = xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz); - - if (xfs_has_realtime(mp)) - t2 = xfs_calc_inode_res(mp, 1) + - xfs_calc_buf_res(xfs_rtrefcountbt_block_count(mp, nr_ops), - blksz); + t1 = xfs_calc_finish_cui_reservation(mp, nr_ops); + t2 = xfs_calc_finish_rt_cui_reservation(mp, nr_ops); return max(t1, t2); } @@ -380,6 +413,96 @@ xfs_calc_write_reservation_minlogsize( } /* + * Finishing an EFI can free the blocks and bmap blocks (t2): + * the agf for each of the ags: nr * sector size + * the agfl for each of the ags: nr * sector size + * the super block to reflect the freed blocks: sector size + * worst case split in allocation btrees per extent assuming nr extents: + * nr exts * 2 trees * (2 * max depth - 1) * block size + */ +inline unsigned int +xfs_calc_finish_efi_reservation( + struct xfs_mount *mp, + unsigned int nr) +{ + return xfs_calc_buf_res((2 * nr) + 1, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_block_count(mp, nr), + mp->m_sb.sb_blocksize); +} + +/* + * Or, if it's a realtime file (t3): + * the agf for each of the ags: 2 * sector size + * the agfl for each of the ags: 2 * sector size + * the super block to reflect the freed blocks: sector size + * the realtime bitmap: + * 2 exts * ((XFS_BMBT_MAX_EXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 2 exts * 1 block + * worst case split in allocation btrees per extent assuming 2 extents: + * 2 exts * 2 trees * (2 * max depth - 1) * block size + */ +inline unsigned int +xfs_calc_finish_rt_efi_reservation( + struct xfs_mount *mp, + unsigned int nr) +{ + if (!xfs_has_realtime(mp)) + return 0; + + return xfs_calc_buf_res((2 * nr) + 1, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_rtalloc_block_count(mp, nr), + mp->m_sb.sb_blocksize) + + xfs_calc_buf_res(xfs_allocfree_block_count(mp, nr), + mp->m_sb.sb_blocksize); +} + +/* + * Finishing an RUI is the same as an EFI. We can split the rmap btree twice + * on each end of the record, and that can cause the AGFL to be refilled or + * emptied out. + */ +inline unsigned int +xfs_calc_finish_rui_reservation( + struct xfs_mount *mp, + unsigned int nr) +{ + if (!xfs_has_rmapbt(mp)) + return 0; + return xfs_calc_finish_efi_reservation(mp, nr); +} + +/* + * Finishing an RUI is the same as an EFI. We can split the rmap btree twice + * on each end of the record, and that can cause the AGFL to be refilled or + * emptied out. + */ +inline unsigned int +xfs_calc_finish_rt_rui_reservation( + struct xfs_mount *mp, + unsigned int nr) +{ + if (!xfs_has_rtrmapbt(mp)) + return 0; + return xfs_calc_finish_rt_efi_reservation(mp, nr); +} + +/* + * In finishing a BUI, we can modify: + * the inode being truncated: inode size + * dquots + * the inode's bmap btree: (max depth + 1) * block size + */ +inline unsigned int +xfs_calc_finish_bui_reservation( + struct xfs_mount *mp, + unsigned int nr) +{ + return xfs_calc_inode_res(mp, 1) + XFS_DQUOT_LOGRES + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, + mp->m_sb.sb_blocksize); +} + +/* * In truncating a file we free up to two extents at once. 
We can modify (t1): * the inode being truncated: inode size * the inode's bmap btree: (max depth + 1) * block size @@ -411,16 +534,8 @@ xfs_calc_itruncate_reservation( t1 = xfs_calc_inode_res(mp, 1) + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz); - t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4), blksz); - - if (xfs_has_realtime(mp)) { - t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 2), blksz) + - xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz); - } else { - t3 = 0; - } + t2 = xfs_calc_finish_efi_reservation(mp, 4); + t3 = xfs_calc_finish_rt_efi_reservation(mp, 2); /* * In the early days of reflink, we included enough reservation to log @@ -501,9 +616,7 @@ xfs_calc_rename_reservation( xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)); - t2 = xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_block_count(mp, 3), - XFS_FSB_TO_B(mp, 1)); + t2 = xfs_calc_finish_efi_reservation(mp, 3); if (xfs_has_parent(mp)) { unsigned int rename_overhead, exchange_overhead; @@ -611,9 +724,7 @@ xfs_calc_link_reservation( overhead += xfs_calc_iunlink_remove_reservation(mp); t1 = xfs_calc_inode_res(mp, 2) + xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)); - t2 = xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1), - XFS_FSB_TO_B(mp, 1)); + t2 = xfs_calc_finish_efi_reservation(mp, 1); if (xfs_has_parent(mp)) { t3 = resp->tr_attrsetm.tr_logres; @@ -676,9 +787,7 @@ xfs_calc_remove_reservation( t1 = xfs_calc_inode_res(mp, 2) + xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)); - t2 = xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), - XFS_FSB_TO_B(mp, 1)); + t2 = xfs_calc_finish_efi_reservation(mp, 2); if (xfs_has_parent(mp)) { t3 = resp->tr_attrrm.tr_logres; @@ -1181,6 +1290,15 @@ xfs_calc_namespace_reservations( resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES; } +STATIC void +xfs_calc_default_atomic_ioend_reservation( + struct xfs_mount *mp, + struct xfs_trans_resv *resp) +{ + /* Pick a default that will scale reasonably for the log size. */ + resp->tr_atomic_ioend = resp->tr_itruncate; +} + void xfs_trans_resv_calc( struct xfs_mount *mp, @@ -1275,4 +1393,167 @@ xfs_trans_resv_calc( resp->tr_itruncate.tr_logcount += logcount_adj; resp->tr_write.tr_logcount += logcount_adj; resp->tr_qm_dqalloc.tr_logcount += logcount_adj; + + /* + * Now that we've finished computing the static reservations, we can + * compute the dynamic reservation for atomic writes. + */ + xfs_calc_default_atomic_ioend_reservation(mp, resp); +} + +/* + * Return the per-extent and fixed transaction reservation sizes needed to + * complete an atomic write. + */ +STATIC unsigned int +xfs_calc_atomic_write_ioend_geometry( + struct xfs_mount *mp, + unsigned int *step_size) +{ + const unsigned int efi = xfs_efi_log_space(1); + const unsigned int efd = xfs_efd_log_space(1); + const unsigned int rui = xfs_rui_log_space(1); + const unsigned int rud = xfs_rud_log_space(); + const unsigned int cui = xfs_cui_log_space(1); + const unsigned int cud = xfs_cud_log_space(); + const unsigned int bui = xfs_bui_log_space(1); + const unsigned int bud = xfs_bud_log_space(); + + /* + * Maximum overhead to complete an atomic write ioend in software: + * remove data fork extent + remove cow fork extent + map extent into + * data fork. 
+ * + * tx0: Creates a BUI and a CUI and that's all it needs. + * + * tx1: Roll to finish the BUI. Need space for the BUD, an RUI, and + * enough space to relog the CUI (== CUI + CUD). + * + * tx2: Roll again to finish the RUI. Need space for the RUD and space + * to relog the CUI. + * + * tx3: Roll again, need space for the CUD and possibly a new EFI. + * + * tx4: Roll again, need space for an EFD. + * + * If the extent referenced by the pair of BUI/CUI items is not the one + * being currently processed, then we need to reserve space to relog + * both items. + */ + const unsigned int tx0 = bui + cui; + const unsigned int tx1 = bud + rui + cui + cud; + const unsigned int tx2 = rud + cui + cud; + const unsigned int tx3 = cud + efi; + const unsigned int tx4 = efd; + const unsigned int relog = bui + bud + cui + cud; + + const unsigned int per_intent = max(max3(tx0, tx1, tx2), + max3(tx3, tx4, relog)); + + /* Overhead to finish one step of each intent item type */ + const unsigned int f1 = xfs_calc_finish_efi_reservation(mp, 1); + const unsigned int f2 = xfs_calc_finish_rui_reservation(mp, 1); + const unsigned int f3 = xfs_calc_finish_cui_reservation(mp, 1); + const unsigned int f4 = xfs_calc_finish_bui_reservation(mp, 1); + + /* We only finish one item per transaction in a chain */ + *step_size = max(f4, max3(f1, f2, f3)); + + return per_intent; +} + +/* + * Compute the maximum size (in fsblocks) of atomic writes that we can complete + * given the existing log reservations. + */ +xfs_extlen_t +xfs_calc_max_atomic_write_fsblocks( + struct xfs_mount *mp) +{ + const struct xfs_trans_res *resv = &M_RES(mp)->tr_atomic_ioend; + unsigned int per_intent = 0; + unsigned int step_size = 0; + unsigned int ret = 0; + + if (resv->tr_logres > 0) { + per_intent = xfs_calc_atomic_write_ioend_geometry(mp, + &step_size); + + if (resv->tr_logres >= step_size) + ret = (resv->tr_logres - step_size) / per_intent; + } + + trace_xfs_calc_max_atomic_write_fsblocks(mp, per_intent, step_size, + resv->tr_logres, ret); + + return ret; +} + +/* + * Compute the log blocks and transaction reservation needed to complete an + * atomic write of a given number of blocks. Worst case, each block requires + * separate handling. A return value of 0 means something went wrong. + */ +xfs_extlen_t +xfs_calc_atomic_write_log_geometry( + struct xfs_mount *mp, + xfs_extlen_t blockcount, + unsigned int *new_logres) +{ + struct xfs_trans_res *curr_res = &M_RES(mp)->tr_atomic_ioend; + uint old_logres = curr_res->tr_logres; + unsigned int per_intent, step_size; + unsigned int logres; + xfs_extlen_t min_logblocks; + + ASSERT(blockcount > 0); + + xfs_calc_default_atomic_ioend_reservation(mp, M_RES(mp)); + + per_intent = xfs_calc_atomic_write_ioend_geometry(mp, &step_size); + + /* Check for overflows */ + if (check_mul_overflow(blockcount, per_intent, &logres) || + check_add_overflow(logres, step_size, &logres)) + return 0; + + curr_res->tr_logres = logres; + min_logblocks = xfs_log_calc_minimum_size(mp); + curr_res->tr_logres = old_logres; + + trace_xfs_calc_max_atomic_write_log_geometry(mp, per_intent, step_size, + blockcount, min_logblocks, logres); + + *new_logres = logres; + return min_logblocks; +} + +/* + * Compute the transaction reservation needed to complete an out of place + * atomic write of a given number of blocks. 
+ */ +int +xfs_calc_atomic_write_reservation( + struct xfs_mount *mp, + xfs_extlen_t blockcount) +{ + unsigned int new_logres; + xfs_extlen_t min_logblocks; + + /* + * If the caller doesn't ask for a specific atomic write size, then + * use the defaults. + */ + if (blockcount == 0) { + xfs_calc_default_atomic_ioend_reservation(mp, M_RES(mp)); + return 0; + } + + min_logblocks = xfs_calc_atomic_write_log_geometry(mp, blockcount, + &new_logres); + if (!min_logblocks || min_logblocks > mp->m_sb.sb_logblocks) + return -EINVAL; + + M_RES(mp)->tr_atomic_ioend.tr_logres = new_logres; + return 0; } diff --git a/fs/xfs/libxfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h index 0554b9d775d2..336279e0fc61 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.h +++ b/fs/xfs/libxfs/xfs_trans_resv.h @@ -48,6 +48,7 @@ struct xfs_trans_resv { struct xfs_trans_res tr_qm_dqalloc; /* allocate quota on disk */ struct xfs_trans_res tr_sb; /* modify superblock */ struct xfs_trans_res tr_fsyncts; /* update timestamps on fsync */ + struct xfs_trans_res tr_atomic_ioend; /* untorn write completion */ }; /* shorthand way of accessing reservation structure */ @@ -98,8 +99,32 @@ struct xfs_trans_resv { void xfs_trans_resv_calc(struct xfs_mount *mp, struct xfs_trans_resv *resp); uint xfs_allocfree_block_count(struct xfs_mount *mp, uint num_ops); +unsigned int xfs_calc_finish_bui_reservation(struct xfs_mount *mp, + unsigned int nr_ops); + +unsigned int xfs_calc_finish_efi_reservation(struct xfs_mount *mp, + unsigned int nr_ops); +unsigned int xfs_calc_finish_rt_efi_reservation(struct xfs_mount *mp, + unsigned int nr_ops); + +unsigned int xfs_calc_finish_rui_reservation(struct xfs_mount *mp, + unsigned int nr_ops); +unsigned int xfs_calc_finish_rt_rui_reservation(struct xfs_mount *mp, + unsigned int nr_ops); + +unsigned int xfs_calc_finish_cui_reservation(struct xfs_mount *mp, + unsigned int nr_ops); +unsigned int xfs_calc_finish_rt_cui_reservation(struct xfs_mount *mp, + unsigned int nr_ops); + unsigned int xfs_calc_itruncate_reservation_minlogsize(struct xfs_mount *mp); unsigned int xfs_calc_write_reservation_minlogsize(struct xfs_mount *mp); unsigned int xfs_calc_qm_dqalloc_reservation_minlogsize(struct xfs_mount *mp); +xfs_extlen_t xfs_calc_max_atomic_write_fsblocks(struct xfs_mount *mp); +xfs_extlen_t xfs_calc_atomic_write_log_geometry(struct xfs_mount *mp, + xfs_extlen_t blockcount, unsigned int *new_logres); +int xfs_calc_atomic_write_reservation(struct xfs_mount *mp, + xfs_extlen_t blockcount); + #endif /* __XFS_TRANS_RESV_H__ */ diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c index e629663e460a..9b598c5790ad 100644 --- a/fs/xfs/scrub/fscounters.c +++ b/fs/xfs/scrub/fscounters.c @@ -123,7 +123,7 @@ xchk_fsfreeze( { int error; - error = freeze_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL); + error = freeze_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL, NULL); trace_xchk_fsfreeze(sc, error); return error; } @@ -135,7 +135,7 @@ xchk_fsthaw( int error; /* This should always succeed, we have a kernel freeze */ - error = thaw_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL); + error = thaw_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL, NULL); trace_xchk_fsthaw(sc, error); return error; } diff --git a/fs/xfs/scrub/orphanage.c b/fs/xfs/scrub/orphanage.c index 3537f3cca6d5..9c12cb844231 100644 --- a/fs/xfs/scrub/orphanage.c +++ b/fs/xfs/scrub/orphanage.c @@ -153,8 +153,7 @@ xrep_orphanage_create( /* Try to find the orphanage directory. 
*/ inode_lock_nested(root_inode, I_MUTEX_PARENT); - orphanage_dentry = lookup_one_len(ORPHANAGE, root_dentry, - strlen(ORPHANAGE)); + orphanage_dentry = lookup_noperm(&QSTR(ORPHANAGE), root_dentry); if (IS_ERR(orphanage_dentry)) { error = PTR_ERR(orphanage_dentry); goto out_unlock_root; @@ -445,7 +444,7 @@ xrep_adoption_check_dcache( if (!d_orphanage) return 0; - d_child = d_hash_and_lookup(d_orphanage, &qname); + d_child = try_lookup_noperm(&qname, d_orphanage); if (d_child) { trace_xrep_adoption_check_child(sc->mp, d_child); @@ -482,7 +481,7 @@ xrep_adoption_zap_dcache( if (!d_orphanage) return; - d_child = d_hash_and_lookup(d_orphanage, &qname); + d_child = try_lookup_noperm(&qname, d_orphanage); while (d_child != NULL) { trace_xrep_adoption_invalidate_child(sc->mp, d_child); diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 9908850bf76f..76e24032e99a 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c @@ -680,8 +680,6 @@ xfs_scrub_metadata( if (error) goto out; - xfs_warn_experimental(mp, XFS_EXPERIMENTAL_SCRUB); - sc = kzalloc(sizeof(struct xfs_scrub), XCHK_GFP_FLAGS); if (!sc) { error = -ENOMEM; diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c index fe21c76f75b8..2a736d10eafb 100644 --- a/fs/xfs/xfs_bio_io.c +++ b/fs/xfs/xfs_bio_io.c @@ -18,42 +18,36 @@ xfs_rw_bdev( enum req_op op) { - unsigned int is_vmalloc = is_vmalloc_addr(data); - unsigned int left = count; + unsigned int done = 0, added; int error; struct bio *bio; - if (is_vmalloc && op == REQ_OP_WRITE) - flush_kernel_vmap_range(data, count); + op |= REQ_META | REQ_SYNC; + if (!is_vmalloc_addr(data)) + return bdev_rw_virt(bdev, sector, data, count, op); - bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC, - GFP_KERNEL); + bio = bio_alloc(bdev, bio_max_vecs(count), op, GFP_KERNEL); bio->bi_iter.bi_sector = sector; do { - struct page *page = kmem_to_page(data); - unsigned int off = offset_in_page(data); - unsigned int len = min_t(unsigned, left, PAGE_SIZE - off); - - while (bio_add_page(bio, page, len, off) != len) { + added = bio_add_vmalloc_chunk(bio, data + done, count - done); + if (!added) { struct bio *prev = bio; - bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left), + bio = bio_alloc(prev->bi_bdev, + bio_max_vecs(count - done), prev->bi_opf, GFP_KERNEL); bio->bi_iter.bi_sector = bio_end_sector(prev); bio_chain(prev, bio); - submit_bio(prev); } - - data += len; - left -= len; - } while (left > 0); + done += added; + } while (done < count); error = submit_bio_wait(bio); bio_put(bio); - if (is_vmalloc && op == REQ_OP_READ) + if (op == REQ_OP_READ) invalidate_kernel_vmap_range(data, count); return error; } diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c index 3d52e9d7ad57..646c515ee355 100644 --- a/fs/xfs/xfs_bmap_item.c +++ b/fs/xfs/xfs_bmap_item.c @@ -77,6 +77,11 @@ xfs_bui_item_size( *nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents); } +unsigned int xfs_bui_log_space(unsigned int nr) +{ + return xlog_item_space(1, xfs_bui_log_format_sizeof(nr)); +} + /* * This is called to fill in the vector of log iovecs for the * given bui log item. We use only 1 iovec, and we point that @@ -168,6 +173,11 @@ xfs_bud_item_size( *nbytes += sizeof(struct xfs_bud_log_format); } +unsigned int xfs_bud_log_space(void) +{ + return xlog_item_space(1, sizeof(struct xfs_bud_log_format)); +} + /* * This is called to fill in the vector of log iovecs for the * given bud log item. 
We use only 1 iovec, and we point that diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h index 6fee6a508343..b42fee06899d 100644 --- a/fs/xfs/xfs_bmap_item.h +++ b/fs/xfs/xfs_bmap_item.h @@ -72,4 +72,7 @@ struct xfs_bmap_intent; void xfs_bmap_defer_add(struct xfs_trans *tp, struct xfs_bmap_intent *bi); +unsigned int xfs_bui_log_space(unsigned int nr); +unsigned int xfs_bud_log_space(void); + #endif /* __XFS_BMAP_ITEM_H__ */ diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 1a2b3f06fa71..8af83bd161f9 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1333,45 +1333,18 @@ static void xfs_buf_submit_bio( struct xfs_buf *bp) { + unsigned int len = BBTOB(bp->b_length); + unsigned int nr_vecs = bio_add_max_vecs(bp->b_addr, len); unsigned int map = 0; struct blk_plug plug; struct bio *bio; - if (is_vmalloc_addr(bp->b_addr)) { - unsigned int size = BBTOB(bp->b_length); - unsigned int alloc_size = roundup(size, PAGE_SIZE); - void *data = bp->b_addr; - - bio = bio_alloc(bp->b_target->bt_bdev, alloc_size >> PAGE_SHIFT, - xfs_buf_bio_op(bp), GFP_NOIO); - - do { - unsigned int len = min(size, PAGE_SIZE); - - ASSERT(offset_in_page(data) == 0); - __bio_add_page(bio, vmalloc_to_page(data), len, 0); - data += len; - size -= len; - } while (size); - - flush_kernel_vmap_range(bp->b_addr, alloc_size); - } else { - /* - * Single folio or slab allocation. Must be contiguous and thus - * only a single bvec is needed. - * - * This uses the page based bio add helper for now as that is - * the lowest common denominator between folios and slab - * allocations. To be replaced with a better block layer - * helper soon (hopefully). - */ - bio = bio_alloc(bp->b_target->bt_bdev, 1, xfs_buf_bio_op(bp), - GFP_NOIO); - __bio_add_page(bio, virt_to_page(bp->b_addr), - BBTOB(bp->b_length), - offset_in_page(bp->b_addr)); - } - + bio = bio_alloc(bp->b_target->bt_bdev, nr_vecs, xfs_buf_bio_op(bp), + GFP_NOIO); + if (is_vmalloc_addr(bp->b_addr)) + bio_add_vmalloc(bio, bp->b_addr, len); + else + bio_add_virt_nofail(bio, bp->b_addr, len); bio->bi_private = bp; bio->bi_end_io = xfs_buf_bio_end_io; @@ -1714,23 +1687,65 @@ xfs_free_buftarg( kfree(btp); } +/* + * Configure this buffer target for hardware-assisted atomic writes if the + * underlying block device's support is congruent with the filesystem geometry. + */ +static inline void +xfs_configure_buftarg_atomic_writes( + struct xfs_buftarg *btp) +{ + struct xfs_mount *mp = btp->bt_mount; + unsigned int min_bytes, max_bytes; + + min_bytes = bdev_atomic_write_unit_min_bytes(btp->bt_bdev); + max_bytes = bdev_atomic_write_unit_max_bytes(btp->bt_bdev); + + /* + * Ignore atomic write geometry that is nonsense or doesn't even cover + * a single fsblock. + */ + if (min_bytes > max_bytes || + min_bytes > mp->m_sb.sb_blocksize || + max_bytes < mp->m_sb.sb_blocksize) { + min_bytes = 0; + max_bytes = 0; + } + + btp->bt_bdev_awu_min = min_bytes; + btp->bt_bdev_awu_max = max_bytes; +} + +/* Configure a buffer target that abstracts a block device.
*/ int -xfs_setsize_buftarg( +xfs_configure_buftarg( struct xfs_buftarg *btp, unsigned int sectorsize) { + int error; + + ASSERT(btp->bt_bdev != NULL); + /* Set up metadata sector size info */ btp->bt_meta_sectorsize = sectorsize; btp->bt_meta_sectormask = sectorsize - 1; - if (set_blocksize(btp->bt_bdev_file, sectorsize)) { + error = bdev_validate_blocksize(btp->bt_bdev, sectorsize); + if (error) { xfs_warn(btp->bt_mount, - "Cannot set_blocksize to %u on device %pg", - sectorsize, btp->bt_bdev); + "Cannot use blocksize %u on device %pg, err %d", + sectorsize, btp->bt_bdev, error); return -EINVAL; } - return 0; + /* + * Flush the block device pagecache so our bios see anything dirtied + * before mount. + */ + if (bdev_can_atomic_write(btp->bt_bdev)) + xfs_configure_buftarg_atomic_writes(btp); + + return sync_blockdev(btp->bt_bdev); } int @@ -1779,6 +1794,8 @@ xfs_alloc_buftarg( { struct xfs_buftarg *btp; const struct dax_holder_operations *ops = NULL; + int error; + #if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE) ops = &xfs_dax_holder_operations; @@ -1792,28 +1809,31 @@ xfs_alloc_buftarg( btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off, mp, ops); - if (bdev_can_atomic_write(btp->bt_bdev)) { - btp->bt_bdev_awu_min = bdev_atomic_write_unit_min_bytes( - btp->bt_bdev); - btp->bt_bdev_awu_max = bdev_atomic_write_unit_max_bytes( - btp->bt_bdev); - } + /* + * Flush and invalidate all devices' pagecaches before reading any + * metadata because XFS doesn't use the bdev pagecache. + */ + error = sync_blockdev(btp->bt_bdev); + if (error) + goto error_free; /* * When allocating the buftargs we have not yet read the super block and * thus don't know the file system sector size yet. */ - if (xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev))) - goto error_free; - if (xfs_init_buftarg(btp, bdev_logical_block_size(btp->bt_bdev), - mp->m_super->s_id)) + btp->bt_meta_sectorsize = bdev_logical_block_size(btp->bt_bdev); + btp->bt_meta_sectormask = btp->bt_meta_sectorsize - 1; + + error = xfs_init_buftarg(btp, btp->bt_meta_sectorsize, + mp->m_super->s_id); + if (error) goto error_free; return btp; error_free: kfree(btp); - return NULL; + return ERR_PTR(error); } static inline void diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index d0b065a9a9f0..9d2ab567cf81 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -112,7 +112,7 @@ struct xfs_buftarg { struct percpu_counter bt_readahead_count; struct ratelimit_state bt_ioerror_rl; - /* Atomic write unit values */ + /* Atomic write unit values, bytes */ unsigned int bt_bdev_awu_min; unsigned int bt_bdev_awu_max; @@ -374,7 +374,7 @@ struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp, extern void xfs_free_buftarg(struct xfs_buftarg *); extern void xfs_buftarg_wait(struct xfs_buftarg *); extern void xfs_buftarg_drain(struct xfs_buftarg *); -extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int); +int xfs_configure_buftarg(struct xfs_buftarg *btp, unsigned int sectorsize); #define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev) #define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 19eb0b7a3e58..90139e0f3271 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -104,6 +104,25 @@ xfs_buf_item_size_segment( } /* + * Compute the worst case log item overhead for an invalidated buffer with the + * given map count and block size. 
+ */ +unsigned int +xfs_buf_inval_log_space( + unsigned int map_count, + unsigned int blocksize) +{ + unsigned int chunks = DIV_ROUND_UP(blocksize, XFS_BLF_CHUNK); + unsigned int bitmap_size = DIV_ROUND_UP(chunks, NBWORD); + unsigned int ret = + offsetof(struct xfs_buf_log_format, blf_data_map) + + (bitmap_size * sizeof_field(struct xfs_buf_log_format, + blf_data_map[0])); + + return ret * map_count; +} + +/* * Return the number of log iovecs and space needed to log the given buf log * item. * diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index 8cde85259a58..e10e324cd245 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h @@ -64,6 +64,9 @@ static inline void xfs_buf_dquot_iodone(struct xfs_buf *bp) void xfs_buf_iodone(struct xfs_buf *); bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec); +unsigned int xfs_buf_inval_log_space(unsigned int map_count, + unsigned int blocksize); + extern struct kmem_cache *xfs_buf_item_cache; #endif /* __XFS_BUF_ITEM_H__ */ diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index c1a306268ae4..94d0873bcd62 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -167,6 +167,14 @@ xfs_discard_extents( return error; } +/* + * Care must be taken setting up the trim cursor as the perags may not have been + * initialised when the cursor is initialised. e.g. a clean mount which hasn't + * read in AGFs and the first operation run on the mounted fs is a trim. This + * can result in perag fields that aren't initialised until + * xfs_trim_gather_extents() calls xfs_alloc_read_agf() to lock down the AG for + * the free space search. + */ struct xfs_trim_cur { xfs_agblock_t start; xfs_extlen_t count; @@ -204,6 +212,14 @@ xfs_trim_gather_extents( if (error) goto out_trans_cancel; + /* + * First time through tcur->count will not have been initialised as + * pag->pagf_longest is not guaranteed to be valid before we read + * the AGF buffer above. + */ + if (!tcur->count) + tcur->count = pag->pagf_longest; + if (tcur->by_bno) { /* sub-AG discard request always starts at tcur->start */ cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag); @@ -350,7 +366,6 @@ xfs_trim_perag_extents( { struct xfs_trim_cur tcur = { .start = start, - .count = pag->pagf_longest, .end = end, .minlen = minlen, }; diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 777438b853da..d574f5f639fa 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -83,6 +83,11 @@ xfs_efi_item_size( *nbytes += xfs_efi_log_format_sizeof(efip->efi_format.efi_nextents); } +unsigned int xfs_efi_log_space(unsigned int nr) +{ + return xlog_item_space(1, xfs_efi_log_format_sizeof(nr)); +} + /* * This is called to fill in the vector of log iovecs for the * given efi log item. We use only 1 iovec, and we point that @@ -254,6 +259,11 @@ xfs_efd_item_size( *nbytes += xfs_efd_log_format_sizeof(efdp->efd_format.efd_nextents); } +unsigned int xfs_efd_log_space(unsigned int nr) +{ + return xlog_item_space(1, xfs_efd_log_format_sizeof(nr)); +} + /* * This is called to fill in the vector of log iovecs for the * given efd log item. 
We use only 1 iovec, and we point that diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h index 41b7c4306079..c8402040410b 100644 --- a/fs/xfs/xfs_extfree_item.h +++ b/fs/xfs/xfs_extfree_item.h @@ -94,4 +94,7 @@ void xfs_extent_free_defer_add(struct xfs_trans *tp, struct xfs_extent_free_item *xefi, struct xfs_defer_pending **dfpp); +unsigned int xfs_efi_log_space(unsigned int nr); +unsigned int xfs_efd_log_space(unsigned int nr); + #endif /* __XFS_EXTFREE_ITEM_H__ */ diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 84f08c976ac4..48254a72071b 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -576,7 +576,10 @@ xfs_dio_write_end_io( nofs_flag = memalloc_nofs_save(); if (flags & IOMAP_DIO_COW) { - error = xfs_reflink_end_cow(ip, offset, size); + if (iocb->ki_flags & IOCB_ATOMIC) + error = xfs_reflink_end_atomic_cow(ip, offset, size); + else + error = xfs_reflink_end_cow(ip, offset, size); if (error) goto out; } @@ -726,6 +729,72 @@ xfs_file_dio_write_zoned( } /* + * Handle block atomic writes + * + * Two methods of atomic writes are supported: + * - REQ_ATOMIC-based, which would typically use some form of HW offload in the + * disk + * - COW-based, which uses a COW fork as a staging extent for data updates + * before atomically updating extent mappings for the range being written + * + */ +static noinline ssize_t +xfs_file_dio_write_atomic( + struct xfs_inode *ip, + struct kiocb *iocb, + struct iov_iter *from) +{ + unsigned int iolock = XFS_IOLOCK_SHARED; + ssize_t ret, ocount = iov_iter_count(from); + const struct iomap_ops *dops; + + /* + * HW offload should be faster, so try that first if it is already + * known that the write length is not too large. + */ + if (ocount > xfs_inode_buftarg(ip)->bt_bdev_awu_max) + dops = &xfs_atomic_write_cow_iomap_ops; + else + dops = &xfs_direct_write_iomap_ops; + +retry: + ret = xfs_ilock_iocb_for_write(iocb, &iolock); + if (ret) + return ret; + + ret = xfs_file_write_checks(iocb, from, &iolock, NULL); + if (ret) + goto out_unlock; + + /* Demote similar to xfs_file_dio_write_aligned() */ + if (iolock == XFS_IOLOCK_EXCL) { + xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); + iolock = XFS_IOLOCK_SHARED; + } + + trace_xfs_file_direct_write(iocb, from); + ret = iomap_dio_rw(iocb, from, dops, &xfs_dio_write_ops, + 0, NULL, 0); + + /* + * The retry mechanism is based on the ->iomap_begin method returning + * -ENOPROTOOPT, which would be when the REQ_ATOMIC-based write is not + * possible. The REQ_ATOMIC-based method will typically not be possible if + * the write spans multiple extents or the disk blocks are misaligned. + */ + if (ret == -ENOPROTOOPT && dops == &xfs_direct_write_iomap_ops) { + xfs_iunlock(ip, iolock); + dops = &xfs_atomic_write_cow_iomap_ops; + goto retry; + } + +out_unlock: + if (iolock) + xfs_iunlock(ip, iolock); + return ret; +} + +/* * Handle block unaligned direct I/O writes * * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing @@ -840,6 +909,8 @@ xfs_file_dio_write( return xfs_file_dio_write_unaligned(ip, iocb, from); if (xfs_is_zoned_inode(ip)) return xfs_file_dio_write_zoned(ip, iocb, from); + if (iocb->ki_flags & IOCB_ATOMIC) + return xfs_file_dio_write_atomic(ip, iocb, from); return xfs_file_dio_write_aligned(ip, iocb, from, &xfs_direct_write_iomap_ops, &xfs_dio_write_ops, NULL); } @@ -1032,14 +1103,12 @@ xfs_file_write_iter( return xfs_file_dax_write(iocb, from); if (iocb->ki_flags & IOCB_ATOMIC) { - /* - * Currently only atomic writing of a single FS block is - * supported.
It would be possible to atomic write smaller than - * a FS block, but there is no requirement to support this. - * Note that iomap also does not support this yet. - */ - if (ocount != ip->i_mount->m_sb.sb_blocksize) + if (ocount < xfs_get_atomic_write_min(ip)) return -EINVAL; + + if (ocount > xfs_get_atomic_write_max(ip)) + return -EINVAL; + ret = generic_atomic_write_valid(iocb, from); if (ret) return ret; @@ -1488,7 +1557,7 @@ xfs_file_open( if (xfs_is_shutdown(XFS_M(inode->i_sb))) return -EIO; file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; - if (xfs_inode_can_atomicwrite(XFS_I(inode))) + if (xfs_get_atomic_write_min(XFS_I(inode)) > 0) file->f_mode |= FMODE_CAN_ATOMIC_WRITE; return generic_file_open(inode, file); } diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index a961aa420c48..044918fbae06 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c @@ -304,11 +304,9 @@ xfs_filestream_create_association( * for us, so all we need to do here is take another active reference to * the perag for the cached association. * - * If we fail to store the association, we need to drop the fstrms - * counter as well as drop the perag reference we take here for the - * item. We do not need to return an error for this failure - as long as - * we return a referenced AG, the allocation can still go ahead just - * fine. + * If we fail to store the association, we do not need to return an + * error for this failure - as long as we return a referenced AG, the + * allocation can still go ahead just fine. */ item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!item) @@ -316,14 +314,9 @@ xfs_filestream_create_association( atomic_inc(&pag_group(args->pag)->xg_active_ref); item->pag = args->pag; - error = xfs_mru_cache_insert(mp->m_filestream, pino, &item->mru); - if (error) - goto out_free_item; + xfs_mru_cache_insert(mp->m_filestream, pino, &item->mru); return 0; -out_free_item: - xfs_perag_rele(item->pag); - kfree(item); out_put_fstrms: atomic_dec(&args->pag->pagf_fstrms); return 0; diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c index f18fec0adf66..f6f628c01feb 100644 --- a/fs/xfs/xfs_globals.c +++ b/fs/xfs/xfs_globals.c @@ -23,8 +23,6 @@ xfs_param_t xfs_params = { .inherit_sync = { 0, 1, 1 }, .inherit_nodump = { 0, 1, 1 }, .inherit_noatim = { 0, 1, 1 }, - .xfs_buf_timer = { 100/2, 1*100, 30*100 }, - .xfs_buf_age = { 1*100, 15*100, 7200*100}, .inherit_nosym = { 0, 0, 1 }, .rotorstep = { 1, 1, 255 }, .inherit_nodfrg = { 0, 1, 1 }, diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index eae0159983ca..d7e2b902ef5c 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -356,19 +356,9 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip) (XFS_IS_REALTIME_INODE(ip) ? 
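The IOCB_ATOMIC checks above reduce to a simple length/alignment predicate; a sketch under the assumption that the minimum is at least one block and that lengths are powers of two (as generic_atomic_write_valid() also requires). This is illustrative only, not the kernel helpers.

#include <stdbool.h>
#include <stdint.h>

static bool atomic_write_len_ok(uint64_t pos, uint64_t len,
                                uint64_t min, uint64_t max)
{
        if (!len || len < min || len > max)
                return false;
        if (len & (len - 1))            /* length must be a power of two */
                return false;
        if (pos & (len - 1))            /* and naturally aligned in the file */
                return false;
        return true;
}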
\ (ip)->i_mount->m_rtdev_targp : (ip)->i_mount->m_ddev_targp) -static inline bool -xfs_inode_can_atomicwrite( - struct xfs_inode *ip) +static inline bool xfs_inode_can_hw_atomic_write(const struct xfs_inode *ip) { - struct xfs_mount *mp = ip->i_mount; - struct xfs_buftarg *target = xfs_inode_buftarg(ip); - - if (mp->m_sb.sb_blocksize < target->bt_bdev_awu_min) - return false; - if (mp->m_sb.sb_blocksize > target->bt_bdev_awu_max) - return false; - - return true; + return xfs_inode_buftarg(ip)->bt_bdev_awu_max > 0; } /* diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index cb23c8871f81..ff05e6b1b0bb 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -798,6 +798,38 @@ imap_spans_range( return true; } +static bool +xfs_bmap_hw_atomic_write_possible( + struct xfs_inode *ip, + struct xfs_bmbt_irec *imap, + xfs_fileoff_t offset_fsb, + xfs_fileoff_t end_fsb) +{ + struct xfs_mount *mp = ip->i_mount; + xfs_fsize_t len = XFS_FSB_TO_B(mp, end_fsb - offset_fsb); + + /* + * atomic writes are required to be naturally aligned for disk blocks, + * which ensures that we adhere to block layer rules that we won't + * straddle any boundary or violate write alignment requirement. + */ + if (!IS_ALIGNED(imap->br_startblock, imap->br_blockcount)) + return false; + + /* + * Spanning multiple extents would mean that multiple BIOs would be + * issued, and so would lose atomicity required for REQ_ATOMIC-based + * atomics. + */ + if (!imap_spans_range(imap, offset_fsb, end_fsb)) + return false; + + /* + * The ->iomap_begin caller should ensure this, but check anyway. + */ + return len <= xfs_inode_buftarg(ip)->bt_bdev_awu_max; +} + static int xfs_direct_write_iomap_begin( struct inode *inode, @@ -812,9 +844,11 @@ xfs_direct_write_iomap_begin( struct xfs_bmbt_irec imap, cmap; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length); + xfs_fileoff_t orig_end_fsb = end_fsb; int nimaps = 1, error = 0; bool shared = false; u16 iomap_flags = 0; + bool needs_alloc; unsigned int lockmode; u64 seq; @@ -875,13 +909,37 @@ relock: (flags & IOMAP_DIRECT) || IS_DAX(inode)); if (error) goto out_unlock; - if (shared) + if (shared) { + if ((flags & IOMAP_ATOMIC) && + !xfs_bmap_hw_atomic_write_possible(ip, &cmap, + offset_fsb, end_fsb)) { + error = -ENOPROTOOPT; + goto out_unlock; + } goto out_found_cow; + } end_fsb = imap.br_startoff + imap.br_blockcount; length = XFS_FSB_TO_B(mp, end_fsb) - offset; } - if (imap_needs_alloc(inode, flags, &imap, nimaps)) + needs_alloc = imap_needs_alloc(inode, flags, &imap, nimaps); + + if (flags & IOMAP_ATOMIC) { + error = -ENOPROTOOPT; + /* + * If we allocate less than what is required for the write + * then we may end up with multiple extents, which means that + * REQ_ATOMIC-based cannot be used, so avoid this possibility. 
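xfs_bmap_hw_atomic_write_possible() above boils down to three checks before a REQ_ATOMIC bio may be built; a stand-alone sketch with a simplified mapping struct (not the kernel's xfs_bmbt_irec) and byte-based limits follows.

#include <stdbool.h>
#include <stdint.h>

struct mapping {                 /* simplified stand-in for xfs_bmbt_irec */
        uint64_t startblock;
        uint64_t startoff;
        uint64_t blockcount;
};

static bool hw_atomic_possible(const struct mapping *m,
                               uint64_t off, uint64_t end,
                               uint64_t len_bytes, uint64_t awu_max_bytes)
{
        if (!m->blockcount)
                return false;
        if (m->startblock % m->blockcount)      /* not naturally aligned */
                return false;
        if (m->startoff > off || m->startoff + m->blockcount < end)
                return false;                   /* write spans > 1 extent */
        return len_bytes <= awu_max_bytes;      /* within the device limit */
}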
+ */ + if (needs_alloc && orig_end_fsb - offset_fsb > 1) + goto out_unlock; + + if (!xfs_bmap_hw_atomic_write_possible(ip, &imap, offset_fsb, + orig_end_fsb)) + goto out_unlock; + } + + if (needs_alloc) goto allocate_blocks; /* @@ -1023,6 +1081,134 @@ const struct iomap_ops xfs_zoned_direct_write_iomap_ops = { #endif /* CONFIG_XFS_RT */ static int +xfs_atomic_write_cow_iomap_begin( + struct inode *inode, + loff_t offset, + loff_t length, + unsigned flags, + struct iomap *iomap, + struct iomap *srcmap) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + const xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); + xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length); + xfs_filblks_t count_fsb = end_fsb - offset_fsb; + int nmaps = 1; + xfs_filblks_t resaligned; + struct xfs_bmbt_irec cmap; + struct xfs_iext_cursor icur; + struct xfs_trans *tp; + unsigned int dblocks = 0, rblocks = 0; + int error; + u64 seq; + + ASSERT(flags & IOMAP_WRITE); + ASSERT(flags & IOMAP_DIRECT); + + if (xfs_is_shutdown(mp)) + return -EIO; + + if (!xfs_can_sw_atomic_write(mp)) { + ASSERT(xfs_can_sw_atomic_write(mp)); + return -EINVAL; + } + + /* blocks are always allocated in this path */ + if (flags & IOMAP_NOWAIT) + return -EAGAIN; + + trace_xfs_iomap_atomic_write_cow(ip, offset, length); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + if (!ip->i_cowfp) { + ASSERT(!xfs_is_reflink_inode(ip)); + xfs_ifork_init_cow(ip); + } + + if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) + cmap.br_startoff = end_fsb; + if (cmap.br_startoff <= offset_fsb) { + xfs_trim_extent(&cmap, offset_fsb, count_fsb); + goto found; + } + + end_fsb = cmap.br_startoff; + count_fsb = end_fsb - offset_fsb; + + resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, + xfs_get_cowextsz_hint(ip)); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + if (XFS_IS_REALTIME_INODE(ip)) { + dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0); + rblocks = resaligned; + } else { + dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); + rblocks = 0; + } + + error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, + rblocks, false, &tp); + if (error) + return error; + + /* extent layout could have changed since the unlock, so check again */ + if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) + cmap.br_startoff = end_fsb; + if (cmap.br_startoff <= offset_fsb) { + xfs_trim_extent(&cmap, offset_fsb, count_fsb); + xfs_trans_cancel(tp); + goto found; + } + + /* + * Allocate the entire reservation as unwritten blocks. + * + * Use XFS_BMAPI_EXTSZALIGN to hint at aligning new extents according to + * extszhint, such that there will be a greater chance that future + * atomic writes to that same range will be aligned (and don't require + * this COW-based method). 
+ */ + error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, + XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC | + XFS_BMAPI_EXTSZALIGN, 0, &cmap, &nmaps); + if (error) { + xfs_trans_cancel(tp); + goto out_unlock; + } + + xfs_inode_set_cowblocks_tag(ip); + error = xfs_trans_commit(tp); + if (error) + goto out_unlock; + +found: + if (cmap.br_state != XFS_EXT_NORM) { + error = xfs_reflink_convert_cow_locked(ip, offset_fsb, + count_fsb); + if (error) + goto out_unlock; + cmap.br_state = XFS_EXT_NORM; + } + + length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount); + trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap); + seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq); + +out_unlock: + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return error; +} + +const struct iomap_ops xfs_atomic_write_cow_iomap_ops = { + .iomap_begin = xfs_atomic_write_cow_iomap_begin, +}; + +static int xfs_dax_write_iomap_end( struct inode *inode, loff_t pos, diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index d330c4a581b1..674f8ac1b9bd 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -56,5 +56,6 @@ extern const struct iomap_ops xfs_read_iomap_ops; extern const struct iomap_ops xfs_seek_iomap_ops; extern const struct iomap_ops xfs_xattr_iomap_ops; extern const struct iomap_ops xfs_dax_write_iomap_ops; +extern const struct iomap_ops xfs_atomic_write_cow_iomap_ops; #endif /* __XFS_IOMAP_H__*/ diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 756bd3ca8e00..8cddbb7c149b 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -601,16 +601,82 @@ xfs_report_dioalign( stat->dio_offset_align = stat->dio_read_offset_align; } +unsigned int +xfs_get_atomic_write_min( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + + /* + * If we can complete an atomic write via atomic out of place writes, + * then advertise a minimum size of one fsblock. Without this + * mechanism, we can only guarantee atomic writes up to a single LBA. + * + * If out of place writes are not available, we can guarantee an atomic + * write of exactly one single fsblock if the bdev will make that + * guarantee for us. + */ + if (xfs_inode_can_hw_atomic_write(ip) || xfs_can_sw_atomic_write(mp)) + return mp->m_sb.sb_blocksize; + + return 0; +} + +unsigned int +xfs_get_atomic_write_max( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + + /* + * If out of place writes are not available, we can guarantee an atomic + * write of exactly one single fsblock if the bdev will make that + * guarantee for us. + */ + if (!xfs_can_sw_atomic_write(mp)) { + if (xfs_inode_can_hw_atomic_write(ip)) + return mp->m_sb.sb_blocksize; + return 0; + } + + /* + * If we can complete an atomic write via atomic out of place writes, + * then advertise a maximum size of whatever we can complete through + * that means. Hardware support is reported via max_opt, not here. + */ + if (XFS_IS_REALTIME_INODE(ip)) + return XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].awu_max); + return XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_AG].awu_max); +} + +unsigned int +xfs_get_atomic_write_max_opt( + struct xfs_inode *ip) +{ + unsigned int awu_max = xfs_get_atomic_write_max(ip); + + /* if the max is 1x block, then just keep behaviour that opt is 0 */ + if (awu_max <= ip->i_mount->m_sb.sb_blocksize) + return 0; + + /* + * Advertise the maximum size of an atomic write that we can tell the + * block device to perform for us. 
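The statx advertisement logic in xfs_get_atomic_write_min()/_max() above has a small decision table; here is a sketch with boolean inputs standing in for xfs_inode_can_hw_atomic_write() and xfs_can_sw_atomic_write(), and a byte value standing in for the per-group awu_max.

#include <stdbool.h>
#include <stdint.h>

static uint32_t atomic_write_min(bool hw, bool sw, uint32_t blocksize)
{
        /* Either mechanism lets us promise at least one fs block. */
        return (hw || sw) ? blocksize : 0;
}

static uint64_t atomic_write_max(bool hw, bool sw, uint32_t blocksize,
                                 uint64_t group_awu_max_bytes)
{
        if (!sw)                        /* no reflink: HW single block only */
                return hw ? blocksize : 0;
        return group_awu_max_bytes;     /* software path sets the ceiling */
}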
In general the bdev limit will be + * less than our out of place write limit, but we don't want to exceed + * the awu_max. + */ + return min(awu_max, xfs_inode_buftarg(ip)->bt_bdev_awu_max); +} + static void xfs_report_atomic_write( struct xfs_inode *ip, struct kstat *stat) { - unsigned int unit_min = 0, unit_max = 0; - - if (xfs_inode_can_atomicwrite(ip)) - unit_min = unit_max = ip->i_mount->m_sb.sb_blocksize; - generic_fill_statx_atomic_writes(stat, unit_min, unit_max); + generic_fill_statx_atomic_writes(stat, + xfs_get_atomic_write_min(ip), + xfs_get_atomic_write_max(ip), + xfs_get_atomic_write_max_opt(ip)); } STATIC int diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h index 3c1a2605ffd2..0896f6b8b3b8 100644 --- a/fs/xfs/xfs_iops.h +++ b/fs/xfs/xfs_iops.h @@ -19,5 +19,8 @@ int xfs_inode_init_security(struct inode *inode, struct inode *dir, extern void xfs_setup_inode(struct xfs_inode *ip); extern void xfs_setup_iops(struct xfs_inode *ip); extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init); +unsigned int xfs_get_atomic_write_min(struct xfs_inode *ip); +unsigned int xfs_get_atomic_write_max(struct xfs_inode *ip); +unsigned int xfs_get_atomic_write_max_opt(struct xfs_inode *ip); #endif /* __XFS_IOPS_H__ */ diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 980aabc49512..793468b4d30d 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1607,27 +1607,6 @@ xlog_bio_end_io( &iclog->ic_end_io_work); } -static int -xlog_map_iclog_data( - struct bio *bio, - void *data, - size_t count) -{ - do { - struct page *page = kmem_to_page(data); - unsigned int off = offset_in_page(data); - size_t len = min_t(size_t, count, PAGE_SIZE - off); - - if (bio_add_page(bio, page, len, off) != len) - return -EIO; - - data += len; - count -= len; - } while (count); - - return 0; -} - STATIC void xlog_write_iclog( struct xlog *log, @@ -1693,11 +1672,12 @@ xlog_write_iclog( iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); - if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) - goto shutdown; - - if (is_vmalloc_addr(iclog->ic_data)) - flush_kernel_vmap_range(iclog->ic_data, count); + if (is_vmalloc_addr(iclog->ic_data)) { + if (!bio_add_vmalloc(&iclog->ic_bio, iclog->ic_data, count)) + goto shutdown; + } else { + bio_add_virt_nofail(&iclog->ic_bio, iclog->ic_data, count); + } /* * If this log buffer would straddle the end of the log we will have diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 1ca406ec1b40..f66d2d430e4f 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -309,9 +309,7 @@ xlog_cil_alloc_shadow_bufs( * Then round nbytes up to 64-bit alignment so that the initial * buffer alignment is easy to calculate and verify. */ - nbytes += niovecs * - (sizeof(uint64_t) + sizeof(struct xlog_op_header)); - nbytes = round_up(nbytes, sizeof(uint64_t)); + nbytes = xlog_item_space(niovecs, nbytes); /* * The data buffer needs to start 64-bit aligned, so round up diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index f3d78869e5e5..39a102cc1b43 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -698,4 +698,17 @@ xlog_kvmalloc( return p; } +/* + * Given a count of iovecs and space for a log item, compute the space we need + * in the log to store that data plus the log headers. 
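And the matching shape for xfs_get_atomic_write_max_opt(): nothing to advertise when the max is a single block, otherwise the smaller of the software limit and the bdev limit. Names and byte units here are stand-ins, not the kernel helpers.

#include <stdint.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

static uint64_t atomic_write_max_opt(uint64_t awu_max_bytes,
                                     uint64_t bdev_awu_max_bytes,
                                     uint32_t blocksize)
{
        if (awu_max_bytes <= blocksize)         /* keep opt == 0 behaviour */
                return 0;
        return min_u64(awu_max_bytes, bdev_awu_max_bytes);
}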
+ */ +static inline unsigned int +xlog_item_space( + unsigned int niovecs, + unsigned int nbytes) +{ + nbytes += niovecs * (sizeof(uint64_t) + sizeof(struct xlog_op_header)); + return round_up(nbytes, sizeof(uint64_t)); +} + #endif /* __XFS_LOG_PRIV_H__ */ diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c index 15d410d16bb2..19aba2c3d525 100644 --- a/fs/xfs/xfs_message.c +++ b/fs/xfs/xfs_message.c @@ -141,14 +141,6 @@ xfs_warn_experimental( const char *name; long opstate; } features[] = { - [XFS_EXPERIMENTAL_PNFS] = { - .opstate = XFS_OPSTATE_WARNED_PNFS, - .name = "pNFS", - }, - [XFS_EXPERIMENTAL_SCRUB] = { - .opstate = XFS_OPSTATE_WARNED_SCRUB, - .name = "online scrub", - }, [XFS_EXPERIMENTAL_SHRINK] = { .opstate = XFS_OPSTATE_WARNED_SHRINK, .name = "online shrink", @@ -161,14 +153,6 @@ xfs_warn_experimental( .opstate = XFS_OPSTATE_WARNED_LBS, .name = "large block size", }, - [XFS_EXPERIMENTAL_EXCHRANGE] = { - .opstate = XFS_OPSTATE_WARNED_EXCHRANGE, - .name = "exchange range", - }, - [XFS_EXPERIMENTAL_PPTR] = { - .opstate = XFS_OPSTATE_WARNED_PPTR, - .name = "parent pointer", - }, [XFS_EXPERIMENTAL_METADIR] = { .opstate = XFS_OPSTATE_WARNED_METADIR, .name = "metadata directory tree", diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h index a92a4d09c8e9..d68e72379f9d 100644 --- a/fs/xfs/xfs_message.h +++ b/fs/xfs/xfs_message.h @@ -91,13 +91,9 @@ void xfs_buf_alert_ratelimited(struct xfs_buf *bp, const char *rlmsg, const char *fmt, ...); enum xfs_experimental_feat { - XFS_EXPERIMENTAL_PNFS, - XFS_EXPERIMENTAL_SCRUB, XFS_EXPERIMENTAL_SHRINK, XFS_EXPERIMENTAL_LARP, XFS_EXPERIMENTAL_LBS, - XFS_EXPERIMENTAL_EXCHRANGE, - XFS_EXPERIMENTAL_PPTR, XFS_EXPERIMENTAL_METADIR, XFS_EXPERIMENTAL_ZONED, diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 00b53f479ece..29276fe60df9 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -666,6 +666,158 @@ xfs_agbtree_compute_maxlevels( mp->m_agbtree_maxlevels = max(levels, mp->m_refc_maxlevels); } +/* Maximum atomic write IO size that the kernel allows. */ +static inline xfs_extlen_t xfs_calc_atomic_write_max(struct xfs_mount *mp) +{ + return rounddown_pow_of_two(XFS_B_TO_FSB(mp, MAX_RW_COUNT)); +} + +static inline unsigned int max_pow_of_two_factor(const unsigned int nr) +{ + return 1 << (ffs(nr) - 1); +} + +/* + * If the data device advertises atomic write support, limit the size of data + * device atomic writes to the greatest power-of-two factor of the AG size so + * that every atomic write unit aligns with the start of every AG. This is + * required so that the per-AG allocations for an atomic write will always be + * aligned compatibly with the alignment requirements of the storage. + * + * If the data device doesn't advertise atomic writes, then there are no + * alignment restrictions and the largest out-of-place write we can do + * ourselves is the number of blocks that user files can allocate from any AG. + */ +static inline xfs_extlen_t xfs_calc_perag_awu_max(struct xfs_mount *mp) +{ + if (mp->m_ddev_targp->bt_bdev_awu_min > 0) + return max_pow_of_two_factor(mp->m_sb.sb_agblocks); + return rounddown_pow_of_two(mp->m_ag_max_usable); +} + +/* + * Reflink on the realtime device requires rtgroups, and atomic writes require + * reflink. + * + * If the realtime device advertises atomic write support, limit the size of + * data device atomic writes to the greatest power-of-two factor of the rtgroup + * size so that every atomic write unit aligns with the start of every rtgroup. 
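The awu_max calculations above lean on two power-of-two helpers; these are plain C equivalents of max_pow_of_two_factor() and rounddown_pow_of_two(), written out for illustration rather than taken from the kernel headers.

#include <stdint.h>

/* Greatest power-of-two factor of n: equals the lowest set bit. */
static uint32_t pow2_factor(uint32_t n)
{
        return n & -n;
}

/* Largest power of two <= n (0 for n == 0). */
static uint32_t pow2_rounddown(uint32_t n)
{
        while (n & (n - 1))
                n &= n - 1;     /* clear the lowest set bit */
        return n;
}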
+ * This is required so that the per-rtgroup allocations for an atomic write + * will always be aligned compatibly with the alignment requirements of the + * storage. + * + * If the rt device doesn't advertise atomic writes, then there are no + * alignment restrictions and the largest out-of-place write we can do + * ourselves is the number of blocks that user files can allocate from any + * rtgroup. + */ +static inline xfs_extlen_t xfs_calc_rtgroup_awu_max(struct xfs_mount *mp) +{ + struct xfs_groups *rgs = &mp->m_groups[XG_TYPE_RTG]; + + if (rgs->blocks == 0) + return 0; + if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_bdev_awu_min > 0) + return max_pow_of_two_factor(rgs->blocks); + return rounddown_pow_of_two(rgs->blocks); +} + +/* Compute the maximum atomic write unit size for each section. */ +static inline void +xfs_calc_atomic_write_unit_max( + struct xfs_mount *mp) +{ + struct xfs_groups *ags = &mp->m_groups[XG_TYPE_AG]; + struct xfs_groups *rgs = &mp->m_groups[XG_TYPE_RTG]; + + const xfs_extlen_t max_write = xfs_calc_atomic_write_max(mp); + const xfs_extlen_t max_ioend = xfs_reflink_max_atomic_cow(mp); + const xfs_extlen_t max_agsize = xfs_calc_perag_awu_max(mp); + const xfs_extlen_t max_rgsize = xfs_calc_rtgroup_awu_max(mp); + + ags->awu_max = min3(max_write, max_ioend, max_agsize); + rgs->awu_max = min3(max_write, max_ioend, max_rgsize); + + trace_xfs_calc_atomic_write_unit_max(mp, max_write, max_ioend, + max_agsize, max_rgsize); +} + +/* + * Try to set the atomic write maximum to a new value that we got from + * userspace via mount option. + */ +int +xfs_set_max_atomic_write_opt( + struct xfs_mount *mp, + unsigned long long new_max_bytes) +{ + const xfs_filblks_t new_max_fsbs = XFS_B_TO_FSBT(mp, new_max_bytes); + const xfs_extlen_t max_write = xfs_calc_atomic_write_max(mp); + const xfs_extlen_t max_group = + max(mp->m_groups[XG_TYPE_AG].blocks, + mp->m_groups[XG_TYPE_RTG].blocks); + const xfs_extlen_t max_group_write = + max(xfs_calc_perag_awu_max(mp), xfs_calc_rtgroup_awu_max(mp)); + int error; + + if (new_max_bytes == 0) + goto set_limit; + + ASSERT(max_write <= U32_MAX); + + /* generic_atomic_write_valid enforces power of two length */ + if (!is_power_of_2(new_max_bytes)) { + xfs_warn(mp, + "max atomic write size of %llu bytes is not a power of 2", + new_max_bytes); + return -EINVAL; + } + + if (new_max_bytes & mp->m_blockmask) { + xfs_warn(mp, + "max atomic write size of %llu bytes not aligned with fsblock", + new_max_bytes); + return -EINVAL; + } + + if (new_max_fsbs > max_write) { + xfs_warn(mp, + "max atomic write size of %lluk cannot be larger than max write size %lluk", + new_max_bytes >> 10, + XFS_FSB_TO_B(mp, max_write) >> 10); + return -EINVAL; + } + + if (new_max_fsbs > max_group) { + xfs_warn(mp, + "max atomic write size of %lluk cannot be larger than allocation group size %lluk", + new_max_bytes >> 10, + XFS_FSB_TO_B(mp, max_group) >> 10); + return -EINVAL; + } + + if (new_max_fsbs > max_group_write) { + xfs_warn(mp, + "max atomic write size of %lluk cannot be larger than max allocation group write size %lluk", + new_max_bytes >> 10, + XFS_FSB_TO_B(mp, max_group_write) >> 10); + return -EINVAL; + } + +set_limit: + error = xfs_calc_atomic_write_reservation(mp, new_max_fsbs); + if (error) { + xfs_warn(mp, + "cannot support completing atomic writes of %lluk", + new_max_bytes >> 10); + return error; + } + + xfs_calc_atomic_write_unit_max(mp); + mp->m_awu_max_bytes = new_max_bytes; + return 0; +} + /* Compute maximum possible height for realtime btree types for this 
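For a non-zero max_atomic_write value, xfs_set_max_atomic_write_opt() above applies its checks in a fixed order; a sketch of that predicate with everything expressed in bytes (the kernel code mixes bytes and fsblocks), assuming blocksize is a power of two.

#include <stdbool.h>
#include <stdint.h>

static bool max_atomic_write_opt_valid(uint64_t new_max, uint32_t blocksize,
                                       uint64_t max_write, uint64_t max_group,
                                       uint64_t max_group_write)
{
        if (new_max & (new_max - 1))            /* must be a power of two */
                return false;
        if (new_max & (blocksize - 1))          /* fsblock aligned */
                return false;
        return new_max <= max_write &&          /* kernel max write size */
               new_max <= max_group &&          /* AG / rtgroup size */
               new_max <= max_group_write;      /* per-group atomic limit */
}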
fs. */ static inline void xfs_rtbtree_compute_maxlevels( @@ -1082,6 +1234,15 @@ xfs_mountfs( xfs_zone_gc_start(mp); } + /* + * Pre-calculate atomic write unit max. This involves computations + * derived from transaction reservations, so we must do this after the + * log is fully initialized. + */ + error = xfs_set_max_atomic_write_opt(mp, mp->m_awu_max_bytes); + if (error) + goto out_agresv; + return 0; out_agresv: diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index e5192c12e7ac..d85084f9f317 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -119,6 +119,12 @@ struct xfs_groups { * SMR hard drives. */ xfs_fsblock_t start_fsb; + + /* + * Maximum length of an atomic write for files stored in this + * collection of allocation groups, in fsblocks. + */ + xfs_extlen_t awu_max; }; struct xfs_freecounter { @@ -230,6 +236,10 @@ typedef struct xfs_mount { bool m_update_sb; /* sb needs update in mount */ unsigned int m_max_open_zones; unsigned int m_zonegc_low_space; + struct xfs_mru_cache *m_zone_cache; /* Inode to open zone cache */ + + /* max_atomic_write mount option value */ + unsigned long long m_awu_max_bytes; /* * Bitsets of per-fs metadata that have been checked and/or are sick. @@ -464,6 +474,11 @@ static inline bool xfs_has_nonzoned(const struct xfs_mount *mp) return !xfs_has_zoned(mp); } +static inline bool xfs_can_sw_atomic_write(struct xfs_mount *mp) +{ + return xfs_has_reflink(mp); +} + /* * Some features are always on for v5 file systems, allow the compiler to * eliminiate dead code when building without v4 support. @@ -543,10 +558,6 @@ __XFS_HAS_FEAT(nouuid, NOUUID) */ #define XFS_OPSTATE_BLOCKGC_ENABLED 6 -/* Kernel has logged a warning about pNFS being used on this fs. */ -#define XFS_OPSTATE_WARNED_PNFS 7 -/* Kernel has logged a warning about online fsck being used on this fs. */ -#define XFS_OPSTATE_WARNED_SCRUB 8 /* Kernel has logged a warning about shrink being used on this fs. */ #define XFS_OPSTATE_WARNED_SHRINK 9 /* Kernel has logged a warning about logged xattr updates being used. */ @@ -559,10 +570,6 @@ __XFS_HAS_FEAT(nouuid, NOUUID) #define XFS_OPSTATE_USE_LARP 13 /* Kernel has logged a warning about blocksize > pagesize on this fs. */ #define XFS_OPSTATE_WARNED_LBS 14 -/* Kernel has logged a warning about exchange-range being used on this fs. */ -#define XFS_OPSTATE_WARNED_EXCHRANGE 15 -/* Kernel has logged a warning about parent pointers being used on this fs. */ -#define XFS_OPSTATE_WARNED_PPTR 16 /* Kernel has logged a warning about metadata dirs being used on this fs. 
*/ #define XFS_OPSTATE_WARNED_METADIR 17 /* Filesystem should use qflags to determine quotaon status */ @@ -631,7 +638,6 @@ xfs_should_warn(struct xfs_mount *mp, long nr) { (1UL << XFS_OPSTATE_READONLY), "read_only" }, \ { (1UL << XFS_OPSTATE_INODEGC_ENABLED), "inodegc" }, \ { (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" }, \ - { (1UL << XFS_OPSTATE_WARNED_SCRUB), "wscrub" }, \ { (1UL << XFS_OPSTATE_WARNED_SHRINK), "wshrink" }, \ { (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }, \ { (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }, \ @@ -793,4 +799,7 @@ static inline void xfs_mod_sb_delalloc(struct xfs_mount *mp, int64_t delta) percpu_counter_add(&mp->m_delalloc_blks, delta); } +int xfs_set_max_atomic_write_opt(struct xfs_mount *mp, + unsigned long long new_max_bytes); + #endif /* __XFS_MOUNT_H__ */ diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c index d0f5b403bdbe..08443ceec329 100644 --- a/fs/xfs/xfs_mru_cache.c +++ b/fs/xfs/xfs_mru_cache.c @@ -414,6 +414,8 @@ xfs_mru_cache_destroy( * To insert an element, call xfs_mru_cache_insert() with the data store, the * element's key and the client data pointer. This function returns 0 on * success or ENOMEM if memory for the data element couldn't be allocated. + * + * The passed in elem is freed through the per-cache free_func on failure. */ int xfs_mru_cache_insert( @@ -421,14 +423,15 @@ xfs_mru_cache_insert( unsigned long key, struct xfs_mru_cache_elem *elem) { - int error; + int error = -EINVAL; ASSERT(mru && mru->lists); if (!mru || !mru->lists) - return -EINVAL; + goto out_free; + error = -ENOMEM; if (radix_tree_preload(GFP_KERNEL)) - return -ENOMEM; + goto out_free; INIT_LIST_HEAD(&elem->list_node); elem->key = key; @@ -440,6 +443,12 @@ xfs_mru_cache_insert( _xfs_mru_cache_list_insert(mru, elem); spin_unlock(&mru->lock); + if (error) + goto out_free; + return 0; + +out_free: + mru->free_func(mru->data, elem); return error; } diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c index ed8d8ed42f0a..3545dc1d953c 100644 --- a/fs/xfs/xfs_notify_failure.c +++ b/fs/xfs/xfs_notify_failure.c @@ -127,7 +127,7 @@ xfs_dax_notify_failure_freeze( struct super_block *sb = mp->m_super; int error; - error = freeze_super(sb, FREEZE_HOLDER_KERNEL); + error = freeze_super(sb, FREEZE_HOLDER_KERNEL, NULL); if (error) xfs_emerg(mp, "already frozen by kernel, err=%d", error); @@ -143,7 +143,7 @@ xfs_dax_notify_failure_thaw( int error; if (kernel_frozen) { - error = thaw_super(sb, FREEZE_HOLDER_KERNEL); + error = thaw_super(sb, FREEZE_HOLDER_KERNEL, NULL); if (error) xfs_emerg(mp, "still frozen after notify failure, err=%d", error); @@ -153,7 +153,7 @@ xfs_dax_notify_failure_thaw( * Also thaw userspace call anyway because the device is about to be * removed immediately. 
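The xfs_mru_cache_insert() change above establishes an ownership rule worth calling out: on any failure the cache consumes the element through its free_func, so callers need no unwind path. A toy sketch of that contract (no radix tree, no locking) follows.

struct cache_elem { unsigned long key; };

typedef void (*free_fn)(void *data, struct cache_elem *elem);

static int cache_insert(int fail, void *data, struct cache_elem *elem,
                        free_fn free_func)
{
        if (fail) {
                free_func(data, elem);  /* insert owns elem on failure */
                return -1;
        }
        /* ... store elem in the cache ... */
        return 0;
}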
*/ - thaw_super(sb, FREEZE_HOLDER_USERSPACE); + thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL); } static int diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 6f4479deac6d..afe7497012d4 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -58,8 +58,6 @@ xfs_fs_get_uuid( { struct xfs_mount *mp = XFS_M(sb); - xfs_warn_experimental(mp, XFS_EXPERIMENTAL_PNFS); - if (*len < sizeof(uuid_t)) return -EINVAL; diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index fe2d7aab8554..076501123d89 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -78,6 +78,11 @@ xfs_cui_item_size( *nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents); } +unsigned int xfs_cui_log_space(unsigned int nr) +{ + return xlog_item_space(1, xfs_cui_log_format_sizeof(nr)); +} + /* * This is called to fill in the vector of log iovecs for the * given cui log item. We use only 1 iovec, and we point that @@ -179,6 +184,11 @@ xfs_cud_item_size( *nbytes += sizeof(struct xfs_cud_log_format); } +unsigned int xfs_cud_log_space(void) +{ + return xlog_item_space(1, sizeof(struct xfs_cud_log_format)); +} + /* * This is called to fill in the vector of log iovecs for the * given cud log item. We use only 1 iovec, and we point that diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h index bfee8f30c63c..0fc3f493342b 100644 --- a/fs/xfs/xfs_refcount_item.h +++ b/fs/xfs/xfs_refcount_item.h @@ -76,4 +76,7 @@ struct xfs_refcount_intent; void xfs_refcount_defer_add(struct xfs_trans *tp, struct xfs_refcount_intent *ri); +unsigned int xfs_cui_log_space(unsigned int nr); +unsigned int xfs_cud_log_space(void); + #endif /* __XFS_REFCOUNT_ITEM_H__ */ diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index cc3b4df88110..ad3bcb76d805 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -293,7 +293,7 @@ xfs_bmap_trim_cow( return xfs_reflink_trim_around_shared(ip, imap, shared); } -static int +int xfs_reflink_convert_cow_locked( struct xfs_inode *ip, xfs_fileoff_t offset_fsb, @@ -786,35 +786,19 @@ xfs_reflink_update_quota( * requirements as low as possible. */ STATIC int -xfs_reflink_end_cow_extent( +xfs_reflink_end_cow_extent_locked( + struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *offset_fsb, xfs_fileoff_t end_fsb) { struct xfs_iext_cursor icur; struct xfs_bmbt_irec got, del, data; - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); - unsigned int resblks; int nmaps; bool isrt = XFS_IS_REALTIME_INODE(ip); int error; - resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, - XFS_TRANS_RESERVE, &tp); - if (error) - return error; - - /* - * Lock the inode. We have to ijoin without automatic unlock because - * the lead transaction is the refcountbt record deletion; the data - * fork update follows as a deferred log item. - */ - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, 0); - /* * In case of racing, overlapping AIO writes no COW extents might be * left by the time I/O completes for the loser of the race. 
In that @@ -823,7 +807,7 @@ xfs_reflink_end_cow_extent( if (!xfs_iext_lookup_extent(ip, ifp, *offset_fsb, &icur, &got) || got.br_startoff >= end_fsb) { *offset_fsb = end_fsb; - goto out_cancel; + return 0; } /* @@ -837,7 +821,7 @@ xfs_reflink_end_cow_extent( if (!xfs_iext_next_extent(ifp, &icur, &got) || got.br_startoff >= end_fsb) { *offset_fsb = end_fsb; - goto out_cancel; + return 0; } } del = got; @@ -846,14 +830,14 @@ xfs_reflink_end_cow_extent( error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, XFS_IEXT_REFLINK_END_COW_CNT); if (error) - goto out_cancel; + return error; /* Grab the corresponding mapping in the data fork. */ nmaps = 1; error = xfs_bmapi_read(ip, del.br_startoff, del.br_blockcount, &data, &nmaps, 0); if (error) - goto out_cancel; + return error; /* We can only remap the smaller of the two extent sizes. */ data.br_blockcount = min(data.br_blockcount, del.br_blockcount); @@ -882,7 +866,7 @@ xfs_reflink_end_cow_extent( error = xfs_bunmapi(NULL, ip, data.br_startoff, data.br_blockcount, 0, 1, &done); if (error) - goto out_cancel; + return error; ASSERT(done); } @@ -899,17 +883,45 @@ xfs_reflink_end_cow_extent( /* Remove the mapping from the CoW fork. */ xfs_bmap_del_extent_cow(ip, &icur, &got, &del); - error = xfs_trans_commit(tp); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - if (error) - return error; - /* Update the caller about how much progress we made. */ *offset_fsb = del.br_startoff + del.br_blockcount; return 0; +} -out_cancel: - xfs_trans_cancel(tp); +/* + * Remap part of the CoW fork into the data fork. + * + * We aim to remap the range starting at @offset_fsb and ending at @end_fsb + * into the data fork; this function will remap what it can (at the end of the + * range) and update @end_fsb appropriately. Each remap gets its own + * transaction because we can end up merging and splitting bmbt blocks for + * every remap operation and we'd like to keep the block reservation + * requirements as low as possible. + */ +STATIC int +xfs_reflink_end_cow_extent( + struct xfs_inode *ip, + xfs_fileoff_t *offset_fsb, + xfs_fileoff_t end_fsb) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + unsigned int resblks; + int error; + + resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, + XFS_TRANS_RESERVE, &tp); + if (error) + return error; + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, 0); + + error = xfs_reflink_end_cow_extent_locked(tp, ip, offset_fsb, end_fsb); + if (error) + xfs_trans_cancel(tp); + else + error = xfs_trans_commit(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } @@ -973,6 +985,78 @@ xfs_reflink_end_cow( } /* + * Fully remap all of the file's data fork at once, which is the critical part + * in achieving atomic behaviour. + * The regular CoW end path does not use function as to keep the block + * reservation per transaction as low as possible. + */ +int +xfs_reflink_end_atomic_cow( + struct xfs_inode *ip, + xfs_off_t offset, + xfs_off_t count) +{ + xfs_fileoff_t offset_fsb; + xfs_fileoff_t end_fsb; + int error = 0; + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + unsigned int resblks; + + trace_xfs_reflink_end_cow(ip, offset, count); + + offset_fsb = XFS_B_TO_FSBT(mp, offset); + end_fsb = XFS_B_TO_FSB(mp, offset + count); + + /* + * Each remapping operation could cause a btree split, so in the worst + * case that's one for each block. 
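The refactoring above splits the COW remap into a *_locked() helper plus a wrapper that owns transaction setup, commit-or-cancel, and the inode lock; here is a compilable sketch of that wrapper shape with trivial stub types rather than the XFS APIs.

struct tx { int dummy; };
struct inode_stub { int locked; };

static int  tx_alloc(struct tx **tpp) { static struct tx t; *tpp = &t; return 0; }
static int  tx_commit(struct tx *tp)  { (void)tp; return 0; }
static void tx_cancel(struct tx *tp)  { (void)tp; }
static void ilock(struct inode_stub *ip)   { ip->locked = 1; }
static void iunlock(struct inode_stub *ip) { ip->locked = 0; }
static int  end_cow_extent_locked(struct tx *tp, struct inode_stub *ip)
{ (void)tp; (void)ip; return 0; }

static int end_cow_extent(struct inode_stub *ip)
{
        struct tx *tp;
        int error = tx_alloc(&tp);

        if (error)
                return error;
        ilock(ip);
        error = end_cow_extent_locked(tp, ip);
        if (error)
                tx_cancel(tp);          /* nothing was made durable */
        else
                error = tx_commit(tp);
        iunlock(ip);
        return error;
}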
+ */ + resblks = (end_fsb - offset_fsb) * + XFS_NEXTENTADD_SPACE_RES(mp, 1, XFS_DATA_FORK); + + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_atomic_ioend, resblks, 0, + XFS_TRANS_RESERVE, &tp); + if (error) + return error; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, 0); + + while (end_fsb > offset_fsb && !error) { + error = xfs_reflink_end_cow_extent_locked(tp, ip, &offset_fsb, + end_fsb); + } + if (error) { + trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_); + goto out_cancel; + } + error = xfs_trans_commit(tp); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return error; +out_cancel: + xfs_trans_cancel(tp); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return error; +} + +/* Compute the largest atomic write that we can complete through software. */ +xfs_extlen_t +xfs_reflink_max_atomic_cow( + struct xfs_mount *mp) +{ + /* We cannot do any atomic writes without out of place writes. */ + if (!xfs_can_sw_atomic_write(mp)) + return 0; + + /* + * Atomic write limits must always be a power-of-2, according to + * generic_atomic_write_valid. + */ + return rounddown_pow_of_two(xfs_calc_max_atomic_write_fsblocks(mp)); +} + +/* * Free all CoW staging blocks that are still referenced by the ondisk refcount * metadata. The ondisk metadata does not track which inode created the * staging extent, so callers must ensure that there are no cached inodes with diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h index cc4e92278279..36cda724da89 100644 --- a/fs/xfs/xfs_reflink.h +++ b/fs/xfs/xfs_reflink.h @@ -35,6 +35,8 @@ int xfs_reflink_allocate_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap, bool convert_now); extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count); +int xfs_reflink_convert_cow_locked(struct xfs_inode *ip, + xfs_fileoff_t offset_fsb, xfs_filblks_t count_fsb); extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip, struct xfs_trans **tpp, xfs_fileoff_t offset_fsb, @@ -43,6 +45,8 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count, bool cancel_real); extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count); +int xfs_reflink_end_atomic_cow(struct xfs_inode *ip, xfs_off_t offset, + xfs_off_t count); extern int xfs_reflink_recover_cow(struct xfs_mount *mp); extern loff_t xfs_reflink_remap_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, @@ -64,4 +68,6 @@ extern int xfs_reflink_update_dest(struct xfs_inode *dest, xfs_off_t newlen, bool xfs_reflink_supports_rextsize(struct xfs_mount *mp, unsigned int rextsize); +xfs_extlen_t xfs_reflink_max_atomic_cow(struct xfs_mount *mp); + #endif /* __XFS_REFLINK_H */ diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index 89decffe76c8..c99700318ec2 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -77,6 +77,11 @@ xfs_rui_item_size( *nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents); } +unsigned int xfs_rui_log_space(unsigned int nr) +{ + return xlog_item_space(1, xfs_rui_log_format_sizeof(nr)); +} + /* * This is called to fill in the vector of log iovecs for the * given rui log item. We use only 1 iovec, and we point that @@ -180,6 +185,11 @@ xfs_rud_item_size( *nbytes += sizeof(struct xfs_rud_log_format); } +unsigned int xfs_rud_log_space(void) +{ + return xlog_item_space(1, sizeof(struct xfs_rud_log_format)); +} + /* * This is called to fill in the vector of log iovecs for the * given rud log item. 
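The single-transaction reservation in xfs_reflink_end_atomic_cow() above is simply "blocks times per-block worst case"; per_block_res below stands in for XFS_NEXTENTADD_SPACE_RES(mp, 1, XFS_DATA_FORK).

#include <stdint.h>

static uint64_t atomic_end_cow_resblks(uint64_t start_block, uint64_t end_block,
                                       uint64_t per_block_res)
{
        /* every remapped block may split the bmbt, so reserve for all */
        return (end_block - start_block) * per_block_res;
}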
We use only 1 iovec, and we point that diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h index 40d331555675..3a99f0117f2d 100644 --- a/fs/xfs/xfs_rmap_item.h +++ b/fs/xfs/xfs_rmap_item.h @@ -75,4 +75,7 @@ struct xfs_rmap_intent; void xfs_rmap_defer_add(struct xfs_trans *tp, struct xfs_rmap_intent *ri); +unsigned int xfs_rui_log_space(unsigned int nr); +unsigned int xfs_rud_log_space(void); + #endif /* __XFS_RMAP_ITEM_H__ */ diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 4a11ddccc563..0bc4b5489078 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -111,7 +111,7 @@ enum { Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota, Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce, Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, Opt_max_open_zones, - Opt_lifetime, Opt_nolifetime, + Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write, }; static const struct fs_parameter_spec xfs_fs_parameters[] = { @@ -159,6 +159,7 @@ static const struct fs_parameter_spec xfs_fs_parameters[] = { fsparam_u32("max_open_zones", Opt_max_open_zones), fsparam_flag("lifetime", Opt_lifetime), fsparam_flag("nolifetime", Opt_nolifetime), + fsparam_string("max_atomic_write", Opt_max_atomic_write), {} }; @@ -241,6 +242,9 @@ xfs_fs_show_options( if (mp->m_max_open_zones) seq_printf(m, ",max_open_zones=%u", mp->m_max_open_zones); + if (mp->m_awu_max_bytes) + seq_printf(m, ",max_atomic_write=%lluk", + mp->m_awu_max_bytes >> 10); return 0; } @@ -380,10 +384,11 @@ xfs_blkdev_get( struct file **bdev_filep) { int error = 0; + blk_mode_t mode; - *bdev_filep = bdev_file_open_by_path(name, - BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES, - mp->m_super, &fs_holder_ops); + mode = sb_open_mode(mp->m_super->s_flags); + *bdev_filep = bdev_file_open_by_path(name, mode, + mp->m_super, &fs_holder_ops); if (IS_ERR(*bdev_filep)) { error = PTR_ERR(*bdev_filep); *bdev_filep = NULL; @@ -481,21 +486,29 @@ xfs_open_devices( /* * Setup xfs_mount buffer target pointers */ - error = -ENOMEM; mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file); - if (!mp->m_ddev_targp) + if (IS_ERR(mp->m_ddev_targp)) { + error = PTR_ERR(mp->m_ddev_targp); + mp->m_ddev_targp = NULL; goto out_close_rtdev; + } if (rtdev_file) { mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file); - if (!mp->m_rtdev_targp) + if (IS_ERR(mp->m_rtdev_targp)) { + error = PTR_ERR(mp->m_rtdev_targp); + mp->m_rtdev_targp = NULL; goto out_free_ddev_targ; + } } if (logdev_file && file_bdev(logdev_file) != ddev) { mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file); - if (!mp->m_logdev_targp) + if (IS_ERR(mp->m_logdev_targp)) { + error = PTR_ERR(mp->m_logdev_targp); + mp->m_logdev_targp = NULL; goto out_free_rtdev_targ; + } } else { mp->m_logdev_targp = mp->m_ddev_targp; /* Handle won't be used, drop it */ @@ -528,7 +541,7 @@ xfs_setup_devices( { int error; - error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize); + error = xfs_configure_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize); if (error) return error; @@ -537,7 +550,7 @@ xfs_setup_devices( if (xfs_has_sector(mp)) log_sector_size = mp->m_sb.sb_logsectsize; - error = xfs_setsize_buftarg(mp->m_logdev_targp, + error = xfs_configure_buftarg(mp->m_logdev_targp, log_sector_size); if (error) return error; @@ -551,7 +564,7 @@ xfs_setup_devices( } mp->m_rtdev_targp = mp->m_ddev_targp; } else if (mp->m_rtname) { - error = xfs_setsize_buftarg(mp->m_rtdev_targp, + error = xfs_configure_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_sectsize); if (error) return error; @@ -1334,6 
+1347,42 @@ suffix_kstrtoint( return ret; } +static int +suffix_kstrtoull( + const char *s, + unsigned int base, + unsigned long long *res) +{ + int last, shift_left_factor = 0; + unsigned long long _res; + char *value; + int ret = 0; + + value = kstrdup(s, GFP_KERNEL); + if (!value) + return -ENOMEM; + + last = strlen(value) - 1; + if (value[last] == 'K' || value[last] == 'k') { + shift_left_factor = 10; + value[last] = '\0'; + } + if (value[last] == 'M' || value[last] == 'm') { + shift_left_factor = 20; + value[last] = '\0'; + } + if (value[last] == 'G' || value[last] == 'g') { + shift_left_factor = 30; + value[last] = '\0'; + } + + if (kstrtoull(value, base, &_res)) + ret = -EINVAL; + kfree(value); + *res = _res << shift_left_factor; + return ret; +} + static inline void xfs_fs_warn_deprecated( struct fs_context *fc, @@ -1518,6 +1567,14 @@ xfs_fs_parse_param( case Opt_nolifetime: parsing_mp->m_features |= XFS_FEAT_NOLIFETIME; return 0; + case Opt_max_atomic_write: + if (suffix_kstrtoull(param->string, 10, + &parsing_mp->m_awu_max_bytes)) { + xfs_warn(parsing_mp, + "max atomic write size must be positive integer"); + return -EINVAL; + } + return 0; default: xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); return -EINVAL; @@ -1897,13 +1954,6 @@ xfs_fs_fill_super( } } - - if (xfs_has_exchange_range(mp)) - xfs_warn_experimental(mp, XFS_EXPERIMENTAL_EXCHRANGE); - - if (xfs_has_parent(mp)) - xfs_warn_experimental(mp, XFS_EXPERIMENTAL_PPTR); - /* * If no quota mount options were provided, maybe we'll try to pick * up the quota accounting and enforcement flags from the ondisk sb. @@ -1969,6 +2019,20 @@ xfs_remount_rw( struct xfs_sb *sbp = &mp->m_sb; int error; + if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp && + bdev_read_only(mp->m_logdev_targp->bt_bdev)) { + xfs_warn(mp, + "ro->rw transition prohibited by read-only logdev"); + return -EACCES; + } + + if (mp->m_rtdev_targp && + bdev_read_only(mp->m_rtdev_targp->bt_bdev)) { + xfs_warn(mp, + "ro->rw transition prohibited by read-only rtdev"); + return -EACCES; + } + if (xfs_has_norecovery(mp)) { xfs_warn(mp, "ro->rw transition prohibited on norecovery mount"); @@ -2129,6 +2193,14 @@ xfs_fs_reconfigure( mp->m_features |= XFS_FEAT_ATTR2; } + /* Validate new max_atomic_write option before making other changes */ + if (mp->m_awu_max_bytes != new_mp->m_awu_max_bytes) { + error = xfs_set_max_atomic_write_opt(mp, + new_mp->m_awu_max_bytes); + if (error) + return error; + } + /* inode32 -> inode64 */ if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) { mp->m_features &= ~XFS_FEAT_SMALL_INUMS; diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h index 276696a07040..51646f066c4f 100644 --- a/fs/xfs/xfs_sysctl.h +++ b/fs/xfs/xfs_sysctl.h @@ -29,8 +29,6 @@ typedef struct xfs_param { xfs_sysctl_val_t inherit_sync; /* Inherit the "sync" inode flag. */ xfs_sysctl_val_t inherit_nodump;/* Inherit the "nodump" inode flag. */ xfs_sysctl_val_t inherit_noatim;/* Inherit the "noatime" inode flag. */ - xfs_sysctl_val_t xfs_buf_timer; /* Interval between xfsbufd wakeups. */ - xfs_sysctl_val_t xfs_buf_age; /* Metadata buffer age before flush. */ xfs_sysctl_val_t inherit_nosym; /* Inherit the "nosymlinks" flag. */ xfs_sysctl_val_t rotorstep; /* inode32 AG rotoring control knob */ xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. 
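A userspace sketch of the k/m/g suffix handling that suffix_kstrtoull() adds for the max_atomic_write mount option; strtoull stands in for kstrtoull and error handling is simplified.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_size(const char *s, unsigned long long *res)
{
        char *end;
        unsigned long long val = strtoull(s, &end, 10);
        int shift = 0;

        if (end == s)
                return -1;
        switch (tolower((unsigned char)*end)) {
        case 'k': shift = 10; end++; break;
        case 'm': shift = 20; end++; break;
        case 'g': shift = 30; end++; break;
        }
        if (*end != '\0')
                return -1;
        *res = val << shift;
        return 0;
}

int main(void)
{
        unsigned long long v;

        if (parse_size("64k", &v) == 0)
                printf("%llu\n", v);    /* 65536 */
        return 0;
}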
*/ diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index e56ba1963160..01d284a1c759 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -170,6 +170,99 @@ DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound); DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list); DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list); +TRACE_EVENT(xfs_calc_atomic_write_unit_max, + TP_PROTO(struct xfs_mount *mp, unsigned int max_write, + unsigned int max_ioend, unsigned int max_agsize, + unsigned int max_rgsize), + TP_ARGS(mp, max_write, max_ioend, max_agsize, max_rgsize), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned int, max_write) + __field(unsigned int, max_ioend) + __field(unsigned int, max_agsize) + __field(unsigned int, max_rgsize) + __field(unsigned int, data_awu_max) + __field(unsigned int, rt_awu_max) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->max_write = max_write; + __entry->max_ioend = max_ioend; + __entry->max_agsize = max_agsize; + __entry->max_rgsize = max_rgsize; + __entry->data_awu_max = mp->m_groups[XG_TYPE_AG].awu_max; + __entry->rt_awu_max = mp->m_groups[XG_TYPE_RTG].awu_max; + ), + TP_printk("dev %d:%d max_write %u max_ioend %u max_agsize %u max_rgsize %u data_awu_max %u rt_awu_max %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->max_write, + __entry->max_ioend, + __entry->max_agsize, + __entry->max_rgsize, + __entry->data_awu_max, + __entry->rt_awu_max) +); + +TRACE_EVENT(xfs_calc_max_atomic_write_fsblocks, + TP_PROTO(struct xfs_mount *mp, unsigned int per_intent, + unsigned int step_size, unsigned int logres, + unsigned int blockcount), + TP_ARGS(mp, per_intent, step_size, logres, blockcount), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned int, per_intent) + __field(unsigned int, step_size) + __field(unsigned int, logres) + __field(unsigned int, blockcount) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->per_intent = per_intent; + __entry->step_size = step_size; + __entry->logres = logres; + __entry->blockcount = blockcount; + ), + TP_printk("dev %d:%d per_intent %u step_size %u logres %u blockcount %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->per_intent, + __entry->step_size, + __entry->logres, + __entry->blockcount) +); + +TRACE_EVENT(xfs_calc_max_atomic_write_log_geometry, + TP_PROTO(struct xfs_mount *mp, unsigned int per_intent, + unsigned int step_size, unsigned int blockcount, + unsigned int min_logblocks, unsigned int logres), + TP_ARGS(mp, per_intent, step_size, blockcount, min_logblocks, logres), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned int, per_intent) + __field(unsigned int, step_size) + __field(unsigned int, blockcount) + __field(unsigned int, min_logblocks) + __field(unsigned int, cur_logblocks) + __field(unsigned int, logres) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->per_intent = per_intent; + __entry->step_size = step_size; + __entry->blockcount = blockcount; + __entry->min_logblocks = min_logblocks; + __entry->cur_logblocks = mp->m_sb.sb_logblocks; + __entry->logres = logres; + ), + TP_printk("dev %d:%d per_intent %u step_size %u blockcount %u min_logblocks %u logblocks %u logres %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->per_intent, + __entry->step_size, + __entry->blockcount, + __entry->min_logblocks, + __entry->cur_logblocks, + __entry->logres) +); + TRACE_EVENT(xlog_intent_recovery_failed, TP_PROTO(struct xfs_mount *mp, const struct xfs_defer_op_type *ops, int error), @@ -1657,6 +1750,28 @@ 
DEFINE_RW_EVENT(xfs_file_direct_write); DEFINE_RW_EVENT(xfs_file_dax_write); DEFINE_RW_EVENT(xfs_reflink_bounce_dio_write); +TRACE_EVENT(xfs_iomap_atomic_write_cow, + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), + TP_ARGS(ip, offset, count), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_off_t, offset) + __field(ssize_t, count) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->offset = offset; + __entry->count = count; + ), + TP_printk("dev %d:%d ino 0x%llx pos 0x%llx bytecount 0x%zx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->offset, + __entry->count) +) + DECLARE_EVENT_CLASS(xfs_imap_class, TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, int whichfork, struct xfs_bmbt_irec *irec), diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c index d509e49b2aaa..80add26c0111 100644 --- a/fs/xfs/xfs_zone_alloc.c +++ b/fs/xfs/xfs_zone_alloc.c @@ -24,6 +24,7 @@ #include "xfs_zone_priv.h" #include "xfs_zones.h" #include "xfs_trace.h" +#include "xfs_mru_cache.h" void xfs_open_zone_put( @@ -796,6 +797,100 @@ xfs_submit_zoned_bio( submit_bio(&ioend->io_bio); } +/* + * Cache the last zone written to for an inode so that it is considered first + * for subsequent writes. + */ +struct xfs_zone_cache_item { + struct xfs_mru_cache_elem mru; + struct xfs_open_zone *oz; +}; + +static inline struct xfs_zone_cache_item * +xfs_zone_cache_item(struct xfs_mru_cache_elem *mru) +{ + return container_of(mru, struct xfs_zone_cache_item, mru); +} + +static void +xfs_zone_cache_free_func( + void *data, + struct xfs_mru_cache_elem *mru) +{ + struct xfs_zone_cache_item *item = xfs_zone_cache_item(mru); + + xfs_open_zone_put(item->oz); + kfree(item); +} + +/* + * Check if we have a cached last open zone available for the inode and + * if yes return a reference to it. + */ +static struct xfs_open_zone * +xfs_cached_zone( + struct xfs_mount *mp, + struct xfs_inode *ip) +{ + struct xfs_mru_cache_elem *mru; + struct xfs_open_zone *oz; + + mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino); + if (!mru) + return NULL; + oz = xfs_zone_cache_item(mru)->oz; + if (oz) { + /* + * GC only steals open zones at mount time, so no GC zones + * should end up in the cache. + */ + ASSERT(!oz->oz_is_gc); + ASSERT(atomic_read(&oz->oz_ref) > 0); + atomic_inc(&oz->oz_ref); + } + xfs_mru_cache_done(mp->m_zone_cache); + return oz; +} + +/* + * Update the last used zone cache for a given inode. + * + * The caller must have a reference on the open zone. + */ +static void +xfs_zone_cache_create_association( + struct xfs_inode *ip, + struct xfs_open_zone *oz) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_zone_cache_item *item = NULL; + struct xfs_mru_cache_elem *mru; + + ASSERT(atomic_read(&oz->oz_ref) > 0); + atomic_inc(&oz->oz_ref); + + mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino); + if (mru) { + /* + * If we have an association already, update it to point to the + * new zone. 
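The zone cache lookup above follows the usual referenced-object pattern: bump the zone's refcount while the cache is still held so the pointer remains valid afterwards. A minimal sketch with locking elided and a plain atomic counter standing in for oz_ref.

#include <stdatomic.h>
#include <stddef.h>

struct open_zone { atomic_int ref; };

static struct open_zone *cached_zone_get(struct open_zone *cached)
{
        if (!cached)
                return NULL;
        atomic_fetch_add(&cached->ref, 1);  /* caller now owns a reference */
        return cached;
}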
+ */ + item = xfs_zone_cache_item(mru); + xfs_open_zone_put(item->oz); + item->oz = oz; + xfs_mru_cache_done(mp->m_zone_cache); + return; + } + + item = kmalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + xfs_open_zone_put(oz); + return; + } + item->oz = oz; + xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru); +} + void xfs_zone_alloc_and_submit( struct iomap_ioend *ioend, @@ -819,11 +914,16 @@ xfs_zone_alloc_and_submit( */ if (!*oz && ioend->io_offset) *oz = xfs_last_used_zone(ioend); + if (!*oz) + *oz = xfs_cached_zone(mp, ip); + if (!*oz) { select_zone: *oz = xfs_select_zone(mp, write_hint, pack_tight); if (!*oz) goto out_error; + + xfs_zone_cache_create_association(ip, *oz); } alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size), @@ -1211,6 +1311,14 @@ xfs_mount_zones( error = xfs_zone_gc_mount(mp); if (error) goto out_free_zone_info; + + /* + * Set up a mru cache to track inode to open zone for data placement + * purposes. The magic values for group count and life time is the + * same as the defaults for file streams, which seems sane enough. + */ + xfs_mru_cache_create(&mp->m_zone_cache, mp, + 5000, 10, xfs_zone_cache_free_func); return 0; out_free_zone_info: @@ -1224,4 +1332,5 @@ xfs_unmount_zones( { xfs_zone_gc_unmount(mp); xfs_free_zone_info(mp->m_zone_info); + xfs_mru_cache_destroy(mp->m_zone_cache); } diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index faf1eb87895d..d165eb979f21 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -1111,28 +1111,19 @@ static int zonefs_read_super(struct super_block *sb) struct zonefs_sb_info *sbi = ZONEFS_SB(sb); struct zonefs_super *super; u32 crc, stored_crc; - struct page *page; - struct bio_vec bio_vec; - struct bio bio; int ret; - page = alloc_page(GFP_KERNEL); - if (!page) + super = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!super) return -ENOMEM; - bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ); - bio.bi_iter.bi_sector = 0; - __bio_add_page(&bio, page, PAGE_SIZE, 0); - - ret = submit_bio_wait(&bio); + ret = bdev_rw_virt(sb->s_bdev, 0, super, PAGE_SIZE, REQ_OP_READ); if (ret) - goto free_page; - - super = page_address(page); + goto free_super; ret = -EINVAL; if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC) - goto free_page; + goto free_super; stored_crc = le32_to_cpu(super->s_crc); super->s_crc = 0; @@ -1140,14 +1131,14 @@ static int zonefs_read_super(struct super_block *sb) if (crc != stored_crc) { zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)", crc, stored_crc); - goto free_page; + goto free_super; } sbi->s_features = le64_to_cpu(super->s_features); if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) { zonefs_err(sb, "Unknown features set 0x%llx\n", sbi->s_features); - goto free_page; + goto free_super; } if (sbi->s_features & ZONEFS_F_UID) { @@ -1155,7 +1146,7 @@ static int zonefs_read_super(struct super_block *sb) le32_to_cpu(super->s_uid)); if (!uid_valid(sbi->s_uid)) { zonefs_err(sb, "Invalid UID feature\n"); - goto free_page; + goto free_super; } } @@ -1164,7 +1155,7 @@ static int zonefs_read_super(struct super_block *sb) le32_to_cpu(super->s_gid)); if (!gid_valid(sbi->s_gid)) { zonefs_err(sb, "Invalid GID feature\n"); - goto free_page; + goto free_super; } } @@ -1173,15 +1164,14 @@ static int zonefs_read_super(struct super_block *sb) if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) { zonefs_err(sb, "Reserved area is being used\n"); - goto free_page; + goto free_super; } import_uuid(&sbi->s_uuid, super->s_uuid); ret = 0; -free_page: - __free_page(page); - 
+free_super: + kfree(super); return ret; } diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h index d0343d58a74a..ac29a22eb7cf 100644 --- a/include/asm-generic/simd.h +++ b/include/asm-generic/simd.h @@ -1,6 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_SIMD_H +#define _ASM_GENERIC_SIMD_H -#include <linux/hardirq.h> +#include <linux/compiler_attributes.h> +#include <linux/preempt.h> +#include <linux/types.h> /* * may_use_simd - whether it is allowable at this time to issue SIMD @@ -13,3 +17,5 @@ static __must_check inline bool may_use_simd(void) { return !in_interrupt(); } + +#endif /* _ASM_GENERIC_SIMD_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 58a635a6d5bd..66409bc3a4e0 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -108,13 +108,13 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) #define TEXT_MAIN .text #endif #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else -#define DATA_MAIN .data +#define DATA_MAIN .data .data.rel .data.rel.local #define SDATA_MAIN .sdata #define RODATA_MAIN .rodata #define BSS_MAIN .bss diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index c497c73baf13..9eacb9fa375d 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -32,30 +32,28 @@ /* Set this bit for if virtual address destination cannot be used for DMA. */ #define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010 -/* Set this bit if source is a folio. */ -#define CRYPTO_ACOMP_REQ_SRC_FOLIO 0x00000020 - -/* Set this bit if destination is a folio. */ -#define CRYPTO_ACOMP_REQ_DST_FOLIO 0x00000040 +/* Private flags that should not be touched by the user. 
*/ +#define CRYPTO_ACOMP_REQ_PRIVATE \ + (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \ + CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA) #define CRYPTO_ACOMP_DST_MAX 131072 #define MAX_SYNC_COMP_REQSIZE 0 -#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \ +#define ACOMP_REQUEST_ON_STACK(name, tfm) \ char __##name##_req[sizeof(struct acomp_req) + \ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ struct acomp_req *name = acomp_request_on_stack_init( \ - __##name##_req, (tfm), (gfp), false) + __##name##_req, (tfm)) + +#define ACOMP_REQUEST_CLONE(name, gfp) \ + acomp_request_clone(name, sizeof(__##name##_req), gfp) struct acomp_req; struct folio; struct acomp_req_chain { - struct list_head head; - struct acomp_req *req0; - struct acomp_req *cur; - int (*op)(struct acomp_req *req); crypto_completion_t compl; void *data; struct scatterlist ssg; @@ -68,8 +66,6 @@ struct acomp_req_chain { u8 *dst; struct folio *dfolio; }; - size_t soff; - size_t doff; u32 flags; }; @@ -81,10 +77,6 @@ struct acomp_req_chain { * @dst: Destination scatterlist * @svirt: Source virtual address * @dvirt: Destination virtual address - * @sfolio: Source folio - * @soff: Source folio offset - * @dfolio: Destination folio - * @doff: Destination folio offset * @slen: Size of the input buffer * @dlen: Size of the output buffer and number of bytes produced * @chain: Private API code data, do not use @@ -95,15 +87,11 @@ struct acomp_req { union { struct scatterlist *src; const u8 *svirt; - struct folio *sfolio; }; union { struct scatterlist *dst; u8 *dvirt; - struct folio *dfolio; }; - size_t soff; - size_t doff; unsigned int slen; unsigned int dlen; @@ -126,18 +114,11 @@ struct crypto_acomp { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); unsigned int reqsize; - struct crypto_acomp *fb; struct crypto_tfm base; }; -struct crypto_acomp_stream { - spinlock_t lock; - void *ctx; -}; - #define COMP_ALG_COMMON { \ struct crypto_alg base; \ - struct crypto_acomp_stream __percpu *stream; \ } struct comp_alg_common COMP_ALG_COMMON; @@ -213,7 +194,7 @@ static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) static inline void acomp_request_set_tfm(struct acomp_req *req, struct crypto_acomp *tfm) { - req->base.tfm = crypto_acomp_tfm(tfm); + crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm)); } static inline bool acomp_is_async(struct crypto_acomp *tfm) @@ -310,6 +291,11 @@ static inline void *acomp_request_extra(struct acomp_req *req) return (void *)((char *)req + len); } +static inline bool acomp_req_on_stack(struct acomp_req *req) +{ + return crypto_req_on_stack(&req->base); +} + /** * acomp_request_free() -- zeroize and free asynchronous (de)compression * request as well as the output buffer if allocated @@ -319,7 +305,7 @@ static inline void *acomp_request_extra(struct acomp_req *req) */ static inline void acomp_request_free(struct acomp_req *req) { - if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK)) + if (!req || acomp_req_on_stack(req)) return; kfree_sensitive(req); } @@ -340,17 +326,9 @@ static inline void acomp_request_set_callback(struct acomp_req *req, crypto_completion_t cmpl, void *data) { - u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | - CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA | - CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO | - CRYPTO_TFM_REQ_ON_STACK; - - req->base.complete = cmpl; - req->base.data = data; - req->base.flags &= keep; - req->base.flags |= flgs & ~keep; - - 
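To illustrate the reworked ACOMP_REQUEST_ON_STACK() above: a minimal synchronous, one-shot compression call could look like the sketch below. The algorithm name, the flat source/destination buffers and the wrapper itself are illustrative assumptions; only the macro and the request setters come from this header.

/* Hypothetical one-shot compression with an on-stack request. */
static int example_compress(const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen)
{
	struct crypto_acomp *tfm;
	int err;

	/* ask for a synchronous implementation so no completion is needed */
	tfm = crypto_alloc_acomp("deflate", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		ACOMP_REQUEST_ON_STACK(req, tfm);

		acomp_request_set_callback(req, 0, NULL, NULL);
		acomp_request_set_src_dma(req, src, slen);
		acomp_request_set_dst_dma(req, dst, *dlen);
		err = crypto_acomp_compress(req);
		if (!err)
			*dlen = req->dlen;	/* bytes actually produced */
	}

	crypto_free_acomp(tfm);
	return err;
}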
crypto_reqchain_init(&req->base); + flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE; + flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE; + crypto_request_set_callback(&req->base, flgs, cmpl, data); } /** @@ -379,8 +357,6 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | - CRYPTO_ACOMP_REQ_SRC_FOLIO | - CRYPTO_ACOMP_REQ_DST_FOLIO | CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA); } @@ -403,7 +379,6 @@ static inline void acomp_request_set_src_sg(struct acomp_req *req, req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; - req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; } /** @@ -423,7 +398,6 @@ static inline void acomp_request_set_src_dma(struct acomp_req *req, req->slen = slen; req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; - req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; } @@ -444,7 +418,6 @@ static inline void acomp_request_set_src_nondma(struct acomp_req *req, req->svirt = src; req->slen = slen; - req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; } @@ -463,13 +436,9 @@ static inline void acomp_request_set_src_folio(struct acomp_req *req, struct folio *folio, size_t off, unsigned int len) { - req->sfolio = folio; - req->soff = off; - req->slen = len; - - req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; - req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; - req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO; + sg_init_table(&req->chain.ssg, 1); + sg_set_folio(&req->chain.ssg, folio, len, off); + acomp_request_set_src_sg(req, &req->chain.ssg, len); } /** @@ -490,7 +459,6 @@ static inline void acomp_request_set_dst_sg(struct acomp_req *req, req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; - req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; } /** @@ -510,7 +478,6 @@ static inline void acomp_request_set_dst_dma(struct acomp_req *req, req->dlen = dlen; req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; - req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; } @@ -530,7 +497,6 @@ static inline void acomp_request_set_dst_nondma(struct acomp_req *req, req->dvirt = dst; req->dlen = dlen; - req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; } @@ -549,19 +515,9 @@ static inline void acomp_request_set_dst_folio(struct acomp_req *req, struct folio *folio, size_t off, unsigned int len) { - req->dfolio = folio; - req->doff = off; - req->dlen = len; - - req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; - req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; - req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO; -} - -static inline void acomp_request_chain(struct acomp_req *req, - struct acomp_req *head) -{ - crypto_request_chain(&req->base, &head->base); + sg_init_table(&req->chain.dsg, 1); + sg_set_folio(&req->chain.dsg, folio, len, off); + acomp_request_set_dst_sg(req, &req->chain.dsg, len); } /** @@ -587,18 +543,15 @@ int crypto_acomp_compress(struct acomp_req *req); int crypto_acomp_decompress(struct acomp_req *req); static inline struct acomp_req *acomp_request_on_stack_init( - char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly) + char *buf, struct crypto_acomp *tfm) { - struct acomp_req *req; - - if (!stackonly && (req = acomp_request_alloc(tfm, gfp))) - return 
req; - - req = (void *)buf; - acomp_request_set_tfm(req, tfm->fb); - req->base.flags = CRYPTO_TFM_REQ_ON_STACK; + struct acomp_req *req = (void *)buf; + crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm)); return req; } +struct acomp_req *acomp_request_clone(struct acomp_req *req, + size_t total, gfp_t gfp); + #endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 6e07bbc04089..188eface0a11 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -68,16 +68,17 @@ struct crypto_instance { struct crypto_spawn *spawns; }; - struct work_struct free_work; - void *__ctx[] CRYPTO_MINALIGN_ATTR; }; struct crypto_template { struct list_head list; struct hlist_head instances; + struct hlist_head dead; struct module *module; + struct work_struct free_work; + int (*create)(struct crypto_template *tmpl, struct rtattr **tb); char name[CRYPTO_MAX_ALG_NAME]; @@ -106,18 +107,6 @@ struct crypto_queue { unsigned int max_qlen; }; -struct scatter_walk { - /* Must be the first member, see struct skcipher_walk. */ - union { - void *const addr; - - /* Private API field, do not touch. */ - union crypto_no_such_thing *__addr; - }; - struct scatterlist *sg; - unsigned int offset; -}; - struct crypto_attr_alg { char name[CRYPTO_MAX_ALG_NAME]; }; @@ -157,8 +146,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); const char *crypto_attr_alg_name(struct rtattr *rta); -int crypto_inst_setname(struct crypto_instance *inst, const char *name, - struct crypto_alg *alg); +int __crypto_inst_setname(struct crypto_instance *inst, const char *name, + const char *driver, struct crypto_alg *alg); + +#define crypto_inst_setname(inst, name, ...) \ + CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \ + inst, name, ##__VA_ARGS__) +#define crypto_inst_setname_1(inst, name, alg) \ + __crypto_inst_setname(inst, name, name, alg) +#define crypto_inst_setname_2(inst, name, driver, alg) \ + __crypto_inst_setname(inst, name, driver, alg) void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); int crypto_enqueue_request(struct crypto_queue *queue, @@ -266,14 +263,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; } -static inline bool crypto_request_chained(struct crypto_async_request *req) +static inline bool crypto_tfm_req_virt(struct crypto_tfm *tfm) { - return !list_empty(&req->list); + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_VIRT; } -static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm) +static inline u32 crypto_request_flags(struct crypto_async_request *req) { - return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN; + return req->flags & ~CRYPTO_TFM_REQ_ON_STACK; } #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h index 0c0176285349..dd7694477e50 100644 --- a/include/crypto/blake2b.h +++ b/include/crypto/blake2b.h @@ -7,10 +7,20 @@ #include <linux/types.h> #include <linux/string.h> +struct blake2b_state { + /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */ + u64 h[8]; + u64 t[2]; + /* The true state ends here. The rest is temporary storage. 
*/ + u64 f[2]; +}; + enum blake2b_lengths { BLAKE2B_BLOCK_SIZE = 128, BLAKE2B_HASH_SIZE = 64, BLAKE2B_KEY_SIZE = 64, + BLAKE2B_STATE_SIZE = offsetof(struct blake2b_state, f), + BLAKE2B_DESC_SIZE = sizeof(struct blake2b_state), BLAKE2B_160_HASH_SIZE = 20, BLAKE2B_256_HASH_SIZE = 32, @@ -18,16 +28,6 @@ enum blake2b_lengths { BLAKE2B_512_HASH_SIZE = 64, }; -struct blake2b_state { - /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */ - u64 h[8]; - u64 t[2]; - u64 f[2]; - u8 buf[BLAKE2B_BLOCK_SIZE]; - unsigned int buflen; - unsigned int outlen; -}; - enum blake2b_iv { BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL, BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL, @@ -40,7 +40,7 @@ enum blake2b_iv { }; static inline void __blake2b_init(struct blake2b_state *state, size_t outlen, - const void *key, size_t keylen) + size_t keylen) { state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen); state->h[1] = BLAKE2B_IV1; @@ -52,15 +52,6 @@ static inline void __blake2b_init(struct blake2b_state *state, size_t outlen, state->h[7] = BLAKE2B_IV7; state->t[0] = 0; state->t[1] = 0; - state->f[0] = 0; - state->f[1] = 0; - state->buflen = 0; - state->outlen = outlen; - if (keylen) { - memcpy(state->buf, key, keylen); - memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen); - state->buflen = BLAKE2B_BLOCK_SIZE; - } } #endif /* _CRYPTO_BLAKE2B_H */ diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index f8cc073bba41..91f6b4cf561c 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -16,6 +16,7 @@ #define _CRYPTO_CHACHA_H #include <linux/unaligned.h> +#include <linux/string.h> #include <linux/types.h> /* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */ @@ -25,21 +26,32 @@ #define CHACHA_BLOCK_SIZE 64 #define CHACHAPOLY_IV_SIZE 12 -#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) +#define CHACHA_KEY_WORDS 8 +#define CHACHA_STATE_WORDS 16 +#define HCHACHA_OUT_WORDS 8 /* 192-bit nonce, then 64-bit stream position */ #define XCHACHA_IV_SIZE 32 -void chacha_block_generic(u32 *state, u8 *stream, int nrounds); -static inline void chacha20_block(u32 *state, u8 *stream) +struct chacha_state { + u32 x[CHACHA_STATE_WORDS]; +}; + +void chacha_block_generic(struct chacha_state *state, + u8 out[CHACHA_BLOCK_SIZE], int nrounds); +static inline void chacha20_block(struct chacha_state *state, + u8 out[CHACHA_BLOCK_SIZE]) { - chacha_block_generic(state, stream, 20); + chacha_block_generic(state, out, 20); } -void hchacha_block_arch(const u32 *state, u32 *out, int nrounds); -void hchacha_block_generic(const u32 *state, u32 *out, int nrounds); +void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); +void hchacha_block_generic(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); -static inline void hchacha_block(const u32 *state, u32 *out, int nrounds) +static inline void hchacha_block(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) { if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) hchacha_block_arch(state, out, nrounds); @@ -54,37 +66,40 @@ enum chacha_constants { /* expand 32-byte k */ CHACHA_CONSTANT_TE_K = 0x6b206574U }; -static inline void chacha_init_consts(u32 *state) +static inline void chacha_init_consts(struct chacha_state *state) { - state[0] = CHACHA_CONSTANT_EXPA; - state[1] = CHACHA_CONSTANT_ND_3; - state[2] = CHACHA_CONSTANT_2_BY; - state[3] = CHACHA_CONSTANT_TE_K; + state->x[0] = CHACHA_CONSTANT_EXPA; + state->x[1] = CHACHA_CONSTANT_ND_3; + 
state->x[2] = CHACHA_CONSTANT_2_BY; + state->x[3] = CHACHA_CONSTANT_TE_K; } -static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) +static inline void chacha_init(struct chacha_state *state, + const u32 key[CHACHA_KEY_WORDS], + const u8 iv[CHACHA_IV_SIZE]) { chacha_init_consts(state); - state[4] = key[0]; - state[5] = key[1]; - state[6] = key[2]; - state[7] = key[3]; - state[8] = key[4]; - state[9] = key[5]; - state[10] = key[6]; - state[11] = key[7]; - state[12] = get_unaligned_le32(iv + 0); - state[13] = get_unaligned_le32(iv + 4); - state[14] = get_unaligned_le32(iv + 8); - state[15] = get_unaligned_le32(iv + 12); + state->x[4] = key[0]; + state->x[5] = key[1]; + state->x[6] = key[2]; + state->x[7] = key[3]; + state->x[8] = key[4]; + state->x[9] = key[5]; + state->x[10] = key[6]; + state->x[11] = key[7]; + state->x[12] = get_unaligned_le32(iv + 0); + state->x[13] = get_unaligned_le32(iv + 4); + state->x[14] = get_unaligned_le32(iv + 8); + state->x[15] = get_unaligned_le32(iv + 12); } -void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, +void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds); -void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, +void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds); -static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src, +static inline void chacha_crypt(struct chacha_state *state, + u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) @@ -93,10 +108,24 @@ static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src, chacha_crypt_generic(state, dst, src, bytes, nrounds); } -static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes) +static inline void chacha20_crypt(struct chacha_state *state, + u8 *dst, const u8 *src, unsigned int bytes) { chacha_crypt(state, dst, src, bytes, 20); } +static inline void chacha_zeroize_state(struct chacha_state *state) +{ + memzero_explicit(state, sizeof(*state)); +} + +#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA) +bool chacha_is_arch_optimized(void); +#else +static inline bool chacha_is_arch_optimized(void) +{ + return false; +} +#endif + #endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h index da1ee73e9ce9..06984a26c8cf 100644 --- a/include/crypto/ctr.h +++ b/include/crypto/ctr.h @@ -8,58 +8,8 @@ #ifndef _CRYPTO_CTR_H #define _CRYPTO_CTR_H -#include <crypto/algapi.h> -#include <crypto/internal/skcipher.h> -#include <linux/string.h> -#include <linux/types.h> - #define CTR_RFC3686_NONCE_SIZE 4 #define CTR_RFC3686_IV_SIZE 8 #define CTR_RFC3686_BLOCK_SIZE 16 -static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, - void (*fn)(struct crypto_skcipher *, - const u8 *, u8 *)) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - int blocksize = crypto_skcipher_chunksize(tfm); - u8 buf[MAX_CIPHER_BLOCKSIZE]; - struct skcipher_walk walk; - int err; - - /* avoid integer division due to variable blocksize parameter */ - if (WARN_ON_ONCE(!is_power_of_2(blocksize))) - return -EINVAL; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes > 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; - int nbytes = walk.nbytes; - int tail = 0; - - if (nbytes < walk.total) { - tail = walk.nbytes & (blocksize - 1); - nbytes -= tail; - } - - do { - int bsize = min(nbytes, blocksize); - - 
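As a usage sketch of the typed chacha_state interface above: the wrapper below is invented for illustration, but chacha_init(), chacha20_crypt() and chacha_zeroize_state() are exactly the calls declared in this header.

/* Hypothetical one-shot ChaCha20 encryption using the library interface. */
static void example_chacha20_crypt(const u32 key[CHACHA_KEY_WORDS],
				   const u8 iv[CHACHA_IV_SIZE],
				   u8 *dst, const u8 *src, unsigned int len)
{
	struct chacha_state state;

	chacha_init(&state, key, iv);
	chacha20_crypt(&state, dst, src, len);
	chacha_zeroize_state(&state);
}

Zeroizing the state afterwards mirrors the point of the new chacha_zeroize_state() helper: the expanded state contains the key words and should not be left behind on the stack.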
fn(tfm, walk.iv, buf); - - crypto_xor_cpy(dst, src, buf, bsize); - crypto_inc(walk.iv, blocksize); - - dst += bsize; - src += bsize; - nbytes -= bsize; - } while (nbytes > 0); - - err = skcipher_walk_done(&walk, tail); - } - return err; -} - #endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h index f832c9f2aca3..043d938e9a2c 100644 --- a/include/crypto/ghash.h +++ b/include/crypto/ghash.h @@ -7,18 +7,18 @@ #define __CRYPTO_GHASH_H__ #include <linux/types.h> -#include <crypto/gf128mul.h> #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 +struct gf128mul_4k; + struct ghash_ctx { struct gf128mul_4k *gf128; }; struct ghash_desc_ctx { u8 buffer[GHASH_BLOCK_SIZE]; - u32 bytes; }; #endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h index a67988316d06..6f6b9de12cd3 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -8,14 +8,17 @@ #ifndef _CRYPTO_HASH_H #define _CRYPTO_HASH_H -#include <linux/atomic.h> #include <linux/crypto.h> +#include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/string.h> /* Set this bit for virtual address instead of SG list. */ #define CRYPTO_AHASH_REQ_VIRT 0x00000001 +#define CRYPTO_AHASH_REQ_PRIVATE \ + CRYPTO_AHASH_REQ_VIRT + struct crypto_ahash; /** @@ -62,6 +65,10 @@ struct ahash_request { }; u8 *result; + struct scatterlist sg_head[2]; + crypto_completion_t saved_complete; + void *saved_data; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -82,6 +89,8 @@ struct ahash_request { * transformation object. Data processing can happen synchronously * [SHASH] or asynchronously [AHASH] at this point. Driver must not use * req->result. + * For block-only algorithms, @update must return the number + * of bytes to store in the API partial block buffer. * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the * transformation and retrieves the resulting hash from the driver and * pushes it back to upper layers. No data processing happens at this @@ -124,6 +133,10 @@ struct ahash_request { * data so the transformation can continue from this point onward. No * data processing happens at this point. Driver must not use * req->result. + * @export_core: Export partial state without partial block. Only defined + * for algorithms that are not block-only. + * @import_core: Import partial state without partial block. Only defined + * for algorithms that are not block-only. * @init_tfm: Initialize the cryptographic transformation object. * This function is called only once at the instantiation * time, right after the transformation context was @@ -136,7 +149,6 @@ struct ahash_request { * This is a counterpart to @init_tfm, used to remove * various changes set in @init_tfm. * @clone_tfm: Copy transform into new object, may allocate memory. - * @reqsize: Size of the request context. 
* @halg: see struct hash_alg_common */ struct ahash_alg { @@ -147,14 +159,14 @@ struct ahash_alg { int (*digest)(struct ahash_request *req); int (*export)(struct ahash_request *req, void *out); int (*import)(struct ahash_request *req, const void *in); + int (*export_core)(struct ahash_request *req, void *out); + int (*import_core)(struct ahash_request *req, const void *in); int (*setkey)(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); int (*init_tfm)(struct crypto_ahash *tfm); void (*exit_tfm)(struct crypto_ahash *tfm); int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src); - unsigned int reqsize; - struct hash_alg_common halg; }; @@ -165,17 +177,31 @@ struct shash_desc { #define HASH_MAX_DIGESTSIZE 64 +/* Worst case is sha3-224. */ +#define HASH_MAX_STATESIZE 200 + 144 + 1 + /* - * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc' - * containing a 'struct sha3_state'. + * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc' + * containing a 'struct s390_sha_ctx'. */ #define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) +#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \ + HASH_MAX_DESCSIZE) #define SHASH_DESC_ON_STACK(shash, ctx) \ char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \ __aligned(__alignof__(struct shash_desc)); \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc +#define HASH_REQUEST_ON_STACK(name, _tfm) \ + char __##name##_req[sizeof(struct ahash_request) + \ + MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct ahash_request *name = \ + ahash_request_on_stack_init(__##name##_req, (_tfm)) + +#define HASH_REQUEST_CLONE(name, gfp) \ + hash_request_clone(name, sizeof(__##name##_req), gfp) + /** * struct shash_alg - synchronous message digest definition * @init: see struct ahash_alg @@ -185,6 +211,8 @@ struct shash_desc { * @digest: see struct ahash_alg * @export: see struct ahash_alg * @import: see struct ahash_alg + * @export_core: see struct ahash_alg + * @import_core: see struct ahash_alg * @setkey: see struct ahash_alg * @init_tfm: Initialize the cryptographic transformation object. * This function is called only once at the instantiation @@ -215,6 +243,8 @@ struct shash_alg { unsigned int len, u8 *out); int (*export)(struct shash_desc *desc, void *out); int (*import)(struct shash_desc *desc, const void *in); + int (*export_core)(struct shash_desc *desc, void *out); + int (*import_core)(struct shash_desc *desc, const void *in); int (*setkey)(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); int (*init_tfm)(struct crypto_shash *tfm); @@ -238,7 +268,6 @@ struct crypto_ahash { }; struct crypto_shash { - unsigned int descsize; struct crypto_tfm base; }; @@ -252,6 +281,11 @@ struct crypto_shash { * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well. 
*/ +static inline bool ahash_req_on_stack(struct ahash_request *req) +{ + return crypto_req_on_stack(&req->base); +} + static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_ahash, base); @@ -459,7 +493,11 @@ int crypto_ahash_finup(struct ahash_request *req); * -EBUSY if queue is full and request should be resubmitted later; * other < 0 if an error occurred */ -int crypto_ahash_final(struct ahash_request *req); +static inline int crypto_ahash_final(struct ahash_request *req) +{ + req->nbytes = 0; + return crypto_ahash_finup(req); +} /** * crypto_ahash_digest() - calculate message digest for a buffer @@ -548,7 +586,7 @@ int crypto_ahash_update(struct ahash_request *req); static inline void ahash_request_set_tfm(struct ahash_request *req, struct crypto_ahash *tfm) { - req->base.tfm = crypto_ahash_tfm(tfm); + crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm)); } /** @@ -582,9 +620,12 @@ static inline struct ahash_request *ahash_request_alloc_noprof( * ahash_request_free() - zeroize and free the request data structure * @req: request data structure cipher handle to be freed */ -static inline void ahash_request_free(struct ahash_request *req) +void ahash_request_free(struct ahash_request *req); + +static inline void ahash_request_zero(struct ahash_request *req) { - kfree_sensitive(req); + memzero_explicit(req, sizeof(*req) + + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); } static inline struct ahash_request *ahash_request_cast( @@ -623,14 +664,9 @@ static inline void ahash_request_set_callback(struct ahash_request *req, crypto_completion_t compl, void *data) { - u32 keep = CRYPTO_AHASH_REQ_VIRT; - - req->base.complete = compl; - req->base.data = data; - flags &= ~keep; - req->base.flags &= keep; - req->base.flags |= flags; - crypto_reqchain_init(&req->base); + flags &= ~CRYPTO_AHASH_REQ_PRIVATE; + flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE; + crypto_request_set_callback(&req->base, flags, compl, data); } /** @@ -679,12 +715,6 @@ static inline void ahash_request_set_virt(struct ahash_request *req, req->base.flags |= CRYPTO_AHASH_REQ_VIRT; } -static inline void ahash_request_chain(struct ahash_request *req, - struct ahash_request *head) -{ - crypto_request_chain(&req->base, &head->base); -} - /** * DOC: Synchronous Message Digest API * @@ -820,7 +850,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) */ static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) { - return tfm->descsize; + return crypto_shash_alg(tfm)->descsize; } static inline void *shash_desc_ctx(struct shash_desc *desc) @@ -838,7 +868,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc) * cipher handle must point to a keyed message digest cipher in order for this * function to succeed. * - * Context: Any context. + * Context: Softirq or process context. * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, @@ -855,7 +885,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, * crypto_shash_update and crypto_shash_final. The parameters have the same * meaning as discussed for those separate three functions. * - * Context: Any context. + * Context: Softirq or process context. 
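Tying together HASH_REQUEST_ON_STACK(), ahash_request_set_virt() and crypto_ahash_digest() from above: a hedged sketch of a one-shot digest over a linear kernel buffer. It assumes a synchronous tfm (which is what on-stack requests are meant for) and assumes the (src, result, nbytes) argument order of ahash_request_set_virt(); the wrapper name is made up.

/* Hypothetical one-shot digest of a virtually contiguous buffer. */
static int example_hash_virt(struct crypto_ahash *tfm, const u8 *data,
			     unsigned int len, u8 *out)
{
	HASH_REQUEST_ON_STACK(req, tfm);
	int err;

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_virt(req, data, out, len);
	err = crypto_ahash_digest(req);
	ahash_request_zero(req);
	return err;
}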
* Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ @@ -875,12 +905,15 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, * directly, and it allocates a hash descriptor on the stack internally. * Note that this stack allocation may be fairly large. * - * Context: Any context. + * Context: Softirq or process context. * Return: 0 on success; < 0 if an error occurred. */ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, unsigned int len, u8 *out); +int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data, + unsigned int len, u8 *out); + /** * crypto_shash_export() - extract operational state for message digest * @desc: reference to the operational state handle whose state is exported @@ -890,7 +923,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, * caller-allocated output buffer out which must have sufficient size (e.g. by * calling crypto_shash_descsize). * - * Context: Any context. + * Context: Softirq or process context. * Return: 0 if the export creation was successful; < 0 if an error occurred */ int crypto_shash_export(struct shash_desc *desc, void *out); @@ -904,7 +937,7 @@ int crypto_shash_export(struct shash_desc *desc, void *out); * the input buffer. That buffer should have been generated with the * crypto_ahash_export function. * - * Context: Any context. + * Context: Softirq or process context. * Return: 0 if the import was successful; < 0 if an error occurred */ int crypto_shash_import(struct shash_desc *desc, const void *in); @@ -917,19 +950,29 @@ int crypto_shash_import(struct shash_desc *desc, const void *in); * operational state handle. Any potentially existing state created by * previous operations is discarded. * - * Context: Any context. + * Context: Softirq or process context. * Return: 0 if the message digest initialization was successful; < 0 if an * error occurred */ -static inline int crypto_shash_init(struct shash_desc *desc) -{ - struct crypto_shash *tfm = desc->tfm; +int crypto_shash_init(struct shash_desc *desc); - if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return crypto_shash_alg(tfm)->init(desc); -} +/** + * crypto_shash_finup() - calculate message digest of buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Context: Softirq or process context. + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ +int crypto_shash_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); /** * crypto_shash_update() - add data to message digest for processing @@ -939,12 +982,15 @@ static inline int crypto_shash_init(struct shash_desc *desc) * * Updates the message digest state of the operational state handle. * - * Context: Any context. + * Context: Softirq or process context. 
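For comparison with crypto_shash_tfm_digest() and the new crypto_hash_digest() declared above, the long-standing on-stack shash pattern still looks like the sketch below; the wrapper name is invented, the calls are the documented API.

/* Hypothetical one-shot digest using an on-stack shash descriptor. */
static int example_shash_digest(struct crypto_shash *tfm,
				const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);
	return err;
}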
* Return: 0 if the message digest update was successful; < 0 if an error * occurred */ -int crypto_shash_update(struct shash_desc *desc, const u8 *data, - unsigned int len); +static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + return crypto_shash_finup(desc, data, len, NULL); +} /** * crypto_shash_final() - calculate message digest @@ -956,29 +1002,14 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data, * into the output buffer. The caller must ensure that the output buffer is * large enough by using crypto_shash_digestsize. * - * Context: Any context. - * Return: 0 if the message digest creation was successful; < 0 if an error - * occurred - */ -int crypto_shash_final(struct shash_desc *desc, u8 *out); - -/** - * crypto_shash_finup() - calculate message digest of buffer - * @desc: see crypto_shash_final() - * @data: see crypto_shash_update() - * @len: see crypto_shash_update() - * @out: see crypto_shash_final() - * - * This function is a "short-hand" for the function calls of - * crypto_shash_update and crypto_shash_final. The parameters have the same - * meaning as discussed for those separate functions. - * - * Context: Any context. + * Context: Softirq or process context. * Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ -int crypto_shash_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out); +static inline int crypto_shash_final(struct shash_desc *desc, u8 *out) +{ + return crypto_shash_finup(desc, NULL, 0, out); +} static inline void shash_desc_zero(struct shash_desc *desc) { @@ -986,14 +1017,25 @@ static inline void shash_desc_zero(struct shash_desc *desc) sizeof(*desc) + crypto_shash_descsize(desc->tfm)); } -static inline int ahash_request_err(struct ahash_request *req) +static inline bool ahash_is_async(struct crypto_ahash *tfm) { - return req->base.err; + return crypto_tfm_is_async(&tfm->base); } -static inline bool ahash_is_async(struct crypto_ahash *tfm) +static inline struct ahash_request *ahash_request_on_stack_init( + char *buf, struct crypto_ahash *tfm) { - return crypto_tfm_is_async(&tfm->base); + struct ahash_request *req = (void *)buf; + + crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm)); + return req; +} + +static inline struct ahash_request *ahash_request_clone( + struct ahash_request *req, size_t total, gfp_t gfp) +{ + return container_of(crypto_request_clone(&req->base, total, gfp), + struct ahash_request, base); } #endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index aaf59f3236fa..ffffd88bbbad 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -11,12 +11,17 @@ #include <crypto/acompress.h> #include <crypto/algapi.h> +#include <crypto/scatterwalk.h> +#include <linux/compiler_types.h> +#include <linux/cpumask_types.h> +#include <linux/spinlock.h> +#include <linux/workqueue_types.h> -#define ACOMP_REQUEST_ON_STACK(name, tfm) \ +#define ACOMP_FBREQ_ON_STACK(name, req) \ char __##name##_req[sizeof(struct acomp_req) + \ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ - struct acomp_req *name = acomp_request_on_stack_init( \ - __##name##_req, (tfm), 0, true) + struct acomp_req *name = acomp_fbreq_on_stack_init( \ + __##name##_req, (req)) /** * struct acomp_alg - asynchronous compression algorithm @@ -35,9 +40,7 @@ * counterpart to @init, used to remove various changes set in * @init. 
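The ACOMP_FBREQ_ON_STACK() macro defined above lets a driver bounce a request it cannot handle to its software fallback. A hedged sketch of such a path, assuming the macro copies src, dst, slen and dlen from the original request as acomp_fbreq_on_stack_init() does further below:

/* Hypothetical software-fallback path inside an acomp driver. */
static int example_acomp_fallback_decompress(struct acomp_req *req)
{
	ACOMP_FBREQ_ON_STACK(fbreq, req);
	int err;

	err = crypto_acomp_decompress(fbreq);
	req->dlen = fbreq->dlen;	/* propagate the produced length */
	return err;
}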
* - * @reqsize: Context size for (de)compression requests * @base: Common crypto API algorithm data structure - * @stream: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with scomp */ struct acomp_alg { @@ -46,14 +49,61 @@ struct acomp_alg { int (*init)(struct crypto_acomp *tfm); void (*exit)(struct crypto_acomp *tfm); - unsigned int reqsize; - union { struct COMP_ALG_COMMON; struct comp_alg_common calg; }; }; +struct crypto_acomp_stream { + spinlock_t lock; + void *ctx; +}; + +struct crypto_acomp_streams { + /* These must come first because of struct scomp_alg. */ + void *(*alloc_ctx)(void); + union { + void (*free_ctx)(void *); + void (*cfree_ctx)(const void *); + }; + + struct crypto_acomp_stream __percpu *streams; + struct work_struct stream_work; + cpumask_t stream_want; +}; + +struct acomp_walk { + union { + /* Virtual address of the source. */ + struct { + struct { + const void *const addr; + } virt; + } src; + + /* Private field for the API, do not use. */ + struct scatter_walk in; + }; + + union { + /* Virtual address of the destination. */ + struct { + struct { + void *const addr; + } virt; + } dst; + + /* Private field for the API, do not use. */ + struct scatter_walk out; + }; + + unsigned int slen; + unsigned int dlen; + + int flags; +}; + /* * Transform internal helpers. */ @@ -98,17 +148,10 @@ void crypto_unregister_acomp(struct acomp_alg *alg); int crypto_register_acomps(struct acomp_alg *algs, int count); void crypto_unregister_acomps(struct acomp_alg *algs, int count); -static inline bool acomp_request_chained(struct acomp_req *req) -{ - return crypto_request_chained(&req->base); -} - static inline bool acomp_request_issg(struct acomp_req *req) { return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | - CRYPTO_ACOMP_REQ_DST_VIRT | - CRYPTO_ACOMP_REQ_SRC_FOLIO | - CRYPTO_ACOMP_REQ_DST_FOLIO)); + CRYPTO_ACOMP_REQ_DST_VIRT)); } static inline bool acomp_request_src_isvirt(struct acomp_req *req) @@ -143,19 +186,62 @@ static inline bool acomp_request_isnondma(struct acomp_req *req) CRYPTO_ACOMP_REQ_DST_NONDMA); } -static inline bool acomp_request_src_isfolio(struct acomp_req *req) +static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm) +{ + return crypto_tfm_req_virt(&tfm->base); +} + +void crypto_acomp_free_streams(struct crypto_acomp_streams *s); +int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s); + +struct crypto_acomp_stream *crypto_acomp_lock_stream_bh( + struct crypto_acomp_streams *s) __acquires(stream); + +static inline void crypto_acomp_unlock_stream_bh( + struct crypto_acomp_stream *stream) __releases(stream) +{ + spin_unlock_bh(&stream->lock); +} + +void acomp_walk_done_src(struct acomp_walk *walk, int used); +void acomp_walk_done_dst(struct acomp_walk *walk, int used); +int acomp_walk_next_src(struct acomp_walk *walk); +int acomp_walk_next_dst(struct acomp_walk *walk); +int acomp_walk_virt(struct acomp_walk *__restrict walk, + struct acomp_req *__restrict req, bool atomic); + +static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur) +{ + return walk->slen != cur; +} + +static inline u32 acomp_request_flags(struct acomp_req *req) { - return req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO; + return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE; } -static inline bool acomp_request_dst_isfolio(struct acomp_req *req) +static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm) { - return req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO; + return 
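A hedged sketch of how an algorithm might use the per-CPU stream helpers declared above. The scratch size and all names are invented; crypto_acomp_alloc_streams() is assumed to have been called once (and its return value checked) at registration time before this path can run.

/* Hypothetical per-CPU scratch-state handling for a compression algorithm. */
static void *example_alloc_ctx(void)
{
	return kvmalloc(4096, GFP_KERNEL);	/* scratch size is made up */
}

static void example_free_ctx(void *ctx)
{
	kvfree(ctx);
}

static struct crypto_acomp_streams example_streams = {
	.alloc_ctx	= example_alloc_ctx,
	.free_ctx	= example_free_ctx,
};

static int example_compress_chunk(const u8 *src, unsigned int slen,
				  u8 *dst, unsigned int *dlen)
{
	struct crypto_acomp_stream *s;

	s = crypto_acomp_lock_stream_bh(&example_streams);
	/* ... run the actual algorithm, using s->ctx as scratch state ... */
	crypto_acomp_unlock_stream_bh(s);
	return 0;
}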
__crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb); } -static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm) +static inline struct acomp_req *acomp_fbreq_on_stack_init( + char *buf, struct acomp_req *old) { - return crypto_tfm_req_chain(&tfm->base); + struct crypto_acomp *tfm = crypto_acomp_reqtfm(old); + struct acomp_req *req = (void *)buf; + + crypto_stack_request_init(&req->base, + crypto_acomp_tfm(crypto_acomp_fb(tfm))); + acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL); + req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE; + req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE; + req->src = old->src; + req->dst = old->dst; + req->slen = old->slen; + req->dlen = old->dlen; + + return req; } #endif diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h index 982fe5e8471c..3e09e2485306 100644 --- a/include/crypto/internal/blake2b.h +++ b/include/crypto/internal/blake2b.h @@ -7,65 +7,36 @@ #ifndef _CRYPTO_INTERNAL_BLAKE2B_H #define _CRYPTO_INTERNAL_BLAKE2B_H +#include <asm/byteorder.h> #include <crypto/blake2b.h> #include <crypto/internal/hash.h> +#include <linux/array_size.h> +#include <linux/compiler.h> +#include <linux/build_bug.h> +#include <linux/errno.h> +#include <linux/math.h> #include <linux/string.h> - -void blake2b_compress_generic(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc); +#include <linux/types.h> static inline void blake2b_set_lastblock(struct blake2b_state *state) { state->f[0] = -1; + state->f[1] = 0; } -typedef void (*blake2b_compress_t)(struct blake2b_state *state, - const u8 *block, size_t nblocks, u32 inc); - -static inline void __blake2b_update(struct blake2b_state *state, - const u8 *in, size_t inlen, - blake2b_compress_t compress) +static inline void blake2b_set_nonlast(struct blake2b_state *state) { - const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; - } - if (inlen > BLAKE2B_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE); - in += BLAKE2B_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; + state->f[0] = 0; + state->f[1] = 0; } -static inline void __blake2b_final(struct blake2b_state *state, u8 *out, - blake2b_compress_t compress) -{ - int i; - - blake2b_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */ - (*compress)(state, state->buf, 1, state->buflen); - for (i = 0; i < ARRAY_SIZE(state->h); i++) - __cpu_to_le64s(&state->h[i]); - memcpy(out, state->h, state->outlen); -} +typedef void (*blake2b_compress_t)(struct blake2b_state *state, + const u8 *block, size_t nblocks, u32 inc); /* Helper functions for shash implementations of BLAKE2b */ struct blake2b_tfm_ctx { - u8 key[BLAKE2B_KEY_SIZE]; + u8 key[BLAKE2B_BLOCK_SIZE]; unsigned int keylen; }; @@ -74,10 +45,13 @@ static inline int crypto_blake2b_setkey(struct crypto_shash *tfm, { struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm); - if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE) + if (keylen > BLAKE2B_KEY_SIZE) return -EINVAL; + BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE); + 
memcpy(tctx->key, key, keylen); + memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen); tctx->keylen = keylen; return 0; @@ -89,26 +63,38 @@ static inline int crypto_blake2b_init(struct shash_desc *desc) struct blake2b_state *state = shash_desc_ctx(desc); unsigned int outlen = crypto_shash_digestsize(desc->tfm); - __blake2b_init(state, outlen, tctx->key, tctx->keylen); - return 0; + __blake2b_init(state, outlen, tctx->keylen); + return tctx->keylen ? + crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0; } -static inline int crypto_blake2b_update(struct shash_desc *desc, - const u8 *in, unsigned int inlen, - blake2b_compress_t compress) +static inline int crypto_blake2b_update_bo(struct shash_desc *desc, + const u8 *in, unsigned int inlen, + blake2b_compress_t compress) { struct blake2b_state *state = shash_desc_ctx(desc); - __blake2b_update(state, in, inlen, compress); - return 0; + blake2b_set_nonlast(state); + compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE); + return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE); } -static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out, +static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in, + unsigned int inlen, u8 *out, blake2b_compress_t compress) { struct blake2b_state *state = shash_desc_ctx(desc); + u8 buf[BLAKE2B_BLOCK_SIZE]; + int i; - __blake2b_final(state, out, compress); + memcpy(buf, in, inlen); + memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen); + blake2b_set_lastblock(state); + compress(state, buf, 1, inlen); + for (i = 0; i < ARRAY_SIZE(state->h); i++) + __cpu_to_le64s(&state->h[i]); + memcpy(out, state->h, crypto_shash_digestsize(desc->tfm)); + memzero_explicit(buf, sizeof(buf)); return 0; } diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h new file mode 100644 index 000000000000..52d9d4c82493 --- /dev/null +++ b/include/crypto/internal/blockhash.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Handle partial blocks for block hash. 
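The crypto_blake2b_update_bo()/crypto_blake2b_finup() helpers above are meant for block-only shash drivers that supply their own compression routine. A hedged sketch of the glue such a driver might provide; the compression routine here is only an external prototype with the blake2b_compress_t signature and is not part of this patch.

/* Hypothetical arch compression routine, blake2b_compress_t signature. */
void example_blake2b_compress(struct blake2b_state *state,
			      const u8 *block, size_t nblocks, u32 inc);

static int example_blake2b_update(struct shash_desc *desc,
				  const u8 *in, unsigned int inlen)
{
	/* returns the number of unprocessed tail bytes to the hash core */
	return crypto_blake2b_update_bo(desc, in, inlen,
					example_blake2b_compress);
}

static int example_blake2b_finup(struct shash_desc *desc, const u8 *in,
				 unsigned int inlen, u8 *out)
{
	return crypto_blake2b_finup(desc, in, inlen, out,
				    example_blake2b_compress);
}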
+ * + * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> + * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H +#define _CRYPTO_INTERNAL_BLOCKHASH_H + +#include <linux/string.h> +#include <linux/types.h> + +#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \ + buf, buflen) \ + ({ \ + typeof(block_fn) *_block_fn = &(block_fn); \ + typeof(state + 0) _state = (state); \ + unsigned int _buflen = (buflen); \ + size_t _nbytes = (nbytes); \ + unsigned int _bs = (bs); \ + const u8 *_src = (src); \ + u8 *_buf = (buf); \ + while ((_buflen + _nbytes) >= _bs) { \ + const u8 *data = _src; \ + size_t len = _nbytes; \ + size_t blocks; \ + int remain; \ + if (_buflen) { \ + remain = _bs - _buflen; \ + memcpy(_buf + _buflen, _src, remain); \ + data = _buf; \ + len = _bs; \ + } \ + remain = len % bs; \ + blocks = (len - remain) / (dv); \ + (*_block_fn)(_state, data, blocks); \ + _src += len - remain - _buflen; \ + _nbytes -= len - remain - _buflen; \ + _buflen = 0; \ + } \ + memcpy(_buf + _buflen, _src, _nbytes); \ + _buflen += _nbytes; \ + }) + +#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \ + BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen) +#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \ + BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen) + +#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */ diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h deleted file mode 100644 index b085dc1ac151..000000000000 --- a/include/crypto/internal/chacha.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef _CRYPTO_INTERNAL_CHACHA_H -#define _CRYPTO_INTERNAL_CHACHA_H - -#include <crypto/chacha.h> -#include <crypto/internal/skcipher.h> -#include <linux/crypto.h> - -struct chacha_ctx { - u32 key[8]; - int nrounds; -}; - -static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize, int nrounds) -{ - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - int i; - - if (keysize != CHACHA_KEY_SIZE) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(ctx->key); i++) - ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); - - ctx->nrounds = nrounds; - return 0; -} - -static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize) -{ - return chacha_setkey(tfm, key, keysize, 20); -} - -static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize) -{ - return chacha_setkey(tfm, key, keysize, 12); -} - -#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h index fbf4be56cf12..b6a4ea2240fc 100644 --- a/include/crypto/internal/engine.h +++ b/include/crypto/internal/engine.h @@ -27,10 +27,10 @@ struct device; * @retry_support: indication that the hardware allows re-execution * of a failed backlog request * crypto-engine, in head position to keep order + * @rt: whether this queue is set to run as a realtime task * @list: link with the global crypto engine list * @queue_lock: spinlock to synchronise access to request queue * @queue: the crypto queue of the engine - * @rt: whether this queue is set to run as a realtime task * @prepare_crypt_hardware: a request will soon arrive from the queue * so the subsystem requests the driver to prepare the hardware * by issuing this call @@ -51,14 +51,13 @@ struct crypto_engine { bool running; bool 
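The BLOCK_HASH_UPDATE helpers above evaluate to the new number of buffered tail bytes, so a caller that keeps its own partial-block buffer can use them roughly as in the sketch below. The context layout and the block function are invented for illustration; only the macro comes from this patch.

/* Hypothetical incremental update built on BLOCK_HASH_UPDATE_BLOCKS(). */
struct example_blockhash_ctx {
	u32		state[8];
	u64		count;
	u8		buf[64];
	unsigned int	buflen;
};

/* Invented block function: consumes whole 64-byte blocks. */
void example_hash_blocks(u32 *state, const u8 *data, size_t nblocks);

static void example_blockhash_update(struct example_blockhash_ctx *ctx,
				     const u8 *data, unsigned int len)
{
	ctx->count += len;
	ctx->buflen = BLOCK_HASH_UPDATE_BLOCKS(example_hash_blocks, ctx->state,
					       data, len, sizeof(ctx->buf),
					       ctx->buf, ctx->buflen);
}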
retry_support; + bool rt; struct list_head list; spinlock_t queue_lock; struct crypto_queue queue; struct device *dev; - bool rt; - int (*prepare_crypt_hardware)(struct crypto_engine *engine); int (*unprepare_crypt_hardware)(struct crypto_engine *engine); int (*do_batch_requests)(struct crypto_engine *engine); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 7fd7126f593a..012f5fb22d43 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -15,7 +15,6 @@ struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead *child; - struct crypto_sync_skcipher *sknull; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 052ac7924af3..0f85c543f80b 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -11,6 +11,24 @@ #include <crypto/algapi.h> #include <crypto/hash.h> +/* Set this bit to handle partial blocks in the API. */ +#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000 + +/* Set this bit if final requires at least one byte. */ +#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000 + +/* Set this bit if finup can deal with multiple blocks. */ +#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000 + +/* This bit is set by the Crypto API if export_core is not supported. */ +#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000 + +#define HASH_FBREQ_ON_STACK(name, req) \ + char __##name##_req[sizeof(struct ahash_request) + \ + MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct ahash_request *name = ahash_fbreq_on_stack_init( \ + __##name##_req, (req)) + struct ahash_request; struct ahash_instance { @@ -49,6 +67,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count); void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); +void ahash_free_singlespawn_instance(struct ahash_instance *inst); int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); @@ -58,12 +77,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) return alg->setkey != shash_no_setkey; } +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) { return crypto_shash_alg_has_setkey(alg) && !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); } +static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg) +{ + return crypto_hash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); +} + int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); @@ -187,7 +214,7 @@ static inline void ahash_request_complete(struct ahash_request *req, int err) static inline u32 ahash_request_flags(struct ahash_request *req) { - return req->base.flags; + return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE; } static inline struct crypto_ahash *crypto_spawn_ahash( @@ -247,20 +274,96 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_shash, base); } -static inline bool ahash_request_chained(struct ahash_request *req) +static inline bool ahash_request_isvirt(struct ahash_request *req) { - return false; + return req->base.flags & CRYPTO_AHASH_REQ_VIRT; } -static inline bool ahash_request_isvirt(struct ahash_request *req) +static inline bool crypto_ahash_req_virt(struct crypto_ahash 
*tfm) { - return req->base.flags & CRYPTO_AHASH_REQ_VIRT; + return crypto_tfm_req_virt(&tfm->base); } -static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm) +static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm) { - return crypto_tfm_req_chain(&tfm->base); + return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb); } +static inline struct ahash_request *ahash_fbreq_on_stack_init( + char *buf, struct ahash_request *old) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(old); + struct ahash_request *req = (void *)buf; + + crypto_stack_request_init(&req->base, + crypto_ahash_tfm(crypto_ahash_fb(tfm))); + ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL); + req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE; + req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE; + req->src = old->src; + req->result = old->result; + req->nbytes = old->nbytes; + + return req; +} + +/* Return the state size without partial block for block-only algorithms. */ +static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm) +{ + return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1; +} + +/* This can only be used if the request was never cloned. */ +#define HASH_REQUEST_ZERO(name) \ + memzero_explicit(__##name##_req, sizeof(__##name##_req)) + +/** + * crypto_ahash_export_core() - extract core state for message digest + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * Export the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +int crypto_ahash_export_core(struct ahash_request *req, void *out); + +/** + * crypto_ahash_import_core() - import core state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * Import the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the import was successful; < 0 if an error occurred + */ +int crypto_ahash_import_core(struct ahash_request *req, const void *in); + +/** + * crypto_shash_export_core() - extract core state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * Export the hash state without the partial block buffer. + * + * Context: Softirq or process context. + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ +int crypto_shash_export_core(struct shash_desc *desc, void *out); + +/** + * crypto_shash_import_core() - import core state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * Import the hash state without the partial block buffer. + * + * Context: Softirq or process context. 
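As a usage sketch of the fallback-request helpers above: a driver that cannot process a particular request could hand the whole operation to its fallback tfm as below. Whether a real driver forwards the complete digest or individual steps is driver-specific; this only illustrates the macro and init helper shown above.

/* Hypothetical full fallback of a digest request to the fb tfm. */
static int example_ahash_fallback_digest(struct ahash_request *req)
{
	HASH_FBREQ_ON_STACK(fbreq, req);

	/* src, result and nbytes were copied from @req by the macro */
	return crypto_ahash_digest(fbreq);
}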
+ * Return: 0 if the import was successful; < 0 if an error occurred + */ +int crypto_shash_import_core(struct shash_desc *desc, const void *in); + #endif /* _CRYPTO_INTERNAL_HASH_H */ diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h index e614594f88c1..c60315f47562 100644 --- a/include/crypto/internal/poly1305.h +++ b/include/crypto/internal/poly1305.h @@ -6,9 +6,8 @@ #ifndef _CRYPTO_INTERNAL_POLY1305_H #define _CRYPTO_INTERNAL_POLY1305_H -#include <linux/unaligned.h> -#include <linux/types.h> #include <crypto/poly1305.h> +#include <linux/types.h> /* * Poly1305 core functions. These only accept whole blocks; the caller must @@ -31,4 +30,29 @@ void poly1305_core_blocks(struct poly1305_state *state, void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], void *dst); +void poly1305_block_init_arch(struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +void poly1305_block_init_generic(struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src, + unsigned int len, u32 padbit); + +static inline void poly1305_blocks_generic(struct poly1305_block_state *state, + const u8 *src, unsigned int len, + u32 padbit) +{ + poly1305_core_blocks(&state->h, &state->core_r, src, + len / POLY1305_BLOCK_SIZE, padbit); +} + +void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], const u32 nonce[4]); + +static inline void poly1305_emit_generic(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]) +{ + poly1305_core_emit(state, nonce, digest); +} + #endif diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index f25aa2ea3b48..533d6c16a491 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -9,10 +9,7 @@ #ifndef _CRYPTO_SCOMP_INT_H #define _CRYPTO_SCOMP_INT_H -#include <crypto/acompress.h> -#include <crypto/algapi.h> - -struct acomp_req; +#include <crypto/internal/acompress.h> struct crypto_scomp { struct crypto_tfm base; @@ -26,12 +23,10 @@ struct crypto_scomp { * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation * @base: Common crypto API algorithm data structure - * @stream: Per-cpu memory for algorithm + * @streams: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with acomp */ struct scomp_alg { - void *(*alloc_ctx)(void); - void (*free_ctx)(void *ctx); int (*compress)(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); @@ -40,6 +35,14 @@ struct scomp_alg { void *ctx); union { + struct { + void *(*alloc_ctx)(void); + void (*free_ctx)(void *ctx); + }; + struct crypto_acomp_streams streams; + }; + + union { struct COMP_ALG_COMMON; struct comp_alg_common calg; }; diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h new file mode 100644 index 000000000000..b9bccd3ff57f --- /dev/null +++ b/include/crypto/internal/sha2.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _CRYPTO_INTERNAL_SHA2_H +#define _CRYPTO_INTERNAL_SHA2_H + +#include <crypto/internal/simd.h> +#include <crypto/sha2.h> +#include <linux/compiler_attributes.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/unaligned.h> + +#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) +bool sha256_is_arch_optimized(void); +#else 
+static inline bool sha256_is_arch_optimized(void) +{ + return false; +} +#endif +void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); +void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); + +static inline void sha256_choose_blocks( + u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks, + bool force_generic, bool force_simd) +{ + if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic) + sha256_blocks_generic(state, data, nblocks); + else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) && + (force_simd || crypto_simd_usable())) + sha256_blocks_simd(state, data, nblocks); + else + sha256_blocks_arch(state, data, nblocks); +} + +static __always_inline void sha256_finup( + struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE], + size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size, + bool force_generic, bool force_simd) +{ + const size_t bit_offset = SHA256_BLOCK_SIZE - 8; + __be64 *bits = (__be64 *)&buf[bit_offset]; + int i; + + buf[len++] = 0x80; + if (len > bit_offset) { + memset(&buf[len], 0, SHA256_BLOCK_SIZE - len); + sha256_choose_blocks(sctx->state, buf, 1, force_generic, + force_simd); + len = 0; + } + + memset(&buf[len], 0, bit_offset - len); + *bits = cpu_to_be64(sctx->count << 3); + sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd); + + for (i = 0; i < digest_size; i += 4) + put_unaligned_be32(sctx->state[i / 4], out + i); +} + +#endif /* _CRYPTO_INTERNAL_SHA2_H */ diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h index be97b97a75dd..7e7f1ac3b7fd 100644 --- a/include/crypto/internal/simd.h +++ b/include/crypto/internal/simd.h @@ -6,6 +6,7 @@ #ifndef _CRYPTO_INTERNAL_SIMD_H #define _CRYPTO_INTERNAL_SIMD_H +#include <asm/simd.h> #include <linux/percpu.h> #include <linux/types.h> @@ -43,14 +44,9 @@ void simd_unregister_aeads(struct aead_alg *algs, int count, * * This delegates to may_use_simd(), except that this also returns false if SIMD * in crypto code has been temporarily disabled on this CPU by the crypto - * self-tests, in order to test the no-SIMD fallback code. This override is - * currently limited to configurations where the extra self-tests are enabled, - * because it might be a bit too invasive to be part of the regular self-tests. - * - * This is a macro so that <asm/simd.h>, which some architectures don't have, - * doesn't have to be included directly here. + * self-tests, in order to test the no-SIMD fallback code. */ -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +#ifdef CONFIG_CRYPTO_SELFTESTS DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test); #define crypto_simd_usable() \ (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test)) diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index a958ab0636ad..d5aa535263f6 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -10,6 +10,7 @@ #include <crypto/algapi.h> #include <crypto/internal/cipher.h> +#include <crypto/scatterwalk.h> #include <crypto/skcipher.h> #include <linux/types.h> @@ -54,48 +55,6 @@ struct crypto_lskcipher_spawn { struct crypto_spawn base; }; -struct skcipher_walk { - union { - /* Virtual address of the source. */ - struct { - struct { - const void *const addr; - } virt; - } src; - - /* Private field for the API, do not use. 
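Putting the SHA-256 block helpers above together, a hedged sketch of a one-shot digest; struct crypto_sha256_state, the SHA256_H* initial values and the size macros are assumed to come from <crypto/sha2.h>, and force_generic/force_simd are simply left false so the arch code is used when available.

/* Hypothetical one-shot SHA-256 using the internal block helpers. */
static void example_sha256(const u8 *data, size_t len,
			   u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_sha256_state sctx = {
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 },
	};
	u8 buf[SHA256_BLOCK_SIZE];
	size_t remain = len % SHA256_BLOCK_SIZE;

	sctx.count = len;
	if (len != remain)
		sha256_choose_blocks(sctx.state, data,
				     (len - remain) / SHA256_BLOCK_SIZE,
				     false, false);
	memcpy(buf, data + len - remain, remain);
	sha256_finup(&sctx, buf, remain, out, SHA256_DIGEST_SIZE,
		     false, false);
}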
*/ - struct scatter_walk in; - }; - - unsigned int nbytes; - - union { - /* Virtual address of the destination. */ - struct { - struct { - void *const addr; - } virt; - } dst; - - /* Private field for the API, do not use. */ - struct scatter_walk out; - }; - - unsigned int total; - - u8 *page; - u8 *buffer; - u8 *oiv; - void *iv; - - unsigned int ivsize; - - int flags; - unsigned int blocksize; - unsigned int stride; - unsigned int alignmask; -}; - static inline struct crypto_instance *skcipher_crypto_instance( struct skcipher_instance *inst) { @@ -212,7 +171,6 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); int lskcipher_register_instance(struct crypto_template *tmpl, struct lskcipher_instance *inst); -int skcipher_walk_done(struct skcipher_walk *walk, int res); int skcipher_walk_virt(struct skcipher_walk *__restrict walk, struct skcipher_request *__restrict req, bool atomic); @@ -223,11 +181,6 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, struct aead_request *__restrict req, bool atomic); -static inline void skcipher_walk_abort(struct skcipher_walk *walk) -{ - skcipher_walk_done(walk, -ECANCELED); -} - static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) { return crypto_tfm_ctx(&tfm->base); diff --git a/include/crypto/md5.h b/include/crypto/md5.h index cf9e9dec3d21..198b5d69b92f 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h @@ -8,6 +8,7 @@ #define MD5_HMAC_BLOCK_SIZE 64 #define MD5_BLOCK_WORDS 16 #define MD5_HASH_WORDS 4 +#define MD5_STATE_SIZE 24 #define MD5_H0 0x67452301UL #define MD5_H1 0xefcdab89UL @@ -18,8 +19,8 @@ extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; struct md5_state { u32 hash[MD5_HASH_WORDS]; - u32 block[MD5_BLOCK_WORDS]; u64 byte_count; + u32 block[MD5_BLOCK_WORDS]; }; #endif diff --git a/include/crypto/null.h b/include/crypto/null.h index 0ef577cc00e3..1c66abf9de3b 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -9,7 +9,4 @@ #define NULL_DIGEST_SIZE 0 #define NULL_IV_SIZE 0 -struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void); -void crypto_put_default_null_skcipher(void); - #endif diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index 090692ec3bc7..e54abda8cfe9 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -7,7 +7,6 @@ #define _CRYPTO_POLY1305_H #include <linux/types.h> -#include <linux/crypto.h> #define POLY1305_BLOCK_SIZE 16 #define POLY1305_KEY_SIZE 32 @@ -38,17 +37,8 @@ struct poly1305_state { }; }; -struct poly1305_desc_ctx { - /* partial buffer */ - u8 buf[POLY1305_BLOCK_SIZE]; - /* bytes used in partial buffer */ - unsigned int buflen; - /* how many keys have been set in r[] */ - unsigned short rset; - /* whether s[] has been set */ - bool sset; - /* finalize key */ - u32 s[4]; +/* Combined state for block function. 
*/ +struct poly1305_block_state { /* accumulator */ struct poly1305_state h; /* key */ @@ -58,42 +48,29 @@ struct poly1305_desc_ctx { }; }; -void poly1305_init_arch(struct poly1305_desc_ctx *desc, - const u8 key[POLY1305_KEY_SIZE]); -void poly1305_init_generic(struct poly1305_desc_ctx *desc, - const u8 key[POLY1305_KEY_SIZE]); - -static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) -{ - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) - poly1305_init_arch(desc, key); - else - poly1305_init_generic(desc, key); -} - -void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src, - unsigned int nbytes); -void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, - unsigned int nbytes); - -static inline void poly1305_update(struct poly1305_desc_ctx *desc, - const u8 *src, unsigned int nbytes) -{ - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) - poly1305_update_arch(desc, src, nbytes); - else - poly1305_update_generic(desc, src, nbytes); -} +struct poly1305_desc_ctx { + /* partial buffer */ + u8 buf[POLY1305_BLOCK_SIZE]; + /* bytes used in partial buffer */ + unsigned int buflen; + /* finalize key */ + u32 s[4]; + struct poly1305_block_state state; +}; -void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest); -void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest); +void poly1305_init(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); +void poly1305_update(struct poly1305_desc_ctx *desc, + const u8 *src, unsigned int nbytes); +void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest); -static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest) +#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305) +bool poly1305_is_arch_optimized(void); +#else +static inline bool poly1305_is_arch_optimized(void) { - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) - poly1305_final_arch(desc, digest); - else - poly1305_final_generic(desc, digest); + return false; } +#endif #endif diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h index 1d630f371f77..d2e63743e592 100644 --- a/include/crypto/polyval.h +++ b/include/crypto/polyval.h @@ -8,15 +8,7 @@ #ifndef _CRYPTO_POLYVAL_H #define _CRYPTO_POLYVAL_H -#include <linux/types.h> -#include <linux/crypto.h> - #define POLYVAL_BLOCK_SIZE 16 #define POLYVAL_DIGEST_SIZE 16 -void polyval_mul_non4k(u8 *op1, const u8 *op2); - -void polyval_update_non4k(const u8 *key, const u8 *in, - size_t nblocks, u8 *accumulator); - #endif diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 5ac4388f50e1..f8224cc390f8 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -102,12 +102,10 @@ static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg) } /** - * crypto_rng_alg - obtain name of RNG - * @tfm: cipher handle - * - * Return the generic name (cra_name) of the initialized random number generator + * crypto_rng_alg() - obtain 'struct rng_alg' pointer from RNG handle + * @tfm: RNG handle * - * Return: generic name string + * Return: Pointer to 'struct rng_alg', derived from @tfm RNG handle */ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) { diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 94a8585f26b2..15ab743f68c8 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -11,11 +11,64 @@ #ifndef _CRYPTO_SCATTERWALK_H #define _CRYPTO_SCATTERWALK_H -#include <crypto/algapi.h> - +#include <linux/errno.h> #include <linux/highmem.h> 
#include <linux/mm.h> #include <linux/scatterlist.h> +#include <linux/types.h> + +struct scatter_walk { + /* Must be the first member, see struct skcipher_walk. */ + union { + void *const addr; + + /* Private API field, do not touch. */ + union crypto_no_such_thing *__addr; + }; + struct scatterlist *sg; + unsigned int offset; +}; + +struct skcipher_walk { + union { + /* Virtual address of the source. */ + struct { + struct { + const void *const addr; + } virt; + } src; + + /* Private field for the API, do not use. */ + struct scatter_walk in; + }; + + union { + /* Virtual address of the destination. */ + struct { + struct { + void *const addr; + } virt; + } dst; + + /* Private field for the API, do not use. */ + struct scatter_walk out; + }; + + unsigned int nbytes; + unsigned int total; + + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + + unsigned int ivsize; + + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; static inline void scatterwalk_crypto_chain(struct scatterlist *head, struct scatterlist *sg, int num) @@ -243,4 +296,12 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, unsigned int len); +int skcipher_walk_first(struct skcipher_walk *walk, bool atomic); +int skcipher_walk_done(struct skcipher_walk *walk, int res); + +static inline void skcipher_walk_abort(struct skcipher_walk *walk) +{ + skcipher_walk_done(walk, -ECANCELED); +} + #endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h index 044ecea60ac8..f48230b1413c 100644 --- a/include/crypto/sha1.h +++ b/include/crypto/sha1.h @@ -10,6 +10,7 @@ #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 +#define SHA1_STATE_SIZE offsetof(struct sha1_state, buffer) #define SHA1_H0 0x67452301UL #define SHA1_H1 0xefcdab89UL @@ -25,14 +26,6 @@ struct sha1_state { u8 buffer[SHA1_BLOCK_SIZE]; }; -struct shash_desc; - -extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len); - -extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash); - /* * An implementation of SHA-1's compression function. Don't use in new code! 
* You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h index 0c342ed0d038..62701d136c79 100644 --- a/include/crypto/sha1_base.h +++ b/include/crypto/sha1_base.h @@ -10,10 +10,9 @@ #include <crypto/internal/hash.h> #include <crypto/sha1.h> -#include <linux/crypto.h> -#include <linux/module.h> +#include <linux/math.h> #include <linux/string.h> - +#include <linux/types.h> #include <linux/unaligned.h> typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks); @@ -32,63 +31,38 @@ static inline int sha1_base_init(struct shash_desc *desc) return 0; } -static inline int sha1_base_do_update(struct shash_desc *desc, - const u8 *data, - unsigned int len, - sha1_block_fn *block_fn) +static inline int sha1_base_do_update_blocks(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha1_block_fn *block_fn) { + unsigned int remain = len - round_down(len, SHA1_BLOCK_SIZE); struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; - - sctx->count += len; - - if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) { - int blocks; - - if (partial) { - int p = SHA1_BLOCK_SIZE - partial; - - memcpy(sctx->buffer + partial, data, p); - data += p; - len -= p; - - block_fn(sctx, sctx->buffer, 1); - } - blocks = len / SHA1_BLOCK_SIZE; - len %= SHA1_BLOCK_SIZE; - - if (blocks) { - block_fn(sctx, data, blocks); - data += blocks * SHA1_BLOCK_SIZE; - } - partial = 0; - } - if (len) - memcpy(sctx->buffer + partial, data, len); - - return 0; + sctx->count += len - remain; + block_fn(sctx, data, len / SHA1_BLOCK_SIZE); + return remain; } -static inline int sha1_base_do_finalize(struct shash_desc *desc, - sha1_block_fn *block_fn) +static inline int sha1_base_do_finup(struct shash_desc *desc, + const u8 *src, unsigned int len, + sha1_block_fn *block_fn) { - const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64); + unsigned int bit_offset = SHA1_BLOCK_SIZE / 8 - 1; struct sha1_state *sctx = shash_desc_ctx(desc); - __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); - unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; - - sctx->buffer[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial); - partial = 0; - - block_fn(sctx, sctx->buffer, 1); - } - - memset(sctx->buffer + partial, 0x0, bit_offset - partial); - *bits = cpu_to_be64(sctx->count << 3); - block_fn(sctx, sctx->buffer, 1); + union { + __be64 b64[SHA1_BLOCK_SIZE / 4]; + u8 u8[SHA1_BLOCK_SIZE * 2]; + } block = {}; + + if (len >= bit_offset * 8) + bit_offset += SHA1_BLOCK_SIZE / 8; + memcpy(&block, src, len); + block.u8[len] = 0x80; + sctx->count += len; + block.b64[bit_offset] = cpu_to_be64(sctx->count << 3); + block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SHA1_BLOCK_SIZE); + memzero_explicit(&block, sizeof(block)); return 0; } @@ -102,7 +76,6 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++) put_unaligned_be32(sctx->state[i], digest++); - memzero_explicit(sctx, sizeof(*sctx)); return 0; } diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h index b9e9281d76c9..4912572578dc 100644 --- a/include/crypto/sha2.h +++ b/include/crypto/sha2.h @@ -13,12 +13,14 @@ #define SHA256_DIGEST_SIZE 32 #define SHA256_BLOCK_SIZE 64 +#define SHA256_STATE_WORDS 8 #define SHA384_DIGEST_SIZE 48 #define SHA384_BLOCK_SIZE 128 #define SHA512_DIGEST_SIZE 64 #define SHA512_BLOCK_SIZE 
128 +#define SHA512_STATE_SIZE 80 #define SHA224_H0 0xc1059ed8UL #define SHA224_H1 0x367cd507UL @@ -64,9 +66,19 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE]; extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE]; -struct sha256_state { - u32 state[SHA256_DIGEST_SIZE / 4]; +struct crypto_sha256_state { + u32 state[SHA256_STATE_WORDS]; u64 count; +}; + +struct sha256_state { + union { + struct crypto_sha256_state ctx; + struct { + u32 state[SHA256_STATE_WORDS]; + u64 count; + }; + }; u8 buf[SHA256_BLOCK_SIZE]; }; @@ -76,31 +88,7 @@ struct sha512_state { u8 buf[SHA512_BLOCK_SIZE]; }; -struct shash_desc; - -extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len); - -extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash); - -extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, - unsigned int len); - -extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash); - -/* - * Stand-alone implementation of the SHA256 algorithm. It is designed to - * have as little dependencies as possible so it can be used in the - * kexec_file purgatory. In other cases you should generally use the - * hash APIs from include/crypto/hash.h. Especially when hashing large - * amounts of data as those APIs may be hw-accelerated. - * - * For details see lib/crypto/sha256.c - */ - -static inline void sha256_init(struct sha256_state *sctx) +static inline void sha256_block_init(struct crypto_sha256_state *sctx) { sctx->state[0] = SHA256_H0; sctx->state[1] = SHA256_H1; @@ -112,11 +100,16 @@ static inline void sha256_init(struct sha256_state *sctx) sctx->state[7] = SHA256_H7; sctx->count = 0; } -void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); -void sha256_final(struct sha256_state *sctx, u8 *out); -void sha256(const u8 *data, unsigned int len, u8 *out); -static inline void sha224_init(struct sha256_state *sctx) +static inline void sha256_init(struct sha256_state *sctx) +{ + sha256_block_init(&sctx->ctx); +} +void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len); +void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE]); +void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]); + +static inline void sha224_block_init(struct crypto_sha256_state *sctx) { sctx->state[0] = SHA224_H0; sctx->state[1] = SHA224_H1; @@ -128,7 +121,12 @@ static inline void sha224_init(struct sha256_state *sctx) sctx->state[7] = SHA224_H7; sctx->count = 0; } + +static inline void sha224_init(struct sha256_state *sctx) +{ + sha224_block_init(&sctx->ctx); +} /* Simply use sha256_update as it is equivalent to sha224_update. 
*/ -void sha224_final(struct sha256_state *sctx, u8 *out); +void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE]); #endif /* _CRYPTO_SHA2_H */ diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h deleted file mode 100644 index e0418818d63c..000000000000 --- a/include/crypto/sha256_base.h +++ /dev/null @@ -1,135 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * sha256_base.h - core logic for SHA-256 implementations - * - * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> - */ - -#ifndef _CRYPTO_SHA256_BASE_H -#define _CRYPTO_SHA256_BASE_H - -#include <asm/byteorder.h> -#include <linux/unaligned.h> -#include <crypto/internal/hash.h> -#include <crypto/sha2.h> -#include <linux/string.h> -#include <linux/types.h> - -typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src, - int blocks); - -static inline int sha224_base_init(struct shash_desc *desc) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - sha224_init(sctx); - return 0; -} - -static inline int sha256_base_init(struct shash_desc *desc) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - sha256_init(sctx); - return 0; -} - -static inline int lib_sha256_base_do_update(struct sha256_state *sctx, - const u8 *data, - unsigned int len, - sha256_block_fn *block_fn) -{ - unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; - - sctx->count += len; - - if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) { - int blocks; - - if (partial) { - int p = SHA256_BLOCK_SIZE - partial; - - memcpy(sctx->buf + partial, data, p); - data += p; - len -= p; - - block_fn(sctx, sctx->buf, 1); - } - - blocks = len / SHA256_BLOCK_SIZE; - len %= SHA256_BLOCK_SIZE; - - if (blocks) { - block_fn(sctx, data, blocks); - data += blocks * SHA256_BLOCK_SIZE; - } - partial = 0; - } - if (len) - memcpy(sctx->buf + partial, data, len); - - return 0; -} - -static inline int sha256_base_do_update(struct shash_desc *desc, - const u8 *data, - unsigned int len, - sha256_block_fn *block_fn) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - return lib_sha256_base_do_update(sctx, data, len, block_fn); -} - -static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx, - sha256_block_fn *block_fn) -{ - const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64); - __be64 *bits = (__be64 *)(sctx->buf + bit_offset); - unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; - - sctx->buf[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial); - partial = 0; - - block_fn(sctx, sctx->buf, 1); - } - - memset(sctx->buf + partial, 0x0, bit_offset - partial); - *bits = cpu_to_be64(sctx->count << 3); - block_fn(sctx, sctx->buf, 1); - - return 0; -} - -static inline int sha256_base_do_finalize(struct shash_desc *desc, - sha256_block_fn *block_fn) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - return lib_sha256_base_do_finalize(sctx, block_fn); -} - -static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out, - unsigned int digest_size) -{ - __be32 *digest = (__be32 *)out; - int i; - - for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32)) - put_unaligned_be32(sctx->state[i], digest++); - - memzero_explicit(sctx, sizeof(*sctx)); - return 0; -} - -static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) -{ - unsigned int digest_size = crypto_shash_digestsize(desc->tfm); - struct sha256_state *sctx = shash_desc_ctx(desc); - - return lib_sha256_base_finish(sctx, out, digest_size); -} 
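With the sha256_base.h shash helpers being removed here, hashing through the <crypto/sha2.h> declarations earlier in this patch reduces to the usual init/update/final sequence. A minimal sketch, assuming only the interfaces declared in the sha2.h hunk above (the wrapper function name is illustrative and not part of the patch):

#include <crypto/sha2.h>
#include <linux/types.h>

static void demo_sha256_buffer(const u8 *data, size_t len,
			       u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);		/* seeds sctx.ctx via sha256_block_init() */
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);	/* pads and emits the big-endian digest */
	/* sha256(data, len, digest) is the equivalent one-shot helper. */
}
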
- -#endif /* _CRYPTO_SHA256_BASE_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h index 080f60c2e6b1..41e1b83a6d91 100644 --- a/include/crypto/sha3.h +++ b/include/crypto/sha3.h @@ -5,30 +5,32 @@ #ifndef __CRYPTO_SHA3_H__ #define __CRYPTO_SHA3_H__ +#include <linux/types.h> + #define SHA3_224_DIGEST_SIZE (224 / 8) #define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE) +#define SHA3_224_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_224_BLOCK_SIZE + 1 #define SHA3_256_DIGEST_SIZE (256 / 8) #define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE) +#define SHA3_256_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_256_BLOCK_SIZE + 1 #define SHA3_384_DIGEST_SIZE (384 / 8) #define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE) +#define SHA3_384_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_384_BLOCK_SIZE + 1 #define SHA3_512_DIGEST_SIZE (512 / 8) #define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE) +#define SHA3_512_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1 -struct sha3_state { - u64 st[25]; - unsigned int rsiz; - unsigned int rsizw; +#define SHA3_STATE_SIZE 200 - unsigned int partial; - u8 buf[SHA3_224_BLOCK_SIZE]; +struct shash_desc; + +struct sha3_state { + u64 st[SHA3_STATE_SIZE / 8]; }; int crypto_sha3_init(struct shash_desc *desc); -int crypto_sha3_update(struct shash_desc *desc, const u8 *data, - unsigned int len); -int crypto_sha3_final(struct shash_desc *desc, u8 *out); #endif diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h index 679916a84cb2..aa814bab442d 100644 --- a/include/crypto/sha512_base.h +++ b/include/crypto/sha512_base.h @@ -10,10 +10,10 @@ #include <crypto/internal/hash.h> #include <crypto/sha2.h> -#include <linux/crypto.h> -#include <linux/module.h> +#include <linux/compiler.h> +#include <linux/math.h> #include <linux/string.h> - +#include <linux/types.h> #include <linux/unaligned.h> typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src, @@ -53,66 +53,51 @@ static inline int sha512_base_init(struct shash_desc *desc) return 0; } -static inline int sha512_base_do_update(struct shash_desc *desc, - const u8 *data, - unsigned int len, - sha512_block_fn *block_fn) +static inline int sha512_base_do_update_blocks(struct shash_desc *desc, + const u8 *data, + unsigned int len, + sha512_block_fn *block_fn) { + unsigned int remain = len - round_down(len, SHA512_BLOCK_SIZE); struct sha512_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + len -= remain; sctx->count[0] += len; if (sctx->count[0] < len) sctx->count[1]++; - - if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) { - int blocks; - - if (partial) { - int p = SHA512_BLOCK_SIZE - partial; - - memcpy(sctx->buf + partial, data, p); - data += p; - len -= p; - - block_fn(sctx, sctx->buf, 1); - } - - blocks = len / SHA512_BLOCK_SIZE; - len %= SHA512_BLOCK_SIZE; - - if (blocks) { - block_fn(sctx, data, blocks); - data += blocks * SHA512_BLOCK_SIZE; - } - partial = 0; - } - if (len) - memcpy(sctx->buf + partial, data, len); - - return 0; + block_fn(sctx, data, len / SHA512_BLOCK_SIZE); + return remain; } -static inline int sha512_base_do_finalize(struct shash_desc *desc, - sha512_block_fn *block_fn) +static inline int sha512_base_do_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, + sha512_block_fn *block_fn) { - const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]); + unsigned int bit_offset = SHA512_BLOCK_SIZE / 8 - 2; struct sha512_state *sctx = shash_desc_ctx(desc); - __be64 *bits = (__be64 *)(sctx->buf + 
bit_offset); - unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; + union { + __be64 b64[SHA512_BLOCK_SIZE / 4]; + u8 u8[SHA512_BLOCK_SIZE * 2]; + } block = {}; - sctx->buf[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial); - partial = 0; + if (len >= SHA512_BLOCK_SIZE) { + int remain; - block_fn(sctx, sctx->buf, 1); + remain = sha512_base_do_update_blocks(desc, src, len, block_fn); + src += len - remain; + len = remain; } - memset(sctx->buf + partial, 0x0, bit_offset - partial); - bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); - bits[1] = cpu_to_be64(sctx->count[0] << 3); - block_fn(sctx, sctx->buf, 1); + if (len >= bit_offset * 8) + bit_offset += SHA512_BLOCK_SIZE / 8; + memcpy(&block, src, len); + block.u8[len] = 0x80; + sctx->count[0] += len; + block.b64[bit_offset] = cpu_to_be64(sctx->count[1] << 3 | + sctx->count[0] >> 61); + block.b64[bit_offset + 1] = cpu_to_be64(sctx->count[0] << 3); + block_fn(sctx, block.u8, (bit_offset + 2) * 8 / SHA512_BLOCK_SIZE); + memzero_explicit(&block, sizeof(block)); return 0; } @@ -126,9 +111,10 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64)) put_unaligned_be64(sctx->state[i], digest++); - - memzero_explicit(sctx, sizeof(*sctx)); return 0; } +void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src, + int blocks); + #endif /* _CRYPTO_SHA512_BASE_H */ diff --git a/include/crypto/sig.h b/include/crypto/sig.h index 11024708c069..fa6dafafab3f 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -128,7 +128,7 @@ static inline void crypto_free_sig(struct crypto_sig *tfm) /** * crypto_sig_keysize() - Get key size * - * Function returns the key size in bytes. + * Function returns the key size in bits. * Function assumes that the key is already set in the transformation. If this * function is called without a setkey or with a failed setkey, you may end up * in a NULL dereference. 
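Since the crypto_sig_keysize() kernel-doc above now states that the return value is in bits rather than bytes, callers that size buffers from it have to convert to bytes themselves. A minimal sketch under that assumption (the helper name, GFP flag and lack of error handling are illustrative, not taken from this patch):

#include <crypto/sig.h>
#include <linux/math.h>
#include <linux/slab.h>

static void *demo_alloc_keysize_buf(struct crypto_sig *tfm,
				    unsigned int *nbytes)
{
	/* crypto_sig_keysize() reports bits; round up to whole bytes. */
	*nbytes = DIV_ROUND_UP(crypto_sig_keysize(tfm), 8);
	return kzalloc(*nbytes, GFP_KERNEL);
}
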
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h index 1f021ad0533f..c8d02c86c298 100644 --- a/include/crypto/sm3.h +++ b/include/crypto/sm3.h @@ -14,6 +14,7 @@ #define SM3_DIGEST_SIZE 32 #define SM3_BLOCK_SIZE 64 +#define SM3_STATE_SIZE 40 #define SM3_T1 0x79CC4519 #define SM3_T2 0x7A879D8A @@ -58,7 +59,6 @@ static inline void sm3_init(struct sm3_state *sctx) sctx->count = 0; } -void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len); -void sm3_final(struct sm3_state *sctx, u8 *out); +void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks); #endif diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h index b33ed39c2bce..7c53570bc05e 100644 --- a/include/crypto/sm3_base.h +++ b/include/crypto/sm3_base.h @@ -11,87 +11,59 @@ #include <crypto/internal/hash.h> #include <crypto/sm3.h> -#include <linux/crypto.h> +#include <linux/math.h> #include <linux/module.h> #include <linux/string.h> +#include <linux/types.h> #include <linux/unaligned.h> typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks); static inline int sm3_base_init(struct shash_desc *desc) { - struct sm3_state *sctx = shash_desc_ctx(desc); - - sctx->state[0] = SM3_IVA; - sctx->state[1] = SM3_IVB; - sctx->state[2] = SM3_IVC; - sctx->state[3] = SM3_IVD; - sctx->state[4] = SM3_IVE; - sctx->state[5] = SM3_IVF; - sctx->state[6] = SM3_IVG; - sctx->state[7] = SM3_IVH; - sctx->count = 0; - + sm3_init(shash_desc_ctx(desc)); return 0; } -static inline int sm3_base_do_update(struct shash_desc *desc, - const u8 *data, - unsigned int len, - sm3_block_fn *block_fn) +static inline int sm3_base_do_update_blocks(struct shash_desc *desc, + const u8 *data, unsigned int len, + sm3_block_fn *block_fn) { + unsigned int remain = len - round_down(len, SM3_BLOCK_SIZE); struct sm3_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - - sctx->count += len; - - if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { - int blocks; - - if (partial) { - int p = SM3_BLOCK_SIZE - partial; - - memcpy(sctx->buffer + partial, data, p); - data += p; - len -= p; - block_fn(sctx, sctx->buffer, 1); - } - - blocks = len / SM3_BLOCK_SIZE; - len %= SM3_BLOCK_SIZE; - - if (blocks) { - block_fn(sctx, data, blocks); - data += blocks * SM3_BLOCK_SIZE; - } - partial = 0; - } - if (len) - memcpy(sctx->buffer + partial, data, len); - - return 0; + sctx->count += len - remain; + block_fn(sctx, data, len / SM3_BLOCK_SIZE); + return remain; } -static inline int sm3_base_do_finalize(struct shash_desc *desc, - sm3_block_fn *block_fn) +static inline int sm3_base_do_finup(struct shash_desc *desc, + const u8 *src, unsigned int len, + sm3_block_fn *block_fn) { - const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64); + unsigned int bit_offset = SM3_BLOCK_SIZE / 8 - 1; struct sm3_state *sctx = shash_desc_ctx(desc); - __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + union { + __be64 b64[SM3_BLOCK_SIZE / 4]; + u8 u8[SM3_BLOCK_SIZE * 2]; + } block = {}; - sctx->buffer[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial); - partial = 0; + if (len >= SM3_BLOCK_SIZE) { + int remain; - block_fn(sctx, sctx->buffer, 1); + remain = sm3_base_do_update_blocks(desc, src, len, block_fn); + src += len - remain; + len = remain; } - memset(sctx->buffer + partial, 0x0, bit_offset - partial); - *bits = cpu_to_be64(sctx->count << 3); - block_fn(sctx, sctx->buffer, 1); + if (len 
>= bit_offset * 8) + bit_offset += SM3_BLOCK_SIZE / 8; + memcpy(&block, src, len); + block.u8[len] = 0x80; + sctx->count += len; + block.b64[bit_offset] = cpu_to_be64(sctx->count << 3); + block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SM3_BLOCK_SIZE); + memzero_explicit(&block, sizeof(block)); return 0; } @@ -104,8 +76,6 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++) put_unaligned_be32(sctx->state[i], digest++); - - memzero_explicit(sctx, sizeof(*sctx)); return 0; } diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h index cae1b4a01971..570f720a113b 100644 --- a/include/crypto/streebog.h +++ b/include/crypto/streebog.h @@ -23,15 +23,10 @@ struct streebog_uint512 { }; struct streebog_state { - union { - u8 buffer[STREEBOG_BLOCK_SIZE]; - struct streebog_uint512 m; - }; struct streebog_uint512 hash; struct streebog_uint512 h; struct streebog_uint512 N; struct streebog_uint512 Sigma; - size_t fillsize; }; #endif /* !_CRYPTO_STREEBOG_H_ */ diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h index abf0bd76e370..68606fa5fe73 100644 --- a/include/hyperv/hvgdk_mini.h +++ b/include/hyperv/hvgdk_mini.h @@ -1013,7 +1013,7 @@ enum hv_register_name { /* * To support arch-generic code calling hv_set/get_register: - * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl + * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrq * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall */ #define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0) diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index a946e0203e6d..8f7931eb7d16 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -104,6 +104,16 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); #else /* ARCH_NEEDS_WEAK_PER_CPU */ +#ifdef MODULE + +#define DEFINE_ALLOC_TAG(_alloc_tag) \ + static struct alloc_tag _alloc_tag __used __aligned(8) \ + __section(ALLOC_TAG_SECTION_NAME) = { \ + .ct = CODE_TAG_INIT, \ + .counters = NULL }; + +#else /* MODULE */ + #define DEFINE_ALLOC_TAG(_alloc_tag) \ static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \ static struct alloc_tag _alloc_tag __used __aligned(8) \ @@ -111,6 +121,8 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); .ct = CODE_TAG_INIT, \ .counters = &_alloc_tag_cntr }; +#endif /* MODULE */ + #endif /* ARCH_NEEDS_WEAK_PER_CPU */ DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 1625c8529e70..65abd5ab8836 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -90,7 +90,6 @@ struct linux_binfmt { struct list_head lh; struct module *module; int (*load_binary)(struct linux_binprm *); - int (*load_shlib)(struct file *); #ifdef CONFIG_COREDUMP int (*core_dump)(struct coredump_params *cprm); unsigned long min_coredump; /* minimal dump size */ diff --git a/include/linux/bio.h b/include/linux/bio.h index b786ec5bcc81..9c37c66ef9ca 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -403,7 +403,6 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) struct request_queue; -extern int submit_bio_wait(struct bio *bio); void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, unsigned short max_vecs, blk_opf_t opf); extern void bio_uninit(struct bio *); @@ -418,6 +417,30 @@ void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, 
unsigned int off); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, size_t off); +void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len); + +/** + * bio_add_max_vecs - number of bio_vecs needed to add data to a bio + * @kaddr: kernel virtual address to add + * @len: length in bytes to add + * + * Calculate how many bio_vecs need to be allocated to add the kernel virtual + * address range in [@kaddr:@len] in the worse case. + */ +static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len) +{ + if (is_vmalloc_addr(kaddr)) + return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE); + return 1; +} + +unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len); +bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len); + +int submit_bio_wait(struct bio *bio); +int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data, + size_t len, enum req_op op); + int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter); void __bio_release_pages(struct bio *bio, bool mark_dirty); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8eb9b3310167..de8c85a03bb7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -9,6 +9,7 @@ #include <linux/prefetch.h> #include <linux/srcu.h> #include <linux/rw_hint.h> +#include <linux/rwsem.h> struct blk_mq_tags; struct blk_flush_queue; @@ -506,6 +507,9 @@ enum hctx_type { * request_queue.tag_set_list. * @srcu: Use as lock when type of the request queue is blocking * (BLK_MQ_F_BLOCKING). + * @update_nr_hwq_lock: + * Synchronize updating nr_hw_queues with add/del disk & + * switching elevator. */ struct blk_mq_tag_set { const struct blk_mq_ops *ops; @@ -527,6 +531,8 @@ struct blk_mq_tag_set { struct mutex tag_list_lock; struct list_head tag_list; struct srcu_struct *srcu; + + struct rw_semaphore update_nr_hwq_lock; }; /** @@ -1031,8 +1037,8 @@ int blk_rq_map_user_io(struct request *, struct rq_map_data *, int blk_rq_map_user_iov(struct request_queue *, struct request *, struct rq_map_data *, const struct iov_iter *, gfp_t); int blk_rq_unmap_user(struct bio *); -int blk_rq_map_kern(struct request_queue *, struct request *, void *, - unsigned int, gfp_t); +int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len, + gfp_t gfp); int blk_rq_append_bio(struct request *rq, struct bio *bio); void blk_execute_rq_nowait(struct request *rq, bool at_head); blk_status_t blk_execute_rq(struct request *rq, bool at_head); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index dce7615c35e7..3d1577f07c1c 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -220,6 +220,7 @@ struct bio { unsigned short bi_flags; /* BIO_* below */ unsigned short bi_ioprio; enum rw_hint bi_write_hint; + u8 bi_write_stream; blk_status_t bi_status; atomic_t __bi_remaining; @@ -286,7 +287,6 @@ struct bio { enum { BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */ BIO_CLONED, /* doesn't own data */ - BIO_BOUNCED, /* bio is a bounce bio */ BIO_QUIET, /* Make BIO Quiet */ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */ BIO_REFFED, /* bio has elevated ->bi_cnt */ @@ -296,6 +296,14 @@ enum { * of this bio. */ BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */ + /* + * This bio has completed bps throttling at the single tg granularity, + * which is different from BIO_BPS_THROTTLED. 
When the bio is enqueued + * into the sq->queued of the upper tg, or is about to be dispatched, + * this flag needs to be cleared. Since blk-throttle and rq_qos are not + * on the same hierarchical level, reuse the value. + */ + BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED, BIO_QOS_MERGED, /* but went through rq_qos merge path */ BIO_REMAPPED, BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9a1f0ee40b56..332b56f323d9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -182,7 +182,6 @@ struct gendisk { struct list_head slave_bdevs; #endif struct timer_rand_state *random; - atomic_t sync_io; /* RAID */ struct disk_events *ev; #ifdef CONFIG_BLK_DEV_ZONED @@ -218,6 +217,8 @@ struct gendisk { * devices that do not have multiple independent access ranges. */ struct blk_independent_access_ranges *ia_ranges; + + struct mutex rqos_state_mutex; /* rqos state change mutex */ }; /** @@ -331,9 +332,6 @@ typedef unsigned int __bitwise blk_features_t; /* skip this queue in blk_mq_(un)quiesce_tagset */ #define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13)) -/* bounce all highmem pages */ -#define BLK_FEAT_BOUNCE_HIGH ((__force blk_features_t)(1u << 14)) - /* undocumented magic for bcache */ #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \ ((__force blk_features_t)(1u << 15)) @@ -347,7 +345,7 @@ typedef unsigned int __bitwise blk_features_t; */ #define BLK_FEAT_INHERIT_MASK \ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \ - BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \ + BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE) /* internal flags in queue_limits.flags */ @@ -405,6 +403,9 @@ struct queue_limits { unsigned short max_integrity_segments; unsigned short max_discard_segments; + unsigned short max_write_streams; + unsigned int write_stream_granularity; + unsigned int max_open_zones; unsigned int max_active_zones; @@ -644,6 +645,8 @@ enum { QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */ QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */ QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */ + QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */ + QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */ QUEUE_FLAG_MAX }; @@ -679,6 +682,10 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); #define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags) #define blk_queue_skip_tagset_quiesce(q) \ ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE) +#define blk_queue_disable_wbt(q) \ + test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags) +#define blk_queue_no_elv_switch(q) \ + test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags) extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); @@ -1288,6 +1295,13 @@ static inline unsigned int bdev_max_segments(struct block_device *bdev) return queue_max_segments(bdev_get_queue(bdev)); } +static inline unsigned short bdev_max_write_streams(struct block_device *bdev) +{ + if (bdev_is_partition(bdev)) + return 0; + return bdev_limits(bdev)->max_write_streams; +} + static inline unsigned queue_logical_block_size(const struct request_queue *q) { return q->limits.logical_block_size; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e7da3c3b098b..166d6de50dbf 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ 
-785,6 +785,17 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags, int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen, struct cgroup_namespace *ns); +static inline void get_cgroup_ns(struct cgroup_namespace *ns) +{ + refcount_inc(&ns->ns.count); +} + +static inline void put_cgroup_ns(struct cgroup_namespace *ns) +{ + if (refcount_dec_and_test(&ns->ns.count)) + free_cgroup_ns(ns); +} + #else /* !CONFIG_CGROUPS */ static inline void free_cgroup_ns(struct cgroup_namespace *ns) { } @@ -795,19 +806,10 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns, return old_ns; } -#endif /* !CONFIG_CGROUPS */ +static inline void get_cgroup_ns(struct cgroup_namespace *ns) { } +static inline void put_cgroup_ns(struct cgroup_namespace *ns) { } -static inline void get_cgroup_ns(struct cgroup_namespace *ns) -{ - if (ns) - refcount_inc(&ns->ns.count); -} - -static inline void put_cgroup_ns(struct cgroup_namespace *ns) -{ - if (ns && refcount_dec_and_test(&ns->ns.count)) - free_cgroup_ns(ns); -} +#endif /* !CONFIG_CGROUPS */ #ifdef CONFIG_CGROUPS diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index 7e57047e1564..7093e1d08af0 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -216,6 +216,25 @@ const volatile void * __must_check_fn(const volatile void *val) #define return_ptr(p) return no_free_ptr(p) +/* + * Only for situations where an allocation is handed in to another function + * and consumed by that function on success. + * + * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL); + * + * setup(f); + * if (some_condition) + * return -EINVAL; + * .... + * ret = bar(f); + * if (!ret) + * retain_and_null_ptr(f); + * return ret; + * + * After retain_and_null_ptr(f) the variable f is NULL and cannot be + * dereferenced anymore. 
+ */ +#define retain_and_null_ptr(p) ((void)__get_and_null(p, NULL)) /* * DEFINE_CLASS(name, type, exit, init, init_args...): diff --git a/include/linux/codetag.h b/include/linux/codetag.h index d14dbd26b370..0ee4c21c6dbc 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -36,10 +36,10 @@ union codetag_ref { struct codetag_type_desc { const char *section; size_t tag_size; - void (*module_load)(struct codetag_type *cttype, - struct codetag_module *cmod); - void (*module_unload)(struct codetag_type *cttype, - struct codetag_module *cmod); + void (*module_load)(struct module *mod, + struct codetag *start, struct codetag *end); + void (*module_unload)(struct module *mod, + struct codetag *start, struct codetag *end); #ifdef CONFIG_MODULES void (*module_replaced)(struct module *mod, struct module *new_mod); bool (*needs_section_mem)(struct module *mod, unsigned long size); diff --git a/include/linux/configfs.h b/include/linux/configfs.h index c771e9d0d0b9..698520b1bfdb 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -120,15 +120,19 @@ struct configfs_attribute { ssize_t (*store)(struct config_item *, const char *, size_t); }; -#define CONFIGFS_ATTR(_pfx, _name) \ +#define CONFIGFS_ATTR_PERM(_pfx, _name, _perm) \ static struct configfs_attribute _pfx##attr_##_name = { \ .ca_name = __stringify(_name), \ - .ca_mode = S_IRUGO | S_IWUSR, \ + .ca_mode = _perm, \ .ca_owner = THIS_MODULE, \ .show = _pfx##_name##_show, \ .store = _pfx##_name##_store, \ } +#define CONFIGFS_ATTR(_pfx, _name) CONFIGFS_ATTR_PERM( \ + _pfx, _name, S_IRUGO | S_IWUSR \ +) + #define CONFIGFS_ATTR_RO(_pfx, _name) \ static struct configfs_attribute _pfx##attr_##_name = { \ .ca_name = __stringify(_name), \ diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 77e6e195d1d6..76e41805b92d 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -28,6 +28,7 @@ struct coredump_params { int vma_count; size_t vma_data_size; struct core_vma_metadata *vma_meta; + struct pid *pid; }; extern unsigned int core_file_note_size_limit; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 3aa955102b34..e6089abc28e2 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -78,6 +78,8 @@ extern ssize_t cpu_show_gds(struct device *dev, extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_old_microcode(struct device *dev, + struct device_attribute *attr, char *buf); extern ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 1987400000b4..df366ee15456 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -60,7 +60,6 @@ enum cpuhp_state { /* PREPARE section invoked on a control CPU */ CPUHP_OFFLINE = 0, CPUHP_CREATE_THREADS, - CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, CPUHP_PERF_X86_AMD_UNCORE_PREP, CPUHP_PERF_POWER, diff --git a/include/linux/crc16.h b/include/linux/crc16.h index 9fa74529b317..b861d969b161 100644 --- a/include/linux/crc16.h +++ b/include/linux/crc16.h @@ -15,14 +15,7 @@ #include <linux/types.h> -extern u16 const crc16_table[256]; - -extern u16 crc16(u16 crc, const u8 *buffer, size_t len); - -static inline u16 crc16_byte(u16 crc, const u8 data) -{ - return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff]; -} +u16 crc16(u16 
crc, const u8 *p, size_t len); #endif /* __CRC16_H */ diff --git a/include/linux/crc32.h b/include/linux/crc32.h index 69c2e8bb3782..569dc13f139f 100644 --- a/include/linux/crc32.h +++ b/include/linux/crc32.h @@ -1,7 +1,4 @@ -/* - * crc32.h - * See linux/lib/crc32.c for license and changes - */ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_CRC32_H #define _LINUX_CRC32_H diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 1e3809d28abd..b50f1954d1bb 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -14,8 +14,7 @@ #include <linux/completion.h> #include <linux/errno.h> -#include <linux/list.h> -#include <linux/refcount.h> +#include <linux/refcount_types.h> #include <linux/slab.h> #include <linux/types.h> @@ -51,6 +50,15 @@ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 /* + * Set if the algorithm data structure should be duplicated into + * kmalloc memory before registration. This is useful for hardware + * that can be disconnected at will. Do not use this if the data + * structure is embedded into a bigger one. Duplicate the overall + * data structure in the driver in that case. + */ +#define CRYPTO_ALG_DUP_FIRST 0x00000200 + +/* * Set if the algorithm has passed automated run-time testing. Note that * if there is no run-time testing for a given algorithm it is considered * to have passed. @@ -125,8 +133,10 @@ */ #define CRYPTO_ALG_FIPS_INTERNAL 0x00020000 -/* Set if the algorithm supports request chains and virtual addresses. */ -#define CRYPTO_ALG_REQ_CHAIN 0x00040000 +/* Set if the algorithm supports virtual addresses. */ +#define CRYPTO_ALG_REQ_VIRT 0x00040000 + +/* The high bits 0xff000000 are reserved for type-specific flags. */ /* * Transform masks and values (for crt_flags). @@ -179,7 +189,6 @@ struct crypto_async_request { struct crypto_tfm *tfm; u32 flags; - int err; }; /** @@ -278,6 +287,7 @@ struct cipher_alg { * to the alignmask of the algorithm being used, in order to * avoid the API having to realign them. Note: the alignmask is * not supported for hash algorithms and is always 0 for them. + * @cra_reqsize: Size of the request context for this algorithm. * @cra_priority: Priority of this transformation implementation. In case * multiple transformations with same @cra_name are available to * the Crypto API, the kernel will use the one with highest @@ -302,17 +312,8 @@ struct cipher_alg { * by @cra_type and @cra_flags above, the associated structure must be * filled with callbacks. This field might be empty. This is the case * for ahash, shash. - * @cra_init: Initialize the cryptographic transformation object. This function - * is used to initialize the cryptographic transformation object. - * This function is called only once at the instantiation time, right - * after the transformation context was allocated. In case the - * cryptographic hardware has some special requirements which need to - * be handled by software, this function shall check for the precise - * requirement of the transformation and put any software fallbacks - * in place. - * @cra_exit: Deinitialize the cryptographic transformation object. This is a - * counterpart to @cra_init, used to remove various changes set in - * @cra_init. + * @cra_init: Deprecated, do not use. + * @cra_exit: Deprecated, do not use. * @cra_u.cipher: Union member which contains a single-block symmetric cipher * definition. See @struct @cipher_alg. * @cra_module: Owner of this transformation implementation. 
Set to THIS_MODULE @@ -333,6 +334,7 @@ struct crypto_alg { unsigned int cra_blocksize; unsigned int cra_ctxsize; unsigned int cra_alignmask; + unsigned int cra_reqsize; int cra_priority; refcount_t cra_refcnt; @@ -409,9 +411,11 @@ struct crypto_tfm { u32 crt_flags; int node; - + + struct crypto_tfm *fb; + void (*exit)(struct crypto_tfm *tfm); - + struct crypto_alg *__crt_alg; void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; @@ -452,6 +456,11 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_alignmask; } +static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_reqsize; +} + static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) { return tfm->crt_flags; @@ -473,22 +482,44 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) return __alignof__(tfm->__crt_ctx); } -static inline void crypto_reqchain_init(struct crypto_async_request *req) +static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm) { - req->err = -EINPROGRESS; - INIT_LIST_HEAD(&req->list); + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; } -static inline void crypto_request_chain(struct crypto_async_request *req, - struct crypto_async_request *head) +static inline bool crypto_req_on_stack(struct crypto_async_request *req) { - req->err = -EINPROGRESS; - list_add_tail(&req->list, &head->list); + return req->flags & CRYPTO_TFM_REQ_ON_STACK; } -static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm) +static inline void crypto_request_set_callback( + struct crypto_async_request *req, u32 flags, + crypto_completion_t compl, void *data) { - return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; + u32 keep = CRYPTO_TFM_REQ_ON_STACK; + + req->complete = compl; + req->data = data; + req->flags &= keep; + req->flags |= flags & ~keep; +} + +static inline void crypto_request_set_tfm(struct crypto_async_request *req, + struct crypto_tfm *tfm) +{ + req->tfm = tfm; + req->flags &= ~CRYPTO_TFM_REQ_ON_STACK; +} + +struct crypto_async_request *crypto_request_clone( + struct crypto_async_request *req, size_t total, gfp_t gfp); + +static inline void crypto_stack_request_init(struct crypto_async_request *req, + struct crypto_tfm *tfm) +{ + req->flags = 0; + crypto_request_set_tfm(req, tfm); + req->flags |= CRYPTO_TFM_REQ_ON_STACK; } #endif /* _LINUX_CRYPTO_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index e9f07e37dd6f..e29823c701ac 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -57,7 +57,8 @@ struct qstr { }; #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } -#define QSTR(n) (struct qstr)QSTR_INIT(n, strlen(n)) +#define QSTR_LEN(n,l) (struct qstr)QSTR_INIT(n,l) +#define QSTR(n) QSTR_LEN(n, strlen(n)) extern const struct qstr empty_name; extern const struct qstr slash_name; @@ -281,7 +282,6 @@ extern void d_exchange(struct dentry *, struct dentry *); extern struct dentry *d_ancestor(struct dentry *, struct dentry *); extern struct dentry *d_lookup(const struct dentry *, const struct qstr *); -extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); static inline unsigned d_count(const struct dentry *dentry) { diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h index d02f32b7514e..0864773a57e8 100644 --- a/include/linux/device_cgroup.h +++ b/include/linux/device_cgroup.h @@ -18,15 +18,16 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask) { short type, access = 0; + if (likely(!S_ISBLK(inode->i_mode) && 
!S_ISCHR(inode->i_mode))) + return 0; + if (likely(!inode->i_rdev)) return 0; if (S_ISBLK(inode->i_mode)) type = DEVCG_DEV_BLOCK; - else if (S_ISCHR(inode->i_mode)) + else /* S_ISCHR by the test above */ type = DEVCG_DEV_CHAR; - else - return 0; if (mask & MAY_WRITE) access |= DEVCG_ACC_WRITE; diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index f632ecfb4238..06c4de602b2f 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h @@ -11,6 +11,7 @@ #ifndef LINUX_DMAPOOL_H #define LINUX_DMAPOOL_H +#include <linux/nodemask_types.h> #include <linux/scatterlist.h> #include <asm/io.h> @@ -18,8 +19,8 @@ struct device; #ifdef CONFIG_HAS_DMA -struct dma_pool *dma_pool_create(const char *name, struct device *dev, - size_t size, size_t align, size_t allocation); +struct dma_pool *dma_pool_create_node(const char *name, struct device *dev, + size_t size, size_t align, size_t boundary, int node); void dma_pool_destroy(struct dma_pool *pool); @@ -35,9 +36,12 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev, void dmam_pool_destroy(struct dma_pool *pool); #else /* !CONFIG_HAS_DMA */ -static inline struct dma_pool *dma_pool_create(const char *name, - struct device *dev, size_t size, size_t align, size_t allocation) -{ return NULL; } +static inline struct dma_pool *dma_pool_create_node(const char *name, + struct device *dev, size_t size, size_t align, size_t boundary, + int node) +{ + return NULL; +} static inline void dma_pool_destroy(struct dma_pool *pool) { } static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { return NULL; } @@ -49,6 +53,13 @@ static inline struct dma_pool *dmam_pool_create(const char *name, static inline void dmam_pool_destroy(struct dma_pool *pool) { } #endif /* !CONFIG_HAS_DMA */ +static inline struct dma_pool *dma_pool_create(const char *name, + struct device *dev, size_t size, size_t align, size_t boundary) +{ + return dma_pool_create_node(name, dev, size, align, boundary, + NUMA_NO_NODE); +} + static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index fc61d0205c97..f94f3fdf15fc 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -14,6 +14,7 @@ #include <linux/kmsan.h> #include <asm/entry-common.h> +#include <asm/syscall.h> /* * Define dummy _TIF work flags if not defined by the architecture or for @@ -367,6 +368,15 @@ static __always_inline void exit_to_user_mode(void) } /** + * syscall_exit_work - Handle work before returning to user mode + * @regs: Pointer to current pt_regs + * @work: Current thread syscall work + * + * Do one-time syscall specific work. + */ +void syscall_exit_work(struct pt_regs *regs, unsigned long work); + +/** * syscall_exit_to_user_mode_work - Handle work before returning to user mode * @regs: Pointer to currents pt_regs * @@ -379,7 +389,30 @@ static __always_inline void exit_to_user_mode(void) * make the final state transitions. Interrupts must stay disabled between * return from this function and the invocation of exit_to_user_mode(). 
*/ -void syscall_exit_to_user_mode_work(struct pt_regs *regs); +static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs) +{ + unsigned long work = READ_ONCE(current_thread_info()->syscall_work); + unsigned long nr = syscall_get_nr(current, regs); + + CT_WARN_ON(ct_state() != CT_STATE_KERNEL); + + if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { + if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr)) + local_irq_enable(); + } + + rseq_syscall(regs); + + /* + * Do one-time syscall specific work. If these work items are + * enabled, we want to run them exactly once per syscall exit with + * interrupts enabled. + */ + if (unlikely(work & SYSCALL_WORK_EXIT)) + syscall_exit_work(regs, work); + local_irq_disable_exit_to_user(); + exit_to_user_mode_prepare(regs); +} /** * syscall_exit_to_user_mode - Handle work before returning to user mode @@ -410,7 +443,13 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs); * exit_to_user_mode(). This function is preferred unless there is a * compelling architectural reason to use the separate functions. */ -void syscall_exit_to_user_mode(struct pt_regs *regs); +static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs) +{ + instrumentation_begin(); + syscall_exit_to_user_mode_work(regs); + instrumentation_end(); + exit_to_user_mode(); +} /** * irqentry_enter_from_user_mode - Establish state before invoking the irq handler diff --git a/include/linux/file.h b/include/linux/file.h index 302f11355b10..af1768d934a0 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -59,7 +59,7 @@ static inline struct fd CLONED_FD(struct file *f) static inline void fdput(struct fd fd) { - if (fd.word & FDPUT_FPUT) + if (unlikely(fd.word & FDPUT_FPUT)) fput(fd_file(fd)); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 016b0fe1536e..0db87f8e676c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -408,6 +408,7 @@ struct kiocb { void *private; int ki_flags; u16 ki_ioprio; /* See linux/ioprio.h */ + u8 ki_write_stream; union { /* * Only used for async buffered reads, where it denotes the @@ -433,7 +434,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) } struct address_space_operations { - int (*writepage)(struct page *page, struct writeback_control *wbc); int (*read_folio)(struct file *, struct folio *); /* Write back some dirty pages from this mapping. */ @@ -867,6 +867,11 @@ static inline void inode_lock(struct inode *inode) down_write(&inode->i_rwsem); } +static inline __must_check int inode_lock_killable(struct inode *inode) +{ + return down_write_killable(&inode->i_rwsem); +} + static inline void inode_unlock(struct inode *inode) { up_write(&inode->i_rwsem); @@ -877,6 +882,11 @@ static inline void inode_lock_shared(struct inode *inode) down_read(&inode->i_rwsem); } +static inline __must_check int inode_lock_shared_killable(struct inode *inode) +{ + return down_read_killable(&inode->i_rwsem); +} + static inline void inode_unlock_shared(struct inode *inode) { up_read(&inode->i_rwsem); @@ -1307,6 +1317,7 @@ struct sb_writers { unsigned short frozen; /* Is sb frozen? */ int freeze_kcount; /* How many kernel freeze requests? */ int freeze_ucount; /* How many userspace freeze requests? 
*/ + const void *freeze_owner; /* Owner of the freeze */ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; }; @@ -1780,7 +1791,7 @@ static inline void __sb_end_write(struct super_block *sb, int level) static inline void __sb_start_write(struct super_block *sb, int level) { - percpu_down_read(sb->s_writers.rw_sem + level - 1); + percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true); } static inline bool __sb_start_write_trylock(struct super_block *sb, int level) @@ -2071,8 +2082,18 @@ typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, struct dir_context { filldir_t actor; loff_t pos; + /* + * Filesystems MUST NOT MODIFY count, but may use as a hint: + * 0 unknown + * > 0 space in buffer (assume at least one entry) + * INT_MAX unlimited + */ + int count; }; +/* If OR-ed with d_type, pending signals are not checked */ +#define FILLDIR_FLAG_NOINTR 0x1000 + /* * These flags let !MMU mmap() govern direct device mapping vs immediate * copying more easily for MAP_PRIVATE, especially for ROM filesystems. @@ -2186,7 +2207,7 @@ struct file_operations { /* Supports asynchronous lock callbacks */ #define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6)) /* File system supports uncached read/write buffered IO */ -#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7)) +#define FOP_DONTCACHE 0 /* ((__force fop_flags_t)(1 << 7)) */ /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, @@ -2269,6 +2290,7 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed + * @FREEZE_EXCL: a freeze that can only be undone by the owner * * Indicate who the owner of the freeze or thaw request is and whether * the freeze needs to be exclusive or can nest. @@ -2282,6 +2304,7 @@ enum freeze_holder { FREEZE_HOLDER_KERNEL = (1U << 0), FREEZE_HOLDER_USERSPACE = (1U << 1), FREEZE_MAY_NEST = (1U << 2), + FREEZE_EXCL = (1U << 3), }; struct super_operations { @@ -2295,9 +2318,9 @@ struct super_operations { void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); - int (*freeze_super) (struct super_block *, enum freeze_holder who); + int (*freeze_super) (struct super_block *, enum freeze_holder who, const void *owner); int (*freeze_fs) (struct super_block *); - int (*thaw_super) (struct super_block *, enum freeze_holder who); + int (*thaw_super) (struct super_block *, enum freeze_holder who, const void *owner); int (*unfreeze_fs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); @@ -2344,6 +2367,7 @@ struct super_operations { #define S_CASEFOLD (1 << 15) /* Casefolded file */ #define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */ #define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. 
fs/cachefiles) */ +#define S_ANON_INODE (1 << 19) /* Inode is an anonymous inode */ /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -2400,6 +2424,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ (inode)->i_rdev == WHITEOUT_DEV) +#define IS_ANON_FILE(inode) ((inode)->i_flags & S_ANON_INODE) static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap, struct inode *inode) @@ -2705,8 +2730,10 @@ extern int unregister_filesystem(struct file_system_type *); extern int vfs_statfs(const struct path *, struct kstatfs *); extern int user_statfs(const char __user *, struct kstatfs *); extern int fd_statfs(int, struct kstatfs *); -int freeze_super(struct super_block *super, enum freeze_holder who); -int thaw_super(struct super_block *super, enum freeze_holder who); +int freeze_super(struct super_block *super, enum freeze_holder who, + const void *freeze_owner); +int thaw_super(struct super_block *super, enum freeze_holder who, + const void *freeze_owner); extern __printf(2, 3) int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); extern int super_setup_bdi(struct super_block *sb); @@ -3475,7 +3502,8 @@ void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *); void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); void generic_fill_statx_atomic_writes(struct kstat *stat, unsigned int unit_min, - unsigned int unit_max); + unsigned int unit_max, + unsigned int unit_max_opt); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); @@ -3515,9 +3543,11 @@ extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); extern void drop_super(struct super_block *sb); extern void drop_super_exclusive(struct super_block *sb); -extern void iterate_supers(void (*)(struct super_block *, void *), void *); +extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg); extern void iterate_supers_type(struct file_system_type *, void (*)(struct super_block *, void *), void *); +void filesystems_freeze(void); +void filesystems_thaw(void); extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 53e566efd5fd..5a0e897cae80 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -87,14 +87,9 @@ extern int lookup_constant(const struct constant_table tbl[], const char *name, extern const struct constant_table bool_names[]; #ifdef CONFIG_VALIDATE_FS_PARSER -extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, - int low, int high, int special); extern bool fs_validate_description(const char *name, const struct fs_parameter_spec *desc); #else -static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, - int low, int high, int special) -{ return true; } static inline bool fs_validate_description(const char *name, const struct fs_parameter_spec *desc) { return true; } @@ -125,8 +120,6 @@ static inline bool fs_validate_description(const char *name, #define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0, NULL) #define fsparam_u32oct(NAME, OPT) \ __fsparam(fs_param_is_u32, NAME, OPT, 0, 
(void *)8) -#define fsparam_u32hex(NAME, OPT) \ - __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16) #define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL) #define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL) #define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array) diff --git a/include/linux/futex.h b/include/linux/futex.h index b70df27d7e85..005b040c4791 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -4,11 +4,11 @@ #include <linux/sched.h> #include <linux/ktime.h> +#include <linux/mm_types.h> #include <uapi/linux/futex.h> struct inode; -struct mm_struct; struct task_struct; /* @@ -34,6 +34,7 @@ union futex_key { u64 i_seq; unsigned long pgoff; unsigned int offset; + /* unsigned int node; */ } shared; struct { union { @@ -42,11 +43,13 @@ union futex_key { }; unsigned long address; unsigned int offset; + /* unsigned int node; */ } private; struct { u64 ptr; unsigned long word; unsigned int offset; + unsigned int node; /* NOT hashed! */ } both; }; @@ -77,7 +80,25 @@ void futex_exec_release(struct task_struct *tsk); long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3); -#else +int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4); + +#ifdef CONFIG_FUTEX_PRIVATE_HASH +int futex_hash_allocate_default(void); +void futex_hash_free(struct mm_struct *mm); + +static inline void futex_mm_init(struct mm_struct *mm) +{ + RCU_INIT_POINTER(mm->futex_phash, NULL); + mutex_init(&mm->futex_hash_lock); +} + +#else /* !CONFIG_FUTEX_PRIVATE_HASH */ +static inline int futex_hash_allocate_default(void) { return 0; } +static inline void futex_hash_free(struct mm_struct *mm) { } +static inline void futex_mm_init(struct mm_struct *mm) { } +#endif /* CONFIG_FUTEX_PRIVATE_HASH */ + +#else /* !CONFIG_FUTEX */ static inline void futex_init_task(struct task_struct *tsk) { } static inline void futex_exit_recursive(struct task_struct *tsk) { } static inline void futex_exit_release(struct task_struct *tsk) { } @@ -88,6 +109,17 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val, { return -EINVAL; } +static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4) +{ + return -EINVAL; +} +static inline int futex_hash_allocate_default(void) +{ + return 0; +} +static inline void futex_hash_free(struct mm_struct *mm) { } +static inline void futex_mm_init(struct mm_struct *mm) { } + #endif #endif diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 4c0294a9104d..b53233051bee 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -287,8 +287,9 @@ struct gpio_irq_chip { /** * @first: * - * Required for static IRQ allocation. If set, irq_domain_add_simple() - * will allocate and map all IRQs during initialization. + * Required for static IRQ allocation. If set, + * irq_domain_create_simple() will allocate and map all IRQs + * during initialization. 
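The <linux/fs.h> hunk earlier in this section adds an owner cookie to freeze_super()/thaw_super() and a FREEZE_EXCL holder flag. A hedged sketch of how an in-kernel user might take and release such an exclusive freeze; the example_dev cookie and the wrapper names are hypothetical, and the exact flag combination expected on thaw is an assumption rather than something stated in this diff:

    #include <linux/fs.h>

    static int example_freeze_fs(struct super_block *sb, const void *example_dev)
    {
            return freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL,
                                example_dev);
    }

    static int example_thaw_fs(struct super_block *sb, const void *example_dev)
    {
            /* Assumption: thaw repeats the holder flags and the same owner
             * cookie, so only the freezing owner can undo the freeze. */
            return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL,
                              example_dev);
    }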
*/ unsigned int first; diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 5c6bea81a90e..c698f8415675 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio, const char *from = kmap_local_folio(folio, offset); size_t chunk = len; - if (folio_test_highmem(folio) && + if (folio_test_partial_kmap(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); @@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset, char *to = kmap_local_folio(folio, offset); size_t chunk = len; - if (folio_test_highmem(folio) && + if (folio_test_partial_kmap(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); @@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio, { size_t len = folio_size(folio) - offset; - if (folio_test_highmem(folio)) { + if (folio_test_partial_kmap(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { @@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset, VM_BUG_ON(offset + len > folio_size(folio)); - if (folio_test_highmem(folio)) { + if (folio_test_partial_kmap(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { @@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio, size_t offset = offset_in_folio(folio, pos); char *from = kmap_local_folio(folio, offset); - if (folio_test_highmem(folio)) { + if (folio_test_partial_kmap(folio)) { offset = offset_in_page(offset); len = min_t(size_t, len, PAGE_SIZE - offset); } else diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 8f3ac832ee7f..4861a7e304bb 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -275,6 +275,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma, bool is_hugetlb_entry_migration(pte_t pte); bool is_hugetlb_entry_hwpoisoned(pte_t pte); void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); +void fixup_hugetlb_reservations(struct vm_area_struct *vma); #else /* !CONFIG_HUGETLB_PAGE */ @@ -468,6 +469,10 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } +static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma) +{ +} + #endif /* !CONFIG_HUGETLB_PAGE */ #ifndef pgd_write diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c782a74d2a30..51b6484c0493 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -140,7 +140,7 @@ extern irqreturn_t no_action(int cpl, void *dev_id); /* * If a (PCI) device interrupt is not connected we set dev->irq to * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we - * can distingiush that case from other error returns. + * can distinguish that case from other error returns. * * 0x80000000 is guaranteed to be outside the available range of interrupts * and easy to distinguish from other possible incorrect values. 
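The IRQ_NOTCONNECTED convention described in the <linux/interrupt.h> comment above is easiest to see from the consumer side. A minimal sketch, assuming a hypothetical PCI driver (example_request_irq(), example_irq_handler() and the poll_mode flag are not from this diff) that falls back to polling when no interrupt line is wired up:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static bool poll_mode;              /* hypothetical driver state */

    static irqreturn_t example_irq_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int example_request_irq(struct pci_dev *pdev)
    {
            int ret = request_irq(pdev->irq, example_irq_handler, 0,
                                  "example", pdev);

            if (ret == -ENOTCONN) {
                    /* pdev->irq was IRQ_NOTCONNECTED: nothing wired up,
                     * run in polling mode instead of failing the probe. */
                    poll_mode = true;
                    return 0;
            }
            return ret;
    }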
diff --git a/include/linux/io.h b/include/linux/io.h index 6a6bc4d46d0a..0642c7ee41db 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -183,4 +183,25 @@ static inline void arch_io_free_memtype_wc(resource_size_t base, int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start, resource_size_t size); +#ifdef CONFIG_STRICT_DEVMEM +static inline int range_is_allowed(unsigned long pfn, unsigned long size) +{ + u64 from = ((u64)pfn) << PAGE_SHIFT; + u64 to = from + size; + u64 cursor = from; + + while (cursor < to) { + if (!devmem_is_allowed(pfn)) + return 0; + cursor += PAGE_SIZE; + pfn++; + } + return 1; +} +#else +static inline int range_is_allowed(unsigned long pfn, unsigned long size) +{ + return 1; +} +#endif #endif /* _LINUX_IO_H */ diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index 0634a3de1782..53408124c1e5 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -140,6 +140,15 @@ static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_ur return cmd_to_io_kiocb(cmd)->async_data; } +/* + * Return uring_cmd's context reference as its context handle for driver to + * track per-context resource, such as registered kernel IO buffer + */ +static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd) +{ + return cmd_to_io_kiocb(cmd)->ctx; +} + int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq, void (*release)(void *), unsigned int index, unsigned int issue_flags); diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index b44d201520d8..2922635986f5 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -40,8 +40,6 @@ enum io_uring_cmd_flags { IO_URING_F_TASK_DEAD = (1 << 13), }; -struct io_zcrx_ifq; - struct io_wq_work_node { struct io_wq_work_node *next; }; @@ -343,7 +341,6 @@ struct io_ring_ctx { unsigned cached_cq_tail; unsigned cq_entries; struct io_ev_fd __rcu *io_ev_fd; - unsigned cq_extra; void *cq_wait_arg; size_t cq_wait_size; @@ -394,7 +391,8 @@ struct io_ring_ctx { struct wait_queue_head poll_wq; struct io_restriction restrictions; - struct io_zcrx_ifq *ifq; + /* Stores zcrx object pointers of type struct io_zcrx_ifq */ + struct xarray zcrx_ctxs; u32 pers_next; struct xarray personalities; @@ -418,6 +416,7 @@ struct io_ring_ctx { struct callback_head poll_wq_task_work; struct list_head defer_list; + unsigned nr_drained; struct io_alloc_cache msg_cache; spinlock_t msg_lock; @@ -436,6 +435,7 @@ struct io_ring_ctx { /* protected by ->completion_lock */ unsigned evfd_last_cq_tail; + unsigned nr_req_allocated; /* * Protection for resize vs mmap races - both the mmap and resize @@ -448,8 +448,6 @@ struct io_ring_ctx { struct io_mapped_region ring_region; /* used for optimised request parameter and wait argument passing */ struct io_mapped_region param_region; - /* just one zcrx per ring for now, will move to io_zcrx_ifq eventually */ - struct io_mapped_region zcrx_region; }; /* @@ -653,8 +651,7 @@ struct io_kiocb { u8 iopoll_completed; /* * Can be either a fixed buffer index, or used with provided buffers. - * For the latter, before issue it points to the buffer group ID, - * and after selection it points to the buffer ID itself. + * For the latter, it points to the selected buffer ID. 
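range_is_allowed(), moved into <linux/io.h> above, follows the /dev/mem pattern of vetting a pfn range one page at a time via devmem_is_allowed(). A sketch of a possible caller, assuming a hypothetical character-device mmap handler (example_mmap() is not part of this diff):

    #include <linux/fs.h>
    #include <linux/io.h>
    #include <linux/mm.h>

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            size_t size = vma->vm_end - vma->vm_start;

            /* Reject the mapping if any page in the range is off limits;
             * with CONFIG_STRICT_DEVMEM disabled this always succeeds. */
            if (!range_is_allowed(vma->vm_pgoff, size))
                    return -EPERM;

            return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   size, vma->vm_page_prot);
    }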
*/ u16 buf_index; @@ -713,7 +710,7 @@ struct io_kiocb { const struct cred *creds; struct io_wq_work work; - struct { + struct io_big_cqe { u64 extra1; u64 extra2; } big_cqe; diff --git a/include/linux/irq.h b/include/linux/irq.h index dd5df1e2d032..1d6b606a81ef 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -597,7 +597,6 @@ enum { struct irqaction; extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); -extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE extern void irq_cpu_online(void); @@ -700,7 +699,7 @@ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret); extern int noirqdebug_setup(char *str); /* Checks whether the interrupt can be requested by request_irq(): */ -extern int can_request_irq(unsigned int irq, unsigned long irqflags); +extern bool can_request_irq(unsigned int irq, unsigned long irqflags); /* Dummy irq-chip implementations: */ extern struct irq_chip no_irq_chip; @@ -1222,31 +1221,6 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) -#ifdef CONFIG_SMP -static inline void irq_gc_lock(struct irq_chip_generic *gc) -{ - raw_spin_lock(&gc->lock); -} - -static inline void irq_gc_unlock(struct irq_chip_generic *gc) -{ - raw_spin_unlock(&gc->lock); -} -#else -static inline void irq_gc_lock(struct irq_chip_generic *gc) { } -static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } -#endif - -/* - * The irqsave variants are for usage in non interrupt code. Do not use - * them in irq_chip callbacks. Use irq_gc_lock() instead. - */ -#define irq_gc_lock_irqsave(gc, flags) \ - raw_spin_lock_irqsave(&(gc)->lock, flags) - -#define irq_gc_unlock_irqrestore(gc, flags) \ - raw_spin_unlock_irqrestore(&(gc)->lock, flags) - static inline void irq_reg_writel(struct irq_chip_generic *gc, u32 val, int reg_offset) { diff --git a/drivers/irqchip/irq-msi-lib.h b/include/linux/irqchip/irq-msi-lib.h index 681ceabb7bc7..dd8d1d138544 100644 --- a/drivers/irqchip/irq-msi-lib.h +++ b/include/linux/irqchip/irq-msi-lib.h @@ -2,8 +2,8 @@ // Copyright (C) 2022 Linutronix GmbH // Copyright (C) 2022 Intel -#ifndef _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H -#define _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H +#ifndef _IRQCHIP_IRQ_MSI_LIB_H +#define _IRQCHIP_IRQ_MSI_LIB_H #include <linux/bits.h> #include <linux/irqdomain.h> @@ -24,4 +24,4 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain, struct irq_domain *real_parent, struct msi_domain_info *info); -#endif /* _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H */ +#endif /* _IRQCHIP_IRQ_MSI_LIB_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index bb7111105296..7387d183029b 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -1,30 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * irq_domain - IRQ translation domains + * irq_domain - IRQ Translation Domains * - * Translation infrastructure between hw and linux irq numbers. This is - * helpful for interrupt controllers to implement mapping between hardware - * irq numbers and the Linux irq number space. - * - * irq_domains also have hooks for translating device tree or other - * firmware interrupt representations into a hardware irq number that - * can be mapped back to a Linux irq number without any extra platform - * support code. - * - * Interrupt controller "domain" data structure. This could be defined as a - * irq domain controller. 
That is, it handles the mapping between hardware - * and virtual interrupt numbers for a given interrupt domain. The domain - * structure is generally created by the PIC code for a given PIC instance - * (though a domain can cover more than one PIC if they have a flat number - * model). It's the domain callbacks that are responsible for setting the - * irq_chip on a given irq_desc after it's been mapped. - * - * The host code and data structures use a fwnode_handle pointer to - * identify the domain. In some cases, and in order to preserve source - * code compatibility, this fwnode pointer is "upgraded" to a DT - * device_node. For those firmware infrastructures that do not provide - * a unique identifier for an interrupt controller, the irq_domain - * code offers a fwnode allocator. + * See Documentation/core-api/irq/irq-domain.rst for the details. */ #ifndef _LINUX_IRQDOMAIN_H @@ -61,9 +39,9 @@ struct msi_parent_ops; * pass a device-specific description of an interrupt. */ struct irq_fwspec { - struct fwnode_handle *fwnode; - int param_count; - u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; + struct fwnode_handle *fwnode; + int param_count; + u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; }; /* Conversion function from of_phandle_args fields to fwspec */ @@ -72,26 +50,26 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, /** * struct irq_domain_ops - Methods for irq_domain objects - * @match: Match an interrupt controller device node to a domain, returns - * 1 on a match - * @select: Match an interrupt controller fw specification. It is more generic - * than @match as it receives a complete struct irq_fwspec. Therefore, - * @select is preferred if provided. Returns 1 on a match. - * @map: Create or update a mapping between a virtual irq number and a hw - * irq number. This is called only once for a given mapping. - * @unmap: Dispose of such a mapping - * @xlate: Given a device tree node and interrupt specifier, decode - * the hardware irq number and linux irq type value. - * @alloc: Allocate @nr_irqs interrupts starting from @virq. - * @free: Free @nr_irqs interrupts starting from @virq. - * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only - * reserve the vector. If unset, assign the vector (called from - * request_irq()). - * @deactivate: Disarm one interrupt (@irqd). - * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and - * linux irq type value (@out_type). This is a generalised @xlate - * (over struct irq_fwspec) and is preferred if provided. - * @debug_show: For domains to show specific data for an interrupt in debugfs. + * @match: Match an interrupt controller device node to a domain, returns + * 1 on a match + * @select: Match an interrupt controller fw specification. It is more generic + * than @match as it receives a complete struct irq_fwspec. Therefore, + * @select is preferred if provided. Returns 1 on a match. + * @map: Create or update a mapping between a virtual irq number and a hw + * irq number. This is called only once for a given mapping. + * @unmap: Dispose of such a mapping + * @xlate: Given a device tree node and interrupt specifier, decode + * the hardware irq number and linux irq type value. + * @alloc: Allocate @nr_irqs interrupts starting from @virq. + * @free: Free @nr_irqs interrupts starting from @virq. + * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only + * reserve the vector. If unset, assign the vector (called from + * request_irq()). 
+ * @deactivate: Disarm one interrupt (@irqd). + * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and + * linux irq type value (@out_type). This is a generalised @xlate + * (over struct irq_fwspec) and is preferred if provided. + * @debug_show: For domains to show specific data for an interrupt in debugfs. * * Functions below are provided by the driver and called whenever a new mapping * is created or an old mapping is disposed. The driver can then proceed to @@ -99,29 +77,29 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, * to setup the irq_desc when returning from map(). */ struct irq_domain_ops { - int (*match)(struct irq_domain *d, struct device_node *node, - enum irq_domain_bus_token bus_token); - int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec, - enum irq_domain_bus_token bus_token); - int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); - void (*unmap)(struct irq_domain *d, unsigned int virq); - int (*xlate)(struct irq_domain *d, struct device_node *node, - const u32 *intspec, unsigned int intsize, - unsigned long *out_hwirq, unsigned int *out_type); + int (*match)(struct irq_domain *d, struct device_node *node, + enum irq_domain_bus_token bus_token); + int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token); + int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); + void (*unmap)(struct irq_domain *d, unsigned int virq); + int (*xlate)(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* extended V2 interfaces to support hierarchy irq_domains */ - int (*alloc)(struct irq_domain *d, unsigned int virq, - unsigned int nr_irqs, void *arg); - void (*free)(struct irq_domain *d, unsigned int virq, - unsigned int nr_irqs); - int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); - void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); - int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, - unsigned long *out_hwirq, unsigned int *out_type); + int (*alloc)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg); + void (*free)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs); + int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); + void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); + int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type); #endif #ifdef CONFIG_GENERIC_IRQ_DEBUGFS - void (*debug_show)(struct seq_file *m, struct irq_domain *d, - struct irq_data *irqd, int ind); + void (*debug_show)(struct seq_file *m, struct irq_domain *d, + struct irq_data *irqd, int ind); #endif }; @@ -231,6 +209,9 @@ enum { /* Irq domain must destroy generic chips when removed */ IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10), + /* Address and data pair is mutable when irq_set_affinity() */ + IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the @@ -244,8 +225,7 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) return to_of_node(d->fwnode); } -static inline void irq_domain_set_pm_device(struct irq_domain *d, - struct device *dev) +static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device *dev) { 
if (d) d->pm_dev = dev; @@ -261,14 +241,12 @@ enum { IRQCHIP_FWNODE_NAMED_ID, }; -static inline -struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name) +static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name) { return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL); } -static inline -struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id) +static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id) { return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name, NULL); @@ -281,6 +259,8 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) void irq_domain_free_fwnode(struct fwnode_handle *fwnode); +DEFINE_FREE(irq_domain_free_fwnode, struct fwnode_handle *, if (_T) irq_domain_free_fwnode(_T)) + struct irq_domain_chip_generic_info; /** @@ -333,36 +313,19 @@ struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info); struct irq_domain *devm_irq_domain_instantiate(struct device *dev, const struct irq_domain_info *info); -struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, - unsigned int size, +struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data); -struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - irq_hw_number_t first_hwirq, - const struct irq_domain_ops *ops, - void *host_data); -struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, - unsigned int size, - unsigned int first_irq, - irq_hw_number_t first_hwirq, - const struct irq_domain_ops *ops, - void *host_data); + const struct irq_domain_ops *ops, void *host_data); +struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size, + unsigned int first_irq, irq_hw_number_t first_hwirq, + const struct irq_domain_ops *ops, void *host_data); struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); void irq_set_default_domain(struct irq_domain *domain); struct irq_domain *irq_get_default_domain(void); -int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, - irq_hw_number_t hwirq, int node, +int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity); -static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) -{ - return node ? 
&node->fwnode : NULL; -} - extern const struct fwnode_operations irqchip_fwnode_ops; static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode) @@ -370,12 +333,10 @@ static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode) return fwnode && fwnode->ops == &irqchip_fwnode_ops; } -void irq_domain_update_bus_token(struct irq_domain *domain, - enum irq_domain_bus_token bus_token); +void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token); -static inline -struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, - enum irq_domain_bus_token bus_token) +static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, + enum irq_domain_bus_token bus_token) { struct irq_fwspec fwspec = { .fwnode = fwnode, @@ -387,7 +348,7 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, static inline struct irq_domain *irq_find_matching_host(struct device_node *node, enum irq_domain_bus_token bus_token) { - return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); + return irq_find_matching_fwnode(of_fwnode_handle(node), bus_token); } static inline struct irq_domain *irq_find_host(struct device_node *node) @@ -401,128 +362,92 @@ static inline struct irq_domain *irq_find_host(struct device_node *node) return d; } -static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data) -{ - return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data); -} - -/** - * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. - * @of_node: pointer to interrupt controller's device tree node. - * @size: Number of interrupts in the domain. - * @ops: map/unmap domain callbacks - * @host_data: Controller private data pointer - */ -static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node, - unsigned int size, - const struct irq_domain_ops *ops, - void *host_data) -{ - struct irq_domain_info info = { - .fwnode = of_node_to_fwnode(of_node), - .size = size, - .hwirq_max = size, - .ops = ops, - .host_data = host_data, - }; - struct irq_domain *d; - - d = irq_domain_instantiate(&info); - return IS_ERR(d) ? NULL : d; -} - #ifdef CONFIG_IRQ_DOMAIN_NOMAP -static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, - unsigned int max_irq, - const struct irq_domain_ops *ops, - void *host_data) +static inline struct irq_domain *irq_domain_create_nomap(struct fwnode_handle *fwnode, + unsigned int max_irq, + const struct irq_domain_ops *ops, + void *host_data) { - struct irq_domain_info info = { - .fwnode = of_node_to_fwnode(of_node), + const struct irq_domain_info info = { + .fwnode = fwnode, .hwirq_max = max_irq, .direct_max = max_irq, .ops = ops, .host_data = host_data, }; - struct irq_domain *d; + struct irq_domain *d = irq_domain_instantiate(&info); - d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } unsigned int irq_create_direct_mapping(struct irq_domain *domain); #endif -static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, - const struct irq_domain_ops *ops, - void *host_data) -{ - struct irq_domain_info info = { - .fwnode = of_node_to_fwnode(of_node), - .hwirq_max = ~0U, - .ops = ops, - .host_data = host_data, - }; - struct irq_domain *d; - - d = irq_domain_instantiate(&info); - return IS_ERR(d) ? 
NULL : d; -} - +/** + * irq_domain_create_linear - Allocate and register a linear revmap irq_domain. + * @fwnode: pointer to interrupt controller's FW node. + * @size: Number of interrupts in the domain. + * @ops: map/unmap domain callbacks + * @host_data: Controller private data pointer + * + * Returns: Newly created irq_domain + */ static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, - unsigned int size, - const struct irq_domain_ops *ops, - void *host_data) + unsigned int size, + const struct irq_domain_ops *ops, + void *host_data) { - struct irq_domain_info info = { + const struct irq_domain_info info = { .fwnode = fwnode, .size = size, .hwirq_max = size, .ops = ops, .host_data = host_data, }; - struct irq_domain *d; + struct irq_domain *d = irq_domain_instantiate(&info); - d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, - const struct irq_domain_ops *ops, - void *host_data) + const struct irq_domain_ops *ops, + void *host_data) { - struct irq_domain_info info = { + const struct irq_domain_info info = { .fwnode = fwnode, .hwirq_max = ~0, .ops = ops, .host_data = host_data, }; - struct irq_domain *d; + struct irq_domain *d = irq_domain_instantiate(&info); - d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } void irq_domain_remove(struct irq_domain *domain); -int irq_domain_associate(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq); -void irq_domain_associate_many(struct irq_domain *domain, - unsigned int irq_base, +int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq); +void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count); -unsigned int irq_create_mapping_affinity(struct irq_domain *domain, - irq_hw_number_t hwirq, +unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq, const struct irq_affinity_desc *affinity); unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); void irq_dispose_mapping(unsigned int virq); -static inline unsigned int irq_create_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) +/** + * irq_create_mapping - Map a hardware interrupt into linux irq space + * @domain: domain owning this hardware interrupt or NULL for default domain + * @hwirq: hardware irq number in that domain space + * + * Only one mapping per hardware interrupt is permitted. + * + * If the sense/trigger is to be specified, set_irq_type() should be called + * on the number returned from that call. + * + * Returns: Linux irq number or 0 on error + */ +static inline unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { return irq_create_mapping_affinity(domain, hwirq, NULL); } @@ -531,6 +456,13 @@ struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, irq_hw_number_t hwirq, unsigned int *irq); +/** + * irq_resolve_mapping - Find a linux irq from a hw irq number. + * @domain: domain owning this hardware interrupt + * @hwirq: hardware irq number in that domain space + * + * Returns: Interrupt descriptor + */ static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { @@ -539,8 +471,10 @@ static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain, /** * irq_find_mapping() - Find a linux irq from a hw irq number. 
- * @domain: domain owning this hardware interrupt - * @hwirq: hardware irq number in that domain space + * @domain: domain owning this hardware interrupt + * @hwirq: hardware irq number in that domain space + * + * Returns: Linux irq number or 0 if not found */ static inline unsigned int irq_find_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) @@ -553,107 +487,115 @@ static inline unsigned int irq_find_mapping(struct irq_domain *domain, return 0; } -static inline unsigned int irq_linear_revmap(struct irq_domain *domain, - irq_hw_number_t hwirq) -{ - return irq_find_mapping(domain, hwirq); -} - extern const struct irq_domain_ops irq_domain_simple_ops; /* stock xlate functions */ int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type); + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type); + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type); - -int irq_domain_translate_twocell(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *out_hwirq, - unsigned int *out_type); - -int irq_domain_translate_onecell(struct irq_domain *d, - struct irq_fwspec *fwspec, - unsigned long *out_hwirq, - unsigned int *out_type); + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); +int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); + +int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type); +int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type); +int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type); /* IPI functions */ int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); /* V2 interfaces to support hierarchy IRQ domains. 
*/ -struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, - unsigned int virq); -void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq, - const struct irq_chip *chip, - void *chip_data, irq_flow_handler_t handler, +struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq); +void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, + const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); void irq_domain_reset_irq_data(struct irq_data *irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, - unsigned int flags, - unsigned int size, - struct fwnode_handle *fwnode, - const struct irq_domain_ops *ops, - void *host_data); - -static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, - unsigned int flags, - unsigned int size, - struct device_node *node, - const struct irq_domain_ops *ops, - void *host_data) -{ - return irq_domain_create_hierarchy(parent, flags, size, - of_node_to_fwnode(node), - ops, host_data); -} - -int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, - unsigned int nr_irqs, int node, void *arg, - bool realloc, +/** + * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy + * @parent: Parent irq domain to associate with the new domain + * @flags: Irq domain flags associated to the domain + * @size: Size of the domain. See below + * @fwnode: Optional fwnode of the interrupt controller + * @ops: Pointer to the interrupt domain callbacks + * @host_data: Controller private data pointer + * + * If @size is 0 a tree domain is created, otherwise a linear domain. + * + * If successful the parent is associated to the new domain and the + * domain flags are set. + * + * Returns: A pointer to IRQ domain, or %NULL on failure. + */ +static inline struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, + unsigned int flags, unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data) +{ + const struct irq_domain_info info = { + .fwnode = fwnode, + .size = size, + .hwirq_max = size ? : ~0U, + .ops = ops, + .host_data = host_data, + .domain_flags = flags, + .parent = parent, + }; + struct irq_domain *d = irq_domain_instantiate(&info); + + return IS_ERR(d) ? NULL : d; +} + +int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, + int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity); void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); int irq_domain_activate_irq(struct irq_data *irq_data, bool early); void irq_domain_deactivate_irq(struct irq_data *irq_data); -static inline int irq_domain_alloc_irqs(struct irq_domain *domain, - unsigned int nr_irqs, int node, void *arg) +/** + * irq_domain_alloc_irqs - Allocate IRQs from domain + * @domain: domain to allocate from + * @nr_irqs: number of IRQs to allocate + * @node: NUMA node id for memory allocation + * @arg: domain specific argument + * + * See __irq_domain_alloc_irqs()' documentation. 
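irq_domain_create_hierarchy() above is now an inline wrapper around irq_domain_instantiate(). A minimal sketch of a child interrupt controller using it, with hypothetical driver callbacks (example_domain_alloc(), example_create_domain()); the stock helpers it reuses are the ones declared elsewhere in this header:

    #include <linux/irqdomain.h>

    static int example_domain_alloc(struct irq_domain *d, unsigned int virq,
                                    unsigned int nr_irqs, void *arg)
    {
            /* Hypothetical: hand the allocation straight to the parent. */
            return irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
    }

    static const struct irq_domain_ops example_domain_ops = {
            .alloc          = example_domain_alloc,
            .free           = irq_domain_free_irqs_common,
            .translate      = irq_domain_translate_twocell,
    };

    static struct irq_domain *example_create_domain(struct irq_domain *parent,
                                                    struct fwnode_handle *fwnode,
                                                    void *priv)
    {
            /* size != 0 selects a linear revmap; size == 0 creates a tree. */
            return irq_domain_create_hierarchy(parent, 0, 32, fwnode,
                                               &example_domain_ops, priv);
    }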
+ */ +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, + int node, void *arg) { - return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, - NULL); + return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL); } -int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, - unsigned int virq, - irq_hw_number_t hwirq, - const struct irq_chip *chip, +int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data); -void irq_domain_free_irqs_common(struct irq_domain *domain, - unsigned int virq, +void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); -void irq_domain_free_irqs_top(struct irq_domain *domain, - unsigned int virq, unsigned int nr_irqs); +void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg); int irq_domain_pop_irq(struct irq_domain *domain, int virq); -int irq_domain_alloc_irqs_parent(struct irq_domain *domain, - unsigned int irq_base, +int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg); -void irq_domain_free_irqs_parent(struct irq_domain *domain, - unsigned int irq_base, +void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs); -int irq_domain_disconnect_hierarchy(struct irq_domain *domain, - unsigned int virq); +int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq); static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { @@ -662,8 +604,7 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) static inline bool irq_domain_is_ipi(struct irq_domain *domain) { - return domain->flags & - (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE); + return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE); } static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) @@ -691,15 +632,18 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain) return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE; } +static inline bool irq_domain_is_msi_immutable(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_MSI_IMMUTABLE; +} #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -static inline int irq_domain_alloc_irqs(struct irq_domain *domain, - unsigned int nr_irqs, int node, void *arg) +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, + int node, void *arg) { return -1; } -static inline void irq_domain_free_irqs(unsigned int virq, - unsigned int nr_irqs) { } +static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { } static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { @@ -739,8 +683,7 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain) #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #ifdef CONFIG_GENERIC_MSI_IRQ -int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, - unsigned int type); +int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type); void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq); #else static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, @@ -755,10 +698,50 @@ static inline void 
msi_device_domain_free_wired(struct irq_domain *domain, unsig } #endif +/* Deprecated functions. Will be removed in the merge window */ +static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) +{ + return node ? &node->fwnode : NULL; +} + +static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(of_node), + .hwirq_max = ~0U, + .ops = ops, + .host_data = host_data, + }; + struct irq_domain *d; + + d = irq_domain_instantiate(&info); + return IS_ERR(d) ? NULL : d; +} + +static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node, + unsigned int size, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(of_node), + .size = size, + .hwirq_max = size, + .ops = ops, + .host_data = host_data, + }; + struct irq_domain *d; + + d = irq_domain_instantiate(&info); + return IS_ERR(d) ? NULL : d; +} + #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } -static inline struct irq_domain *irq_find_matching_fwnode( - struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) +static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, + enum irq_domain_bus_token bus_token) { return NULL; } diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h index 013794fb5da0..065c185f2763 100644 --- a/include/linux/livepatch_sched.h +++ b/include/linux/livepatch_sched.h @@ -3,27 +3,23 @@ #define _LINUX_LIVEPATCH_SCHED_H_ #include <linux/jump_label.h> -#include <linux/static_call_types.h> +#include <linux/sched.h> #ifdef CONFIG_LIVEPATCH void __klp_sched_try_switch(void); -#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) - DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key); -static __always_inline void klp_sched_try_switch(void) +static __always_inline void klp_sched_try_switch(struct task_struct *curr) { - if (static_branch_unlikely(&klp_sched_try_switch_key)) + if (static_branch_unlikely(&klp_sched_try_switch_key) && + READ_ONCE(curr->__state) & TASK_FREEZABLE) __klp_sched_try_switch(); } -#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ - #else /* !CONFIG_LIVEPATCH */ -static inline void klp_sched_try_switch(void) {} -static inline void __klp_sched_try_switch(void) {} +static inline void klp_sched_try_switch(struct task_struct *curr) {} #endif /* CONFIG_LIVEPATCH */ #endif /* _LINUX_LIVEPATCH_SCHED_H_ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index bf55206935c4..fdda6b16263b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -385,7 +385,7 @@ extern unsigned int kobjsize(const void *objp); #endif #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR -# define VM_UFFD_MINOR_BIT 38 +# define VM_UFFD_MINOR_BIT 41 # define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ # define VM_UFFD_MINOR VM_NONE diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 56d07edd01f9..32ba5126e221 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -31,6 +31,7 @@ #define INIT_PASID 0 struct address_space; +struct futex_private_hash; struct mem_cgroup; /* @@ -1031,7 +1032,11 @@ struct mm_struct { */ seqcount_t mm_lock_seq; #endif - +#ifdef CONFIG_FUTEX_PRIVATE_HASH + struct mutex futex_hash_lock; + struct futex_private_hash 
__rcu *futex_phash; + struct futex_private_hash *futex_phash_new; +#endif unsigned long hiwater_rss; /* High-watermark of RSS usage */ unsigned long hiwater_vm; /* High-water virtual memory usage */ diff --git a/include/linux/mman.h b/include/linux/mman.h index bce214fece16..f4c6346a8fcd 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -155,7 +155,9 @@ calc_vm_flag_bits(struct file *file, unsigned long flags) return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | +#ifdef CONFIG_TRANSPARENT_HUGEPAGE _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) | +#endif arch_calc_vm_flag_bits(file, flags); } diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 4706c6769902..e0eddfd306ef 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -7,6 +7,7 @@ #include <linux/rwsem.h> #include <linux/tracepoint-defs.h> #include <linux/types.h> +#include <linux/cleanup.h> #define MMAP_LOCK_INITIALIZER(name) \ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), @@ -211,6 +212,9 @@ static inline void mmap_read_unlock(struct mm_struct *mm) up_read(&mm->mmap_lock); } +DEFINE_GUARD(mmap_read_lock, struct mm_struct *, + mmap_read_lock(_T), mmap_read_unlock(_T)) + static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) { __mmap_lock_trace_released(mm, false); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6ccec1bf2896..b1c459f7a485 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -148,7 +148,6 @@ enum zone_stat_item { NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ /* Second 128 byte cacheline */ - NR_BOUNCE, #if IS_ENABLED(CONFIG_ZSMALLOC) NR_ZSPAGES, /* allocated in zsmalloc */ #endif diff --git a/include/linux/mount.h b/include/linux/mount.h index dcc17ce8a959..6904ad33ee7a 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -22,48 +22,51 @@ struct fs_context; struct file; struct path; -#define MNT_NOSUID 0x01 -#define MNT_NODEV 0x02 -#define MNT_NOEXEC 0x04 -#define MNT_NOATIME 0x08 -#define MNT_NODIRATIME 0x10 -#define MNT_RELATIME 0x20 -#define MNT_READONLY 0x40 /* does the user want this to be r/o? */ -#define MNT_NOSYMFOLLOW 0x80 - -#define MNT_SHRINKABLE 0x100 -#define MNT_WRITE_HOLD 0x200 - -#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ -#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ -/* - * MNT_SHARED_MASK is the set of flags that should be cleared when a - * mount becomes shared. Currently, this is only the flag that says a - * mount cannot be bind mounted, since this is how we create a mount - * that shares events with another mount. If you add a new MNT_* - * flag, consider how it interacts with shared mounts. 
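The new DEFINE_GUARD(mmap_read_lock, ...) in the <linux/mmap_lock.h> hunk above enables scope-based locking of the mmap lock. A sketch of the pattern it allows, using a hypothetical helper name (example_count_vmas()):

    #include <linux/mmap_lock.h>
    #include <linux/mm_types.h>

    static int example_count_vmas(struct mm_struct *mm)
    {
            /* The guard drops the read lock automatically on every return
             * path, so no explicit mmap_read_unlock() is needed. */
            guard(mmap_read_lock)(mm);

            return mm->map_count;
    }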
- */ -#define MNT_SHARED_MASK (MNT_UNBINDABLE) -#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \ - | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \ - | MNT_READONLY | MNT_NOSYMFOLLOW) -#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) - -#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ - MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) - -#define MNT_INTERNAL 0x4000 - -#define MNT_LOCK_ATIME 0x040000 -#define MNT_LOCK_NOEXEC 0x080000 -#define MNT_LOCK_NOSUID 0x100000 -#define MNT_LOCK_NODEV 0x200000 -#define MNT_LOCK_READONLY 0x400000 -#define MNT_LOCKED 0x800000 -#define MNT_DOOMED 0x1000000 -#define MNT_SYNC_UMOUNT 0x2000000 -#define MNT_MARKED 0x4000000 -#define MNT_UMOUNT 0x8000000 +enum mount_flags { + MNT_NOSUID = 0x01, + MNT_NODEV = 0x02, + MNT_NOEXEC = 0x04, + MNT_NOATIME = 0x08, + MNT_NODIRATIME = 0x10, + MNT_RELATIME = 0x20, + MNT_READONLY = 0x40, /* does the user want this to be r/o? */ + MNT_NOSYMFOLLOW = 0x80, + + MNT_SHRINKABLE = 0x100, + MNT_WRITE_HOLD = 0x200, + + MNT_SHARED = 0x1000, /* if the vfsmount is a shared mount */ + MNT_UNBINDABLE = 0x2000, /* if the vfsmount is a unbindable mount */ + + MNT_INTERNAL = 0x4000, + + MNT_LOCK_ATIME = 0x040000, + MNT_LOCK_NOEXEC = 0x080000, + MNT_LOCK_NOSUID = 0x100000, + MNT_LOCK_NODEV = 0x200000, + MNT_LOCK_READONLY = 0x400000, + MNT_LOCKED = 0x800000, + MNT_DOOMED = 0x1000000, + MNT_SYNC_UMOUNT = 0x2000000, + MNT_MARKED = 0x4000000, + MNT_UMOUNT = 0x8000000, + + /* + * MNT_SHARED_MASK is the set of flags that should be cleared when a + * mount becomes shared. Currently, this is only the flag that says a + * mount cannot be bind mounted, since this is how we create a mount + * that shares events with another mount. If you add a new MNT_* + * flag, consider how it interacts with shared mounts. 
+ */ + MNT_SHARED_MASK = MNT_UNBINDABLE, + MNT_USER_SETTABLE_MASK = MNT_NOSUID | MNT_NODEV | MNT_NOEXEC + | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME + | MNT_READONLY | MNT_NOSYMFOLLOW, + MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME, + + MNT_INTERNAL_FLAGS = MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED, +}; struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index 58a2401e4b55..0075f6e5c3da 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -262,6 +262,11 @@ struct mr_table { int mroute_reg_vif_num; }; +static inline bool mr_can_free_table(struct net *net) +{ + return !check_net(net) || !net_initialized(net); +} + #ifdef CONFIG_IP_MROUTE_COMMON void vif_device_init(struct vif_device *v, struct net_device *dev, diff --git a/include/linux/msi.h b/include/linux/msi.h index 86e42742fd0f..6863540f4b71 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -229,8 +229,11 @@ struct msi_dev_domain { int msi_setup_device_data(struct device *dev); -void msi_lock_descs(struct device *dev); -void msi_unlock_descs(struct device *dev); +void __msi_lock_descs(struct device *dev); +void __msi_unlock_descs(struct device *dev); + +DEFINE_LOCK_GUARD_1(msi_descs_lock, struct device, __msi_lock_descs(_T->lock), + __msi_unlock_descs(_T->lock)); struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid, enum msi_desc_filter filter); @@ -420,6 +423,7 @@ struct msi_domain_info; * @msi_init: Domain specific init function for MSI interrupts * @msi_free: Domain specific function to free a MSI interrupts * @msi_prepare: Prepare the allocation of the interrupts in the domain + * @msi_teardown: Reverse the effects of @msi_prepare * @prepare_desc: Optional function to prepare the allocated MSI descriptor * in the domain * @set_desc: Set the msi descriptor for an interrupt @@ -435,8 +439,9 @@ struct msi_domain_info; * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying * irqdomain. * - * @msi_check, @msi_prepare, @prepare_desc and @set_desc are callbacks used by the - * msi_domain_alloc/free_irqs*() variants. + * @msi_check, @msi_prepare, @msi_teardown, @prepare_desc and + * @set_desc are callbacks used by the msi_domain_alloc/free_irqs*() + * variants. * * @domain_alloc_irqs, @domain_free_irqs can be used to override the * default allocation/free functions (__msi_domain_alloc/free_irqs). 
This @@ -458,6 +463,8 @@ struct msi_domain_ops { int (*msi_prepare)(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg); + void (*msi_teardown)(struct irq_domain *domain, + msi_alloc_info_t *arg); void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg, struct msi_desc *desc); void (*set_desc)(msi_alloc_info_t *arg, @@ -486,6 +493,7 @@ struct msi_domain_ops { * @handler: Optional: associated interrupt flow handler * @handler_data: Optional: associated interrupt flow handler data * @handler_name: Optional: associated interrupt flow handler name + * @alloc_data: Optional: associated interrupt allocation data * @data: Optional: domain specific data */ struct msi_domain_info { @@ -498,6 +506,7 @@ struct msi_domain_info { irq_flow_handler_t handler; void *handler_data; const char *handler_name; + msi_alloc_info_t *alloc_data; void *data; }; @@ -507,12 +516,14 @@ struct msi_domain_info { * @chip: Interrupt chip for this domain * @ops: MSI domain ops * @info: MSI domain info data + * @alloc_info: MSI domain allocation data (architecture specific) */ struct msi_domain_template { char name[48]; struct irq_chip chip; struct msi_domain_ops ops; struct msi_domain_info info; + msi_alloc_info_t alloc_info; }; /* @@ -625,6 +636,10 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); +struct irq_domain_info; +struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info, + const struct msi_parent_ops *msi_parent_ops); + bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, const struct msi_domain_template *template, unsigned int hwsize, void *domain_data, diff --git a/include/linux/namei.h b/include/linux/namei.h index bbaf55fb3101..5d085428e471 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -70,17 +70,16 @@ int vfs_path_parent_lookup(struct filename *filename, unsigned int flags, int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); -extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); -extern struct dentry *lookup_one_len(const char *, struct dentry *, int); -extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); -extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int); -struct dentry *lookup_one(struct mnt_idmap *, const char *, struct dentry *, int); +extern struct dentry *try_lookup_noperm(struct qstr *, struct dentry *); +extern struct dentry *lookup_noperm(struct qstr *, struct dentry *); +extern struct dentry *lookup_noperm_unlocked(struct qstr *, struct dentry *); +extern struct dentry *lookup_noperm_positive_unlocked(struct qstr *, struct dentry *); +struct dentry *lookup_one(struct mnt_idmap *, struct qstr *, struct dentry *); struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap, - const char *name, struct dentry *base, - int len); + struct qstr *name, struct dentry *base); struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap, - const char *name, - struct dentry *base, int len); + struct qstr *name, + struct dentry *base); extern int follow_down_one(struct path *); extern int follow_down(struct path *path, unsigned int flags); diff --git a/include/linux/net.h b/include/linux/net.h index 0ff950eecc6b..f60fff91e1df 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -70,6 +70,7 @@ enum sock_type { SOCK_DCCP = 6, SOCK_PACKET = 10, }; +#endif /* ARCH_HAS_SOCKET_TYPES */ 
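The <linux/namei.h> hunk above replaces the lookup_one_len() family with qstr-based lookup_noperm() variants. A hedged conversion sketch, assuming a hypothetical example_find_child() wrapper; as before, the caller is expected to hold the parent directory's i_rwsem:

    #include <linux/dcache.h>
    #include <linux/namei.h>
    #include <linux/string.h>

    static struct dentry *example_find_child(struct dentry *dir, const char *name)
    {
            struct qstr qname = QSTR_INIT(name, strlen(name));

            /* Was: lookup_one_len(name, dir, strlen(name)); the new helper
             * takes the name as a qstr and still skips permission checks. */
            return lookup_noperm(&qname, dir);
    }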
#define SOCK_MAX (SOCK_PACKET + 1) /* Mask which covers at least up to SOCK_MASK-1. The @@ -81,8 +82,7 @@ enum sock_type { #ifndef SOCK_NONBLOCK #define SOCK_NONBLOCK O_NONBLOCK #endif - -#endif /* ARCH_HAS_SOCKET_TYPES */ +#define SOCK_COREDUMP O_NOCTTY /** * enum sock_shutdown_cmd - Shutdown types diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 2479ed10f53e..51308f65b72f 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -303,6 +303,7 @@ enum nvme_ctrl_attr { NVME_CTRL_ATTR_TBKAS = (1 << 6), NVME_CTRL_ATTR_ELBAS = (1 << 15), NVME_CTRL_ATTR_RHII = (1 << 18), + NVME_CTRL_ATTR_FDPS = (1 << 19), }; struct nvme_id_ctrl { @@ -689,6 +690,44 @@ struct nvme_rotational_media_log { __u8 rsvd24[488]; }; +struct nvme_fdp_config { + __u8 flags; +#define FDPCFG_FDPE (1U << 0) + __u8 fdpcidx; + __le16 reserved; +}; + +struct nvme_fdp_ruh_desc { + __u8 ruht; + __u8 reserved[3]; +}; + +struct nvme_fdp_config_desc { + __le16 dsze; + __u8 fdpa; + __u8 vss; + __le32 nrg; + __le16 nruh; + __le16 maxpids; + __le32 nns; + __le64 runs; + __le32 erutl; + __u8 rsvd28[36]; + struct nvme_fdp_ruh_desc ruhs[]; +}; + +struct nvme_fdp_config_log { + __le16 numfdpc; + __u8 ver; + __u8 rsvd3; + __le32 sze; + __u8 rsvd8[8]; + /* + * This is followed by variable number of nvme_fdp_config_desc + * structures, but sparse doesn't like nested variable sized arrays. + */ +}; + struct nvme_smart_log { __u8 critical_warning; __u8 temperature[2]; @@ -915,6 +954,7 @@ enum nvme_opcode { nvme_cmd_resv_register = 0x0d, nvme_cmd_resv_report = 0x0e, nvme_cmd_resv_acquire = 0x11, + nvme_cmd_io_mgmt_recv = 0x12, nvme_cmd_resv_release = 0x15, nvme_cmd_zone_mgmt_send = 0x79, nvme_cmd_zone_mgmt_recv = 0x7a, @@ -936,6 +976,7 @@ enum nvme_opcode { nvme_opcode_name(nvme_cmd_resv_register), \ nvme_opcode_name(nvme_cmd_resv_report), \ nvme_opcode_name(nvme_cmd_resv_acquire), \ + nvme_opcode_name(nvme_cmd_io_mgmt_recv), \ nvme_opcode_name(nvme_cmd_resv_release), \ nvme_opcode_name(nvme_cmd_zone_mgmt_send), \ nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \ @@ -1087,6 +1128,7 @@ enum { NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, NVME_RW_PRINFO_PRACT = 1 << 13, NVME_RW_DTYPE_STREAMS = 1 << 4, + NVME_RW_DTYPE_DPLCMT = 2 << 4, NVME_WZ_DEAC = 1 << 9, }; @@ -1174,6 +1216,38 @@ struct nvme_zone_mgmt_recv_cmd { __le32 cdw14[2]; }; +struct nvme_io_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 mo; + __u8 rsvd11; + __u16 mos; + __le32 numd; + __le32 cdw12[4]; +}; + +enum { + NVME_IO_MGMT_RECV_MO_RUHS = 1, +}; + +struct nvme_fdp_ruh_status_desc { + __le16 pid; + __le16 ruhid; + __le32 earutr; + __le64 ruamw; + __u8 reserved[16]; +}; + +struct nvme_fdp_ruh_status { + __u8 rsvd0[14]; + __le16 nruhsd; + struct nvme_fdp_ruh_status_desc ruhsd[]; +}; + enum { NVME_ZRA_ZONE_REPORT = 0, NVME_ZRASF_ZONE_REPORT_ALL = 0, @@ -1309,6 +1383,7 @@ enum { NVME_FEAT_PLM_WINDOW = 0x14, NVME_FEAT_HOST_BEHAVIOR = 0x16, NVME_FEAT_SANITIZE = 0x17, + NVME_FEAT_FDP = 0x1d, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, NVME_FEAT_RESV_MASK = 0x82, @@ -1329,6 +1404,7 @@ enum { NVME_LOG_ANA = 0x0c, NVME_LOG_FEATURES = 0x12, NVME_LOG_RMI = 0x16, + NVME_LOG_FDP_CONFIGS = 0x20, NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), @@ -1923,6 +1999,7 @@ struct nvme_command { struct nvmf_auth_receive_command auth_receive; struct nvme_dbbuf dbbuf; struct nvme_directive_cmd directive; + struct nvme_io_mgmt_recv_cmd imr; }; }; diff --git 
a/include/linux/page-flags.h b/include/linux/page-flags.h index e6a21b62dcce..3b814ce08331 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -615,6 +615,13 @@ FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE) PAGEFLAG_FALSE(HighMem, highmem) #endif +/* Does kmap_local_folio() only allow access to one page of the folio? */ +#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP +#define folio_test_partial_kmap(f) true +#else +#define folio_test_partial_kmap(f) folio_test_highmem(f) +#endif + #ifdef CONFIG_SWAP static __always_inline bool folio_test_swapcache(const struct folio *folio) { diff --git a/include/linux/panic.h b/include/linux/panic.h index 2494d51707ef..4adc65766935 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -20,8 +20,6 @@ extern bool panic_triggering_all_cpu_backtrace; extern int panic_timeout; extern unsigned long panic_print; extern int panic_on_oops; -extern int panic_on_unrecovered_nmi; -extern int panic_on_io_nmi; extern int panic_on_warn; extern unsigned long panic_on_taint; diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index c5e9cac0575e..eeeff2a04529 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -79,4 +79,6 @@ static inline void part_stat_set_all(struct block_device *part, int value) #define part_stat_local_read_cpu(part, field, cpu) \ local_read(&(part_stat_get_cpu(part, field, cpu))) +unsigned int bdev_count_inflight(struct block_device *part); + #endif /* _LINUX_PART_STAT_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 51e2bd6405cd..b231cbc67a35 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1671,7 +1671,7 @@ void pci_disable_msi(struct pci_dev *dev); int pci_msix_vec_count(struct pci_dev *dev); void pci_disable_msix(struct pci_dev *dev); void pci_restore_msi_state(struct pci_dev *dev); -int pci_msi_enabled(void); +bool pci_msi_enabled(void); int pci_enable_msi(struct pci_dev *dev); int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec); @@ -1704,7 +1704,7 @@ static inline void pci_disable_msi(struct pci_dev *dev) { } static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } static inline void pci_disable_msix(struct pci_dev *dev) { } static inline void pci_restore_msi_state(struct pci_dev *dev) { } -static inline int pci_msi_enabled(void) { return 0; } +static inline bool pci_msi_enabled(void) { return false; } static inline int pci_enable_msi(struct pci_dev *dev) { return -ENOSYS; } static inline int pci_enable_msix_range(struct pci_dev *dev, diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index af7d75ede619..288f5235649a 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -43,9 +43,10 @@ is_static struct percpu_rw_semaphore name = { \ #define DEFINE_STATIC_PERCPU_RWSEM(name) \ __DEFINE_PERCPU_RWSEM(name, static) -extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool); +extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool, bool); -static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +static inline void percpu_down_read_internal(struct percpu_rw_semaphore *sem, + bool freezable) { might_sleep(); @@ -63,7 +64,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) if (likely(rcu_sync_is_idle(&sem->rss))) this_cpu_inc(*sem->read_count); else - __percpu_down_read(sem, false); /* Unconditional memory barrier */ + __percpu_down_read(sem, false, freezable); /* Unconditional memory barrier */ /* 
* The preempt_enable() prevents the compiler from * bleeding the critical section out. @@ -71,6 +72,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) preempt_enable(); } +static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +{ + percpu_down_read_internal(sem, false); +} + +static inline void percpu_down_read_freezable(struct percpu_rw_semaphore *sem, + bool freeze) +{ + percpu_down_read_internal(sem, freeze); +} + static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) { bool ret = true; @@ -82,7 +94,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) if (likely(rcu_sync_is_idle(&sem->rss))) this_cpu_inc(*sem->read_count); else - ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ + ret = __percpu_down_read(sem, true, false); /* Unconditional memory barrier */ preempt_enable(); /* * The barrier() from preempt_enable() prevents the compiler from diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 52b5ea663b9f..85bf8dd9f087 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -15,11 +15,7 @@ /* enough to cover all DEFINE_PER_CPUs in modules */ #ifdef CONFIG_MODULES -#ifdef CONFIG_MEM_ALLOC_PROFILING -#define PERCPU_MODULE_RESERVE (8 << 13) -#else #define PERCPU_MODULE_RESERVE (8 << 10) -#endif #else #define PERCPU_MODULE_RESERVE 0 #endif diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 0069ba6866a4..52dc7cfab0e0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -26,18 +26,9 @@ # include <asm/local64.h> #endif -#define PERF_GUEST_ACTIVE 0x01 -#define PERF_GUEST_USER 0x02 - -struct perf_guest_info_callbacks { - unsigned int (*state)(void); - unsigned long (*get_ip)(void); - unsigned int (*handle_intel_pt_intr)(void); -}; - #ifdef CONFIG_HAVE_HW_BREAKPOINT -#include <linux/rhashtable-types.h> -#include <asm/hw_breakpoint.h> +# include <linux/rhashtable-types.h> +# include <asm/hw_breakpoint.h> #endif #include <linux/list.h> @@ -62,19 +53,20 @@ struct perf_guest_info_callbacks { #include <linux/security.h> #include <linux/static_call.h> #include <linux/lockdep.h> + #include <asm/local.h> struct perf_callchain_entry { - __u64 nr; - __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ + u64 nr; + u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ }; struct perf_callchain_entry_ctx { - struct perf_callchain_entry *entry; - u32 max_stack; - u32 nr; - short contexts; - bool contexts_maxed; + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short contexts; + bool contexts_maxed; }; typedef unsigned long (*perf_copy_f)(void *dst, const void *src, @@ -121,8 +113,8 @@ static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) * already stored in age order, the hw_idx should be 0. 
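The percpu-rwsem hunk a little further up adds percpu_down_read_freezable(), which forwards a freezable flag to __percpu_down_read() so that a reader blocked in the slow path can be frozen for suspend. A minimal sketch of the intended call pattern; the semaphore and the critical section are hypothetical.

#include <linux/percpu-rwsem.h>

static DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);

static void example_reader(void)
{
	/*
	 * Behaves like percpu_down_read(), except that a reader sleeping in
	 * the slow path may be frozen (freezable == true).
	 */
	percpu_down_read_freezable(&example_rwsem, true);

	/* ... read-side critical section ... */

	percpu_up_read(&example_rwsem);
}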
*/ struct perf_branch_stack { - __u64 nr; - __u64 hw_idx; + u64 nr; + u64 hw_idx; struct perf_branch_entry entries[]; }; @@ -132,10 +124,10 @@ struct task_struct; * extra PMU register associated with an event */ struct hw_perf_event_extra { - u64 config; /* register value */ - unsigned int reg; /* register address or index */ - int alloc; /* extra register already allocated */ - int idx; /* index in shared_regs->regs[] */ + u64 config; /* register value */ + unsigned int reg; /* register address or index */ + int alloc; /* extra register already allocated */ + int idx; /* index in shared_regs->regs[] */ }; /** @@ -144,8 +136,8 @@ struct hw_perf_event_extra { * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific * usage. */ -#define PERF_EVENT_FLAG_ARCH 0x000fffff -#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000 +#define PERF_EVENT_FLAG_ARCH 0x0fffffff +#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000 static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0); @@ -157,7 +149,9 @@ struct hw_perf_event { union { struct { /* hardware */ u64 config; + u64 config1; u64 last_tag; + u64 dyn_constraint; unsigned long config_base; unsigned long event_base; int event_base_rdpmc; @@ -225,9 +219,14 @@ struct hw_perf_event { /* * hw_perf_event::state flags; used to track the PERF_EF_* state. */ -#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ -#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ -#define PERF_HES_ARCH 0x04 + +/* the counter is stopped */ +#define PERF_HES_STOPPED 0x01 + +/* event->count up-to-date */ +#define PERF_HES_UPTODATE 0x02 + +#define PERF_HES_ARCH 0x04 int state; @@ -276,7 +275,7 @@ struct hw_perf_event { */ u64 freq_time_stamp; u64 freq_count_stamp; -#endif +#endif /* CONFIG_PERF_EVENTS */ }; struct perf_event; @@ -285,28 +284,33 @@ struct perf_event_pmu_context; /* * Common implementation detail of pmu::{start,commit,cancel}_txn */ -#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */ -#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */ + +/* txn to add/schedule event on PMU */ +#define PERF_PMU_TXN_ADD 0x1 + +/* txn to read event group from PMU */ +#define PERF_PMU_TXN_READ 0x2 /** * pmu::capabilities flags */ -#define PERF_PMU_CAP_NO_INTERRUPT 0x0001 -#define PERF_PMU_CAP_NO_NMI 0x0002 -#define PERF_PMU_CAP_AUX_NO_SG 0x0004 -#define PERF_PMU_CAP_EXTENDED_REGS 0x0008 -#define PERF_PMU_CAP_EXCLUSIVE 0x0010 -#define PERF_PMU_CAP_ITRACE 0x0020 -#define PERF_PMU_CAP_NO_EXCLUDE 0x0040 -#define PERF_PMU_CAP_AUX_OUTPUT 0x0080 -#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 -#define PERF_PMU_CAP_AUX_PAUSE 0x0200 +#define PERF_PMU_CAP_NO_INTERRUPT 0x0001 +#define PERF_PMU_CAP_NO_NMI 0x0002 +#define PERF_PMU_CAP_AUX_NO_SG 0x0004 +#define PERF_PMU_CAP_EXTENDED_REGS 0x0008 +#define PERF_PMU_CAP_EXCLUSIVE 0x0010 +#define PERF_PMU_CAP_ITRACE 0x0020 +#define PERF_PMU_CAP_NO_EXCLUDE 0x0040 +#define PERF_PMU_CAP_AUX_OUTPUT 0x0080 +#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 +#define PERF_PMU_CAP_AUX_PAUSE 0x0200 +#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400 /** * pmu::scope */ enum perf_pmu_scope { - PERF_PMU_SCOPE_NONE = 0, + PERF_PMU_SCOPE_NONE = 0, PERF_PMU_SCOPE_CORE, PERF_PMU_SCOPE_DIE, PERF_PMU_SCOPE_CLUSTER, @@ -325,6 +329,9 @@ struct perf_output_handle; struct pmu { struct list_head entry; + spinlock_t events_lock; + struct list_head events; + struct module *module; struct device *dev; struct device *parent; @@ -387,11 +394,21 @@ struct pmu { * Flags for ->add()/->del()/ ->start()/->stop(). 
There are * matching hw_perf_event::state flags. */ -#define PERF_EF_START 0x01 /* start the counter when adding */ -#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ -#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ -#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */ -#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */ + +/* start the counter when adding */ +#define PERF_EF_START 0x01 + +/* reload the counter when starting */ +#define PERF_EF_RELOAD 0x02 + +/* update the counter when stopping */ +#define PERF_EF_UPDATE 0x04 + +/* AUX area event, pause tracing */ +#define PERF_EF_PAUSE 0x08 + +/* AUX area event, resume tracing */ +#define PERF_EF_RESUME 0x10 /* * Adds/Removes a counter to/from the PMU, can be done inside a @@ -590,10 +607,10 @@ enum perf_addr_filter_action_t { * This is a hardware-agnostic filter configuration as specified by the user. */ struct perf_addr_filter { - struct list_head entry; - struct path path; - unsigned long offset; - unsigned long size; + struct list_head entry; + struct path path; + unsigned long offset; + unsigned long size; enum perf_addr_filter_action_t action; }; @@ -608,23 +625,24 @@ struct perf_addr_filter { * bundled together; see perf_event_addr_filters(). */ struct perf_addr_filters_head { - struct list_head list; - raw_spinlock_t lock; - unsigned int nr_file_filters; + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; }; struct perf_addr_filter_range { - unsigned long start; - unsigned long size; + unsigned long start; + unsigned long size; }; /** * enum perf_event_state - the states of an event: */ enum perf_event_state { - PERF_EVENT_STATE_DEAD = -4, - PERF_EVENT_STATE_EXIT = -3, - PERF_EVENT_STATE_ERROR = -2, + PERF_EVENT_STATE_DEAD = -5, + PERF_EVENT_STATE_REVOKED = -4, /* pmu gone, must not touch */ + PERF_EVENT_STATE_EXIT = -3, /* task died, still inherit */ + PERF_EVENT_STATE_ERROR = -2, /* scheduling error, can enable */ PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1, @@ -662,24 +680,24 @@ struct swevent_hlist { struct rcu_head rcu_head; }; -#define PERF_ATTACH_CONTEXT 0x0001 -#define PERF_ATTACH_GROUP 0x0002 -#define PERF_ATTACH_TASK 0x0004 -#define PERF_ATTACH_TASK_DATA 0x0008 -#define PERF_ATTACH_GLOBAL_DATA 0x0010 -#define PERF_ATTACH_SCHED_CB 0x0020 -#define PERF_ATTACH_CHILD 0x0040 -#define PERF_ATTACH_EXCLUSIVE 0x0080 -#define PERF_ATTACH_CALLCHAIN 0x0100 -#define PERF_ATTACH_ITRACE 0x0200 +#define PERF_ATTACH_CONTEXT 0x0001 +#define PERF_ATTACH_GROUP 0x0002 +#define PERF_ATTACH_TASK 0x0004 +#define PERF_ATTACH_TASK_DATA 0x0008 +#define PERF_ATTACH_GLOBAL_DATA 0x0010 +#define PERF_ATTACH_SCHED_CB 0x0020 +#define PERF_ATTACH_CHILD 0x0040 +#define PERF_ATTACH_EXCLUSIVE 0x0080 +#define PERF_ATTACH_CALLCHAIN 0x0100 +#define PERF_ATTACH_ITRACE 0x0200 struct bpf_prog; struct perf_cgroup; struct perf_buffer; struct pmu_event_list { - raw_spinlock_t lock; - struct list_head list; + raw_spinlock_t lock; + struct list_head list; }; /* @@ -689,12 +707,12 @@ struct pmu_event_list { * disabled is sufficient since it will hold-off the IPIs. 
*/ #ifdef CONFIG_PROVE_LOCKING -#define lockdep_assert_event_ctx(event) \ +# define lockdep_assert_event_ctx(event) \ WARN_ON_ONCE(__lockdep_enabled && \ (this_cpu_read(hardirqs_enabled) && \ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD)) #else -#define lockdep_assert_event_ctx(event) +# define lockdep_assert_event_ctx(event) #endif #define for_each_sibling_event(sibling, event) \ @@ -852,9 +870,9 @@ struct perf_event { #ifdef CONFIG_EVENT_TRACING struct trace_event_call *tp_event; struct event_filter *filter; -#ifdef CONFIG_FUNCTION_TRACER +# ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops ftrace_ops; -#endif +# endif #endif #ifdef CONFIG_CGROUP_PERF @@ -865,6 +883,7 @@ struct perf_event { void *security; #endif struct list_head sb_list; + struct list_head pmu_list; /* * Certain events gets forwarded to another pmu internally by over- @@ -872,7 +891,7 @@ struct perf_event { * of it. event->orig_type contains original 'type' requested by * user. */ - __u32 orig_type; + u32 orig_type; #endif /* CONFIG_PERF_EVENTS */ }; @@ -937,8 +956,8 @@ static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc) } struct perf_event_groups { - struct rb_root tree; - u64 index; + struct rb_root tree; + u64 index; }; @@ -1155,7 +1174,7 @@ extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); -extern void perf_pmu_unregister(struct pmu *pmu); +extern int perf_pmu_unregister(struct pmu *pmu); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); @@ -1181,16 +1200,18 @@ extern void perf_pmu_resched(struct pmu *pmu); extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); + extern struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, - int cpu, - struct task_struct *task, - perf_overflow_handler_t callback, - void *context); + int cpu, + struct task_struct *task, + perf_overflow_handler_t callback, + void *context); + extern void perf_pmu_migrate_context(struct pmu *pmu, - int src_cpu, int dst_cpu); -int perf_event_read_local(struct perf_event *event, u64 *value, - u64 *enabled, u64 *running); + int src_cpu, int dst_cpu); +extern int perf_event_read_local(struct perf_event *event, u64 *value, + u64 *enabled, u64 *running); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -1407,14 +1428,14 @@ static inline u32 perf_sample_data_size(struct perf_sample_data *data, */ static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br) { - br->mispred = 0; - br->predicted = 0; - br->in_tx = 0; - br->abort = 0; - br->cycles = 0; - br->type = 0; - br->spec = PERF_BR_SPEC_NA; - br->reserved = 0; + br->mispred = 0; + br->predicted = 0; + br->in_tx = 0; + br->abort = 0; + br->cycles = 0; + br->type = 0; + br->spec = PERF_BR_SPEC_NA; + br->reserved = 0; } extern void perf_output_sample(struct perf_output_handle *handle, @@ -1603,7 +1624,17 @@ extern void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags); +#define PERF_GUEST_ACTIVE 0x01 +#define PERF_GUEST_USER 0x02 + +struct perf_guest_info_callbacks { + unsigned int (*state)(void); + unsigned long (*get_ip)(void); + unsigned int (*handle_intel_pt_intr)(void); +}; + #ifdef 
CONFIG_GUEST_PERF_EVENTS + extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs; DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state); @@ -1614,21 +1645,27 @@ static inline unsigned int perf_guest_state(void) { return static_call(__perf_guest_state)(); } + static inline unsigned long perf_guest_get_ip(void) { return static_call(__perf_guest_get_ip)(); } + static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return static_call(__perf_guest_handle_intel_pt_intr)(); } + extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs); extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs); -#else + +#else /* !CONFIG_GUEST_PERF_EVENTS: */ + static inline unsigned int perf_guest_state(void) { return 0; } static inline unsigned long perf_guest_get_ip(void) { return 0; } static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; } -#endif /* CONFIG_GUEST_PERF_EVENTS */ + +#endif /* !CONFIG_GUEST_PERF_EVENTS */ extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); @@ -1658,6 +1695,7 @@ static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx * { if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; ++ctx->contexts; return 0; @@ -1671,6 +1709,7 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 { if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; ++ctx->nr; return 0; @@ -1697,7 +1736,7 @@ static inline int perf_is_paranoid(void) return sysctl_perf_event_paranoid > -1; } -int perf_allow_kernel(void); +extern int perf_allow_kernel(void); static inline int perf_allow_cpu(void) { @@ -1760,7 +1799,7 @@ static inline bool needs_branch_stack(struct perf_event *event) static inline bool has_aux(struct perf_event *event) { - return event->pmu->setup_aux; + return event->pmu && event->pmu->setup_aux; } static inline bool has_aux_action(struct perf_event *event) @@ -1819,7 +1858,7 @@ extern int perf_output_begin_backward(struct perf_output_handle *handle, extern void perf_output_end(struct perf_output_handle *handle); extern unsigned int perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len); + const void *buf, unsigned int len); extern unsigned int perf_output_skip(struct perf_output_handle *handle, unsigned int len); extern long perf_output_copy_aux(struct perf_output_handle *aux_handle, @@ -1836,7 +1875,9 @@ extern void perf_event_task_tick(void); extern int perf_event_account_interrupt(struct perf_event *event); extern int perf_event_period(struct perf_event *event, u64 value); extern u64 perf_event_pause(struct perf_event *event, bool reset); + #else /* !CONFIG_PERF_EVENTS: */ + static inline void * perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) { return NULL; } @@ -1914,19 +1955,14 @@ static inline void perf_event_disable(struct perf_event *event) { } static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } -static inline int perf_event_period(struct perf_event *event, u64 value) -{ - return -EINVAL; -} -static inline u64 perf_event_pause(struct perf_event *event, bool reset) -{ - return 0; -} -static inline int 
perf_exclude_event(struct perf_event *event, struct pt_regs *regs) -{ - return 0; -} -#endif +static inline int +perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; } +static inline u64 +perf_event_pause(struct perf_event *event, bool reset) { return 0; } +static inline int +perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { return 0; } + +#endif /* !CONFIG_PERF_EVENTS */ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) extern void perf_restore_debug_store(void); @@ -1934,31 +1970,31 @@ extern void perf_restore_debug_store(void); static inline void perf_restore_debug_store(void) { } #endif -#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) +#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) struct perf_pmu_events_attr { - struct device_attribute attr; - u64 id; - const char *event_str; + struct device_attribute attr; + u64 id; + const char *event_str; }; struct perf_pmu_events_ht_attr { - struct device_attribute attr; - u64 id; - const char *event_str_ht; - const char *event_str_noht; + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; }; struct perf_pmu_events_hybrid_attr { - struct device_attribute attr; - u64 id; - const char *event_str; - u64 pmu_type; + struct device_attribute attr; + u64 id; + const char *event_str; + u64 pmu_type; }; struct perf_pmu_format_hybrid_attr { - struct device_attribute attr; - u64 pmu_type; + struct device_attribute attr; + u64 pmu_type; }; ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, @@ -2000,11 +2036,11 @@ static struct device_attribute format_attr_##_name = __ATTR_RO(_name) /* Performance counter hotplug functions */ #ifdef CONFIG_PERF_EVENTS -int perf_event_init_cpu(unsigned int cpu); -int perf_event_exit_cpu(unsigned int cpu); +extern int perf_event_init_cpu(unsigned int cpu); +extern int perf_event_exit_cpu(unsigned int cpu); #else -#define perf_event_init_cpu NULL -#define perf_event_exit_cpu NULL +# define perf_event_init_cpu NULL +# define perf_event_exit_cpu NULL #endif extern void arch_perf_update_userpage(struct perf_event *event, diff --git a/include/linux/pid.h b/include/linux/pid.h index 311ecebd7d56..453ae6d8a68d 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -77,7 +77,7 @@ struct file; struct pid *pidfd_pid(const struct file *file); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags); -int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret); +int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file); void do_notify_pidfd(struct task_struct *task); static inline struct pid *get_pid(struct pid *pid) diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h index 05e6f8f4a026..77e7db194914 100644 --- a/include/linux/pidfs.h +++ b/include/linux/pidfs.h @@ -2,11 +2,19 @@ #ifndef _LINUX_PID_FS_H #define _LINUX_PID_FS_H +struct coredump_params; + struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags); void __init pidfs_init(void); void pidfs_add_pid(struct pid *pid); void pidfs_remove_pid(struct pid *pid); void pidfs_exit(struct task_struct *tsk); +#ifdef CONFIG_COREDUMP +void pidfs_coredump(const struct coredump_params *cprm); +#endif extern const struct dentry_operations pidfs_dentry_operations; +int pidfs_register_pid(struct pid *pid); +void pidfs_get_pid(struct pid *pid); +void pidfs_put_pid(struct pid 
*pid); #endif /* _LINUX_PID_FS_H */ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index f3cad182d4ef..0b3a36bdaa90 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -954,6 +954,7 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret); void *psp_copy_user_blob(u64 uaddr, u32 len); void *snp_alloc_firmware_page(gfp_t mask); void snp_free_firmware_page(void *addr); +void sev_platform_shutdown(void); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ @@ -988,6 +989,8 @@ static inline void *snp_alloc_firmware_page(gfp_t mask) static inline void snp_free_firmware_page(void *addr) { } +static inline void sev_platform_shutdown(void) { } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h index 6322d8c1c6b4..2fb2af6d9824 100644 --- a/include/linux/rcuref.h +++ b/include/linux/rcuref.h @@ -30,7 +30,11 @@ static inline void rcuref_init(rcuref_t *ref, unsigned int cnt) * rcuref_read - Read the number of held reference counts of a rcuref * @ref: Pointer to the reference count * - * Return: The number of held references (0 ... N) + * Return: The number of held references (0 ... N). The value 0 does not + * indicate that it is safe to schedule the object, protected by this reference + * counter, for deconstruction. + * If you want to know if the reference counter has been marked DEAD (as + * signaled by rcuref_put()) please use rcuread_is_dead(). */ static inline unsigned int rcuref_read(rcuref_t *ref) { @@ -40,6 +44,22 @@ static inline unsigned int rcuref_read(rcuref_t *ref) return c >= RCUREF_RELEASED ? 0 : c + 1; } +/** + * rcuref_is_dead - Check if the rcuref has been already marked dead + * @ref: Pointer to the reference count + * + * Return: True if the object has been marked DEAD. This signals that a previous + * invocation of rcuref_put() returned true on this reference counter meaning + * the protected object can safely be scheduled for deconstruction. + * Otherwise, returns false. + */ +static inline bool rcuref_is_dead(rcuref_t *ref) +{ + unsigned int c = atomic_read(&ref->refcnt); + + return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF); +} + extern __must_check bool rcuref_get_slowpath(rcuref_t *ref); /** diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h index 13f17676c5f4..7e50bbc94e47 100644 --- a/include/linux/restart_block.h +++ b/include/linux/restart_block.h @@ -26,7 +26,7 @@ struct restart_block { unsigned long arch_data; long (*fn)(struct restart_block *); union { - /* For futex_wait and futex_wait_requeue_pi */ + /* For futex_wait() */ struct { u32 __user *uaddr; u32 val; diff --git a/include/linux/sched.h b/include/linux/sched.h index f96ac1982893..45e5953b8f32 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -44,7 +44,6 @@ #include <linux/seqlock_types.h> #include <linux/kcsan.h> #include <linux/rv.h> -#include <linux/livepatch_sched.h> #include <linux/uidgid_types.h> #include <linux/tracepoint-defs.h> #include <asm/kmap_size.h> @@ -1646,22 +1645,15 @@ struct task_struct { struct user_event_mm *user_event_mm; #endif - /* - * New fields for task_struct should be added above here, so that - * they are included in the randomized portion of task_struct. - */ - randomized_struct_fields_end - /* CPU-specific state of this task: */ struct thread_struct thread; /* - * WARNING: on x86, 'thread_struct' contains a variable-sized - * structure. It *MUST* be at the end of 'task_struct'. - * - * Do not put anything below here! 
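The rcuref.h hunk above documents that rcuref_read() returning 0 is not by itself a safe signal for destruction and adds rcuref_is_dead() for that purpose. A minimal sketch of a release path relying on the DEAD marking; the object type and the RCU-deferred free are hypothetical.

#include <linux/bug.h>
#include <linux/rcuref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	rcuref_t	ref;
	struct rcu_head	rcu;
};

static void example_put(struct example_obj *obj)
{
	/* rcuref_put() returns true exactly once, when the count drops and is marked DEAD. */
	if (rcuref_put(&obj->ref)) {
		/* From here on rcuref_is_dead(&obj->ref) is true; destruction is safe. */
		WARN_ON_ONCE(!rcuref_is_dead(&obj->ref));
		kfree_rcu(obj, rcu);
	}
}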
+ * New fields for task_struct should be added above here, so that + * they are included in the randomized portion of task_struct. */ -}; + randomized_struct_fields_end +} __attribute__ ((aligned (64))); #define TASK_REPORT_IDLE (TASK_REPORT + 1) #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) @@ -2089,9 +2081,6 @@ extern int __cond_resched(void); #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) -void sched_dynamic_klp_enable(void); -void sched_dynamic_klp_disable(void); - DECLARE_STATIC_CALL(cond_resched, __cond_resched); static __always_inline int _cond_resched(void) @@ -2112,7 +2101,6 @@ static __always_inline int _cond_resched(void) static inline int _cond_resched(void) { - klp_sched_try_switch(); return __cond_resched(); } @@ -2122,7 +2110,6 @@ static inline int _cond_resched(void) static inline int _cond_resched(void) { - klp_sched_try_switch(); return 0; } diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 7b4301b7235f..198bb5cc1774 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -195,6 +195,8 @@ struct sched_domain_topology_level { }; extern void __init set_sched_topology(struct sched_domain_topology_level *tl); +extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio); + # define SD_INIT_NAME(type) .name = #type @@ -223,6 +225,10 @@ static inline bool cpus_share_resources(int this_cpu, int that_cpu) return true; } +static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) +{ +} + #endif /* !CONFIG_SMP */ #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 0b273a7b9f01..5f03a39a26f7 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -104,10 +104,11 @@ static inline bool shmem_mapping(struct address_space *mapping) return false; } #endif /* CONFIG_SHMEM */ -extern void shmem_unlock_mapping(struct address_space *mapping); -extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, +void shmem_unlock_mapping(struct address_space *mapping); +struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); -extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); +int shmem_writeout(struct folio *folio, struct writeback_control *wbc); +void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); int shmem_unuse(unsigned int type); #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 0ba5e49bace4..6e64f0193777 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -249,10 +249,7 @@ struct spi_device { static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0, "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap"); -static inline struct spi_device *to_spi_device(const struct device *dev) -{ - return dev ? 
container_of(dev, struct spi_device, dev) : NULL; -} +#define to_spi_device(__dev) container_of_const(__dev, struct spi_device, dev) /* Most drivers won't need to care about device refcounting */ static inline struct spi_device *spi_dev_get(struct spi_device *spi) diff --git a/include/linux/stat.h b/include/linux/stat.h index be7496a6a0dd..e3d00e7bb26d 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -57,6 +57,7 @@ struct kstat { u32 dio_read_offset_align; u32 atomic_write_unit_min; u32 atomic_write_unit_max; + u32 atomic_write_unit_max_opt; u32 atomic_write_segments_max; }; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 5ca8d4dd149d..a40a905e5e1b 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -169,8 +169,13 @@ void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_m int node, const void *caller) __alloc_size(1); #define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__)) -void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1); -#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__)) +void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1); +#define vmalloc_huge_node(...) alloc_hooks(vmalloc_huge_node_noprof(__VA_ARGS__)) + +static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) +{ + return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE); +} extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); #define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__)) diff --git a/include/net/netdev_lock.h b/include/net/netdev_lock.h index c316b551df8d..0ee5bc766810 100644 --- a/include/net/netdev_lock.h +++ b/include/net/netdev_lock.h @@ -98,6 +98,9 @@ static inline int netdev_lock_cmp_fn(const struct lockdep_map *a, &qdisc_xmit_lock_key); \ } +#define netdev_lock_dereference(p, dev) \ + rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock)) + int netdev_debug_event(struct notifier_block *nb, unsigned long event, void *ptr); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 39365fd2ea17..06ab2a3d2ebd 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -236,7 +236,6 @@ struct xfrm_state { /* Data for encapsulator */ struct xfrm_encap_tmpl *encap; - struct sock __rcu *encap_sk; /* NAT keepalive */ u32 nat_keepalive_interval; /* seconds */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 26bc23419cfd..c53812b9026f 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -670,8 +670,6 @@ struct Scsi_Host { /* The transport requires the LUN bits NOT to be stored in CDB[1] */ unsigned no_scsi2_lun_in_cdb:1; - unsigned no_highmem:1; - /* * Optional work queue to be utilized by the transport */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 8becb4504887..8582d22f3818 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1404,6 +1404,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_iomem NULL #endif +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); + /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number diff --git a/include/trace/events/block.h b/include/trace/events/block.h index bd0ea07338eb..14a924c0e303 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -11,7 +11,7 @@ #include <linux/tracepoint.h> #include 
<uapi/linux/ioprio.h> -#define RWBS_LEN 8 +#define RWBS_LEN 9 #define IOPRIO_CLASS_STRINGS \ { IOPRIO_CLASS_NONE, "none" }, \ @@ -361,21 +361,6 @@ DECLARE_EVENT_CLASS(block_bio, ); /** - * block_bio_bounce - used bounce buffer when processing block operation - * @bio: block operation - * - * A bounce buffer was used to handle the block operation @bio in @q. - * This occurs when hardware limitations prevent a direct transfer of - * data between the @bio data memory area and the IO device. Use of a - * bounce buffer requires extra copying of data and decreases - * performance. - */ -DEFINE_EVENT(block_bio, block_bio_bounce, - TP_PROTO(struct bio *bio), - TP_ARGS(bio) -); - -/** * block_bio_backmerge - merging block operation to the end of an existing operation * @bio: new block operation to merge * diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 3efc00cc1bcd..bebc252db865 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -143,7 +143,6 @@ FLUSH_STATES #define EXTENT_FLAGS \ { EXTENT_DIRTY, "DIRTY"}, \ - { EXTENT_UPTODATE, "UPTODATE"}, \ { EXTENT_LOCKED, "LOCKED"}, \ { EXTENT_NEW, "NEW"}, \ { EXTENT_DELALLOC, "DELALLOC"}, \ @@ -224,8 +223,7 @@ DECLARE_EVENT_CLASS(btrfs__inode, __entry->generation = BTRFS_I(inode)->generation; __entry->last_trans = BTRFS_I(inode)->last_trans; __entry->logged_trans = BTRFS_I(inode)->logged_trans; - __entry->root_objectid = - BTRFS_I(inode)->root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root); ), TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu " @@ -297,7 +295,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, ), TP_fast_assign_btrfs(root->fs_info, - __entry->root_objectid = root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(root); __entry->ino = btrfs_ino(inode); __entry->start = map->start; __entry->len = map->len; @@ -376,7 +374,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, ), TP_fast_assign_btrfs(bi->root->fs_info, - __entry->root_obj = bi->root->root_key.objectid; + __entry->root_obj = btrfs_root_id(bi->root); __entry->ino = btrfs_ino(bi); __entry->isize = bi->vfs_inode.i_size; __entry->disk_isize = bi->disk_i_size; @@ -427,7 +425,7 @@ DECLARE_EVENT_CLASS( TP_fast_assign_btrfs( bi->root->fs_info, - __entry->root_obj = bi->root->root_key.objectid; + __entry->root_obj = btrfs_root_id(bi->root); __entry->ino = btrfs_ino(bi); __entry->isize = bi->vfs_inode.i_size; __entry->disk_isize = bi->disk_i_size; @@ -527,7 +525,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent, __entry->flags = ordered->flags; __entry->compress_type = ordered->compress_type; __entry->refs = refcount_read(&ordered->refs); - __entry->root_objectid = inode->root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(inode->root); __entry->truncated_len = ordered->truncated_len; ), @@ -664,7 +662,7 @@ TRACE_EVENT(btrfs_finish_ordered_extent, __entry->start = start; __entry->len = len; __entry->uptodate = uptodate; - __entry->root_objectid = inode->root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(inode->root); ), TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu uptodate=%d", @@ -705,8 +703,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; __entry->writeback_index = inode->i_mapping->writeback_index; - __entry->root_objectid = - BTRFS_I(inode)->root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root); ), 
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu " @@ -750,7 +747,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook, __entry->start = start; __entry->end = end; __entry->uptodate = uptodate; - __entry->root_objectid = inode->root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(inode->root); ), TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d", @@ -780,8 +777,7 @@ TRACE_EVENT(btrfs_sync_file, __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->parent = btrfs_ino(BTRFS_I(d_inode(dentry->d_parent))); __entry->datasync = datasync; - __entry->root_objectid = - BTRFS_I(inode)->root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root); ), TP_printk_btrfs("root=%llu(%s) ino=%llu parent=%llu datasync=%d", @@ -1052,7 +1048,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk, __entry->sub_stripes = map->sub_stripes; __entry->offset = offset; __entry->size = size; - __entry->root_objectid = fs_info->chunk_root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(fs_info->chunk_root); ), TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu " @@ -1097,7 +1093,7 @@ TRACE_EVENT(btrfs_cow_block, ), TP_fast_assign_btrfs(root->fs_info, - __entry->root_objectid = root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(root); __entry->buf_start = buf->start; __entry->refs = atomic_read(&buf->refs); __entry->cow_start = cow->start; @@ -1240,7 +1236,7 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free, TP_ARGS(fs_info, start, len) ); -TRACE_EVENT(find_free_extent, +TRACE_EVENT(btrfs_find_free_extent, TP_PROTO(const struct btrfs_root *root, const struct find_free_extent_ctl *ffe_ctl), @@ -1255,7 +1251,7 @@ TRACE_EVENT(find_free_extent, ), TP_fast_assign_btrfs(root->fs_info, - __entry->root_objectid = root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(root); __entry->num_bytes = ffe_ctl->num_bytes; __entry->empty_size = ffe_ctl->empty_size; __entry->flags = ffe_ctl->flags; @@ -1268,7 +1264,7 @@ TRACE_EVENT(find_free_extent, BTRFS_GROUP_FLAGS)) ); -TRACE_EVENT(find_free_extent_search_loop, +TRACE_EVENT(btrfs_find_free_extent_search_loop, TP_PROTO(const struct btrfs_root *root, const struct find_free_extent_ctl *ffe_ctl), @@ -1284,7 +1280,7 @@ TRACE_EVENT(find_free_extent_search_loop, ), TP_fast_assign_btrfs(root->fs_info, - __entry->root_objectid = root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(root); __entry->num_bytes = ffe_ctl->num_bytes; __entry->empty_size = ffe_ctl->empty_size; __entry->flags = ffe_ctl->flags; @@ -1298,7 +1294,7 @@ TRACE_EVENT(find_free_extent_search_loop, __entry->loop) ); -TRACE_EVENT(find_free_extent_have_block_group, +TRACE_EVENT(btrfs_find_free_extent_have_block_group, TP_PROTO(const struct btrfs_root *root, const struct find_free_extent_ctl *ffe_ctl, @@ -1318,7 +1314,7 @@ TRACE_EVENT(find_free_extent_have_block_group, ), TP_fast_assign_btrfs(root->fs_info, - __entry->root_objectid = root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(root); __entry->num_bytes = ffe_ctl->num_bytes; __entry->empty_size = ffe_ctl->empty_size; __entry->flags = ffe_ctl->flags; @@ -1480,7 +1476,7 @@ TRACE_EVENT(btrfs_setup_cluster, ); struct extent_state; -TRACE_EVENT(alloc_extent_state, +TRACE_EVENT(btrfs_alloc_extent_state, TP_PROTO(const struct extent_state *state, gfp_t mask, unsigned long IP), @@ -1503,7 +1499,7 @@ TRACE_EVENT(alloc_extent_state, show_gfp_flags(__entry->mask), __entry->ip) ); -TRACE_EVENT(free_extent_state, +TRACE_EVENT(btrfs_free_extent_state, 
TP_PROTO(const struct extent_state *state, unsigned long IP), @@ -1672,8 +1668,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data, ), TP_fast_assign_btrfs(btrfs_sb(inode->i_sb), - __entry->rootid = - BTRFS_I(inode)->root->root_key.objectid; + __entry->rootid = btrfs_root_id(BTRFS_I(inode)->root); __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->start = start; __entry->len = len; @@ -1744,7 +1739,7 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent, TP_ARGS(fs_info, rec, bytenr) ); -TRACE_EVENT(qgroup_num_dirty_extents, +TRACE_EVENT(btrfs_qgroup_num_dirty_extents, TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid, u64 num_dirty_extents), @@ -1798,7 +1793,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent, __entry->nr_new_roots) ); -TRACE_EVENT(qgroup_update_counters, +TRACE_EVENT(btrfs_qgroup_update_counters, TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup, @@ -1827,7 +1822,7 @@ TRACE_EVENT(qgroup_update_counters, __entry->cur_old_count, __entry->cur_new_count) ); -TRACE_EVENT(qgroup_update_reserve, +TRACE_EVENT(btrfs_qgroup_update_reserve, TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup, s64 diff, int type), @@ -1853,7 +1848,7 @@ TRACE_EVENT(qgroup_update_reserve, __entry->cur_reserved, __entry->diff) ); -TRACE_EVENT(qgroup_meta_reserve, +TRACE_EVENT(btrfs_qgroup_meta_reserve, TP_PROTO(const struct btrfs_root *root, s64 diff, int type), @@ -1866,7 +1861,7 @@ TRACE_EVENT(qgroup_meta_reserve, ), TP_fast_assign_btrfs(root->fs_info, - __entry->refroot = root->root_key.objectid; + __entry->refroot = btrfs_root_id(root); __entry->diff = diff; __entry->type = type; ), @@ -1876,7 +1871,7 @@ TRACE_EVENT(qgroup_meta_reserve, __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff) ); -TRACE_EVENT(qgroup_meta_convert, +TRACE_EVENT(btrfs_qgroup_meta_convert, TP_PROTO(const struct btrfs_root *root, s64 diff), @@ -1888,7 +1883,7 @@ TRACE_EVENT(qgroup_meta_convert, ), TP_fast_assign_btrfs(root->fs_info, - __entry->refroot = root->root_key.objectid; + __entry->refroot = btrfs_root_id(root); __entry->diff = diff; ), @@ -1899,7 +1894,7 @@ TRACE_EVENT(qgroup_meta_convert, __entry->diff) ); -TRACE_EVENT(qgroup_meta_free_all_pertrans, +TRACE_EVENT(btrfs_qgroup_meta_free_all_pertrans, TP_PROTO(struct btrfs_root *root), @@ -1912,7 +1907,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans, ), TP_fast_assign_btrfs(root->fs_info, - __entry->refroot = root->root_key.objectid; + __entry->refroot = btrfs_root_id(root); spin_lock(&root->qgroup_meta_rsv_lock); __entry->diff = -(s64)root->qgroup_meta_rsv_pertrans; spin_unlock(&root->qgroup_meta_rsv_lock); @@ -1994,7 +1989,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents, ), TP_fast_assign_btrfs(root->fs_info, - __entry->root_objectid = root->root_key.objectid; + __entry->root_objectid = btrfs_root_id(root); __entry->ino = ino; __entry->mod = mod; __entry->outstanding = outstanding; @@ -2074,12 +2069,12 @@ TRACE_EVENT(btrfs_set_extent_bit, __field( unsigned, set_bits) ), - TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree), - const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree); + TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree), + const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree); __entry->owner = tree->owner; __entry->ino = inode ? btrfs_ino(inode) : 0; - __entry->rootid = inode ? inode->root->root_key.objectid : 0; + __entry->rootid = inode ? 
btrfs_root_id(inode->root) : 0; __entry->start = start; __entry->len = len; __entry->set_bits = set_bits; @@ -2107,12 +2102,12 @@ TRACE_EVENT(btrfs_clear_extent_bit, __field( unsigned, clear_bits) ), - TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree), - const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree); + TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree), + const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree); __entry->owner = tree->owner; __entry->ino = inode ? btrfs_ino(inode) : 0; - __entry->rootid = inode ? inode->root->root_key.objectid : 0; + __entry->rootid = inode ? btrfs_root_id(inode->root) : 0; __entry->start = start; __entry->len = len; __entry->clear_bits = clear_bits; @@ -2141,12 +2136,12 @@ TRACE_EVENT(btrfs_convert_extent_bit, __field( unsigned, clear_bits) ), - TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree), - const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree); + TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree), + const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree); __entry->owner = tree->owner; __entry->ino = inode ? btrfs_ino(inode) : 0; - __entry->rootid = inode ? inode->root->root_key.objectid : 0; + __entry->rootid = inode ? btrfs_root_id(inode->root) : 0; __entry->start = start; __entry->len = len; __entry->set_bits = set_bits; @@ -2341,11 +2336,7 @@ DEFINE_EVENT(btrfs_locking_events, name, \ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_unlock); DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock); -DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking); -DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read); -DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write); DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock); -DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic); DECLARE_EVENT_CLASS(btrfs__space_info_update, @@ -2621,7 +2612,7 @@ TRACE_EVENT(btrfs_extent_map_shrinker_remove_em, TP_fast_assign_btrfs(inode->root->fs_info, __entry->ino = btrfs_ino(inode); - __entry->root_id = inode->root->root_key.objectid; + __entry->root_id = btrfs_root_id(inode->root); __entry->start = em->start; __entry->len = em->len; __entry->flags = em->flags; diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index c69c7b1e41d1..a5f4b9234f46 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -113,7 +113,7 @@ TRACE_EVENT(erofs_read_folio, __entry->raw) ); -TRACE_EVENT(erofs_readpages, +TRACE_EVENT(erofs_readahead, TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage, bool raw), diff --git a/arch/x86/include/asm/trace/exceptions.h b/include/trace/events/exceptions.h index 6b1e87194809..a631f8de8917 100644 --- a/arch/x86/include/asm/trace/exceptions.h +++ b/include/trace/events/exceptions.h @@ -6,12 +6,8 @@ #define _TRACE_PAGE_FAULT_H #include <linux/tracepoint.h> -#include <asm/trace/common.h> -extern int trace_pagefault_reg(void); -extern void trace_pagefault_unreg(void); - -DECLARE_EVENT_CLASS(x86_exceptions, +DECLARE_EVENT_CLASS(exceptions, TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code), @@ -26,7 +22,7 @@ DECLARE_EVENT_CLASS(x86_exceptions, TP_fast_assign( __entry->address = address; - __entry->ip = regs->ip; + __entry->ip = instruction_pointer(regs); __entry->error_code = error_code; ), @@ -34,20 +30,13 @@ DECLARE_EVENT_CLASS(x86_exceptions, (void *)__entry->address, (void *)__entry->ip, __entry->error_code) ); -#define DEFINE_PAGE_FAULT_EVENT(name) \ -DEFINE_EVENT_FN(x86_exceptions, name, \ - 
TP_PROTO(unsigned long address, struct pt_regs *regs, \ - unsigned long error_code), \ - TP_ARGS(address, regs, error_code), \ - trace_pagefault_reg, trace_pagefault_unreg); - -DEFINE_PAGE_FAULT_EVENT(page_fault_user); -DEFINE_PAGE_FAULT_EVENT(page_fault_kernel); +DEFINE_EVENT(exceptions, page_fault_user, + TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code), + TP_ARGS(address, regs, error_code)); +DEFINE_EVENT(exceptions, page_fault_kernel, + TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code), + TP_ARGS(address, regs, error_code)); -#undef TRACE_INCLUDE_PATH -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_PATH . -#define TRACE_INCLUDE_FILE exceptions #endif /* _TRACE_PAGE_FAULT_H */ /* This part must be outside protection */ diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index fb81c533b310..178ab6f611be 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -645,7 +645,7 @@ TRACE_EVENT(io_uring_short_write, /* * io_uring_local_work_run - ran ring local task work * - * @tctx: pointer to a io_uring_ctx + * @ctx: pointer to an io_ring_ctx * @count: how many functions it ran * @loops: how many loops it ran * diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 8994e97d86c1..3bec9fb73a36 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -326,11 +326,37 @@ DEFINE_EVENT(sched_process_template, sched_process_free, TP_ARGS(p)); /* - * Tracepoint for a task exiting: + * Tracepoint for a task exiting. + * Note, it's a superset of sched_process_template and should be kept + * compatible as much as possible. sched_process_exits has an extra + * `group_dead` argument, so sched_process_template can't be used, + * unfortunately, just like sched_migrate_task above. */ -DEFINE_EVENT(sched_process_template, sched_process_exit, - TP_PROTO(struct task_struct *p), - TP_ARGS(p)); +TRACE_EVENT(sched_process_exit, + + TP_PROTO(struct task_struct *p, bool group_dead), + + TP_ARGS(p, group_dead), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + __field( bool, group_dead ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ + __entry->group_dead = group_dead; + ), + + TP_printk("comm=%s pid=%d prio=%d group_dead=%s", + __entry->comm, __entry->pid, __entry->prio, + __entry->group_dead ? 
"true" : "false" + ) +); /* * Tracepoint for waiting on task to unschedule: diff --git a/include/uapi/linux/blktrace_api.h b/include/uapi/linux/blktrace_api.h index 690621b610e5..1bfb635e309b 100644 --- a/include/uapi/linux/blktrace_api.h +++ b/include/uapi/linux/blktrace_api.h @@ -49,7 +49,7 @@ enum blktrace_act { __BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */ __BLK_TA_INSERT, /* insert request */ __BLK_TA_SPLIT, /* bio was split */ - __BLK_TA_BOUNCE, /* bio was bounced */ + __BLK_TA_BOUNCE, /* unused, was: bio was bounced */ __BLK_TA_REMAP, /* bio was remapped */ __BLK_TA_ABORT, /* request aborted */ __BLK_TA_DRV_DATA, /* driver-specific binary data */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 7a8f4c290187..3aff99f2696a 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -119,7 +119,7 @@ struct fscrypt_key_specifier { */ struct fscrypt_provisioning_key_payload { __u32 type; - __u32 __reserved; + __u32 flags; __u8 raw[]; }; @@ -128,7 +128,9 @@ struct fscrypt_add_key_arg { struct fscrypt_key_specifier key_spec; __u32 raw_size; __u32 key_id; - __u32 __reserved[8]; +#define FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001 + __u32 flags; + __u32 __reserved[7]; __u8 raw[]; }; diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index d2ee625ea189..7e2744ec8933 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -63,7 +63,7 @@ #define FUTEX2_SIZE_U32 0x02 #define FUTEX2_SIZE_U64 0x03 #define FUTEX2_NUMA 0x04 - /* 0x08 */ +#define FUTEX2_MPOL 0x08 /* 0x10 */ /* 0x20 */ /* 0x40 */ @@ -75,6 +75,13 @@ #define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */ /* + * When FUTEX2_NUMA doubles the futex word, the second word is a node value. + * The special value -1 indicates no-node. This is the same value as + * NUMA_NO_NODE, except that value is not ABI, this is. 
+ */ +#define FUTEX_NO_NODE (-1) + +/* * Max numbers of elements in a futex_waitv array */ #define FUTEX_WAITV_MAX 128 diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8f1fc12bac46..cfd17e382082 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -73,6 +73,7 @@ struct io_uring_sqe { __u32 futex_flags; __u32 install_fd_flags; __u32 nop_flags; + __u32 pipe_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -93,6 +94,10 @@ struct io_uring_sqe { __u16 addr_len; __u16 __pad3[1]; }; + struct { + __u8 write_stream; + __u8 __pad4[3]; + }; }; union { struct { @@ -283,6 +288,7 @@ enum io_uring_op { IORING_OP_EPOLL_WAIT, IORING_OP_READV_FIXED, IORING_OP_WRITEV_FIXED, + IORING_OP_PIPE, /* this goes last, obviously */ IORING_OP_LAST, @@ -988,12 +994,16 @@ struct io_uring_zcrx_offsets { __u64 __resv[2]; }; +enum io_uring_zcrx_area_flags { + IORING_ZCRX_AREA_DMABUF = 1, +}; + struct io_uring_zcrx_area_reg { __u64 addr; __u64 len; __u64 rq_area_token; __u32 flags; - __u32 __resv1; + __u32 dmabuf_fd; __u64 __resv2[2]; }; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 5fc753c23734..78a362b80027 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -39,18 +39,21 @@ enum perf_type_id { /* * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE + * * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA * AA: hardware event ID * EEEEEEEE: PMU type ID + * * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB * BB: hardware cache ID * CC: hardware cache op ID * DD: hardware cache op result ID * EEEEEEEE: PMU type ID - * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. + * + * If the PMU type ID is 0, PERF_TYPE_RAW will be applied. */ -#define PERF_PMU_TYPE_SHIFT 32 -#define PERF_HW_EVENT_MASK 0xffffffff +#define PERF_PMU_TYPE_SHIFT 32 +#define PERF_HW_EVENT_MASK 0xffffffff /* * Generalized performance event event_id types, used by the @@ -112,7 +115,7 @@ enum perf_hw_cache_op_result_id { /* * Special "software" events provided by the kernel, even if the hardware * does not support performance events. These events measure various - * physical and sw events of the kernel (and allow the profiling of them as + * physical and SW events of the kernel (and allow the profiling of them as * well): */ enum perf_sw_ids { @@ -167,8 +170,9 @@ enum perf_event_sample_format { }; #define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT) + /* - * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set + * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set. * * If the user does not pass priv level information via branch_sample_type, * the kernel uses the event's priv level. Branch and event priv levels do @@ -178,20 +182,20 @@ enum perf_event_sample_format { * of branches and therefore it supersedes all the other types. 
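The uapi perf_event.h hunk above reflows the comment describing how an extended PMU type ID occupies the upper 32 bits of attr.config for PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events. A minimal user-space sketch of composing such a value, using PERF_PMU_TYPE_SHIFT from this header; the PMU type ID would come from /sys/bus/event_source/devices/<pmu>/type and the helper name is hypothetical.

#include <linux/perf_event.h>
#include <string.h>

/* Hypothetical: count CPU cycles on one specific core PMU. */
static void example_fill_attr(struct perf_event_attr *attr, unsigned int pmu_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	/* 0xEEEEEEEE000000AA: PMU type ID in the upper 32 bits, hardware event ID low. */
	attr->config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) | PERF_COUNT_HW_CPU_CYCLES;
}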
*/ enum perf_branch_sample_type_shift { - PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ - PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ - PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ - - PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ - PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ - PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ - PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ - PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ - PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ - PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ + + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ - PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* CALL/RET stack */ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ @@ -210,96 +214,95 @@ enum perf_branch_sample_type_shift { }; enum perf_branch_sample_type { - PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, - PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, - PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, + PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, + PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, + PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, - PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, - PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, - PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, - PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, - PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, - PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, + PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, + PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, + PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, + PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, + PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, - PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, - PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, - PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, + PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, + PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, + 
PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, - PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, - PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, + PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, + PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, - PERF_SAMPLE_BRANCH_TYPE_SAVE = - 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, + PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, - PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, - PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; /* - * Common flow change classification + * Common control flow change classifications: */ enum { - PERF_BR_UNKNOWN = 0, /* unknown */ - PERF_BR_COND = 1, /* conditional */ - PERF_BR_UNCOND = 2, /* unconditional */ - PERF_BR_IND = 3, /* indirect */ - PERF_BR_CALL = 4, /* function call */ - PERF_BR_IND_CALL = 5, /* indirect function call */ - PERF_BR_RET = 6, /* function return */ - PERF_BR_SYSCALL = 7, /* syscall */ - PERF_BR_SYSRET = 8, /* syscall return */ - PERF_BR_COND_CALL = 9, /* conditional function call */ - PERF_BR_COND_RET = 10, /* conditional function return */ - PERF_BR_ERET = 11, /* exception return */ - PERF_BR_IRQ = 12, /* irq */ - PERF_BR_SERROR = 13, /* system error */ - PERF_BR_NO_TX = 14, /* not in transaction */ - PERF_BR_EXTEND_ABI = 15, /* extend ABI */ + PERF_BR_UNKNOWN = 0, /* Unknown */ + PERF_BR_COND = 1, /* Conditional */ + PERF_BR_UNCOND = 2, /* Unconditional */ + PERF_BR_IND = 3, /* Indirect */ + PERF_BR_CALL = 4, /* Function call */ + PERF_BR_IND_CALL = 5, /* Indirect function call */ + PERF_BR_RET = 6, /* Function return */ + PERF_BR_SYSCALL = 7, /* Syscall */ + PERF_BR_SYSRET = 8, /* Syscall return */ + PERF_BR_COND_CALL = 9, /* Conditional function call */ + PERF_BR_COND_RET = 10, /* Conditional function return */ + PERF_BR_ERET = 11, /* Exception return */ + PERF_BR_IRQ = 12, /* IRQ */ + PERF_BR_SERROR = 13, /* System error */ + PERF_BR_NO_TX = 14, /* Not in transaction */ + PERF_BR_EXTEND_ABI = 15, /* Extend ABI */ PERF_BR_MAX, }; /* - * Common branch speculation outcome classification + * Common branch speculation outcome classifications: */ enum { - PERF_BR_SPEC_NA = 0, /* Not available */ - PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ - PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ - PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ + PERF_BR_SPEC_NA = 0, /* Not available */ + PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ + PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ + PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ PERF_BR_SPEC_MAX, }; enum { - PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ - PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ - PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ - PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ - PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ - PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ - 
PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ - PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ + PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ + PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ + PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ + PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ + PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ + PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ + PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ + PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ PERF_BR_NEW_MAX, }; enum { - PERF_BR_PRIV_UNKNOWN = 0, - PERF_BR_PRIV_USER = 1, - PERF_BR_PRIV_KERNEL = 2, - PERF_BR_PRIV_HV = 3, + PERF_BR_PRIV_UNKNOWN = 0, + PERF_BR_PRIV_USER = 1, + PERF_BR_PRIV_KERNEL = 2, + PERF_BR_PRIV_HV = 3, }; -#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 -#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2 -#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 -#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 -#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 +#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 +#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2 +#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 +#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 +#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 #define PERF_SAMPLE_BRANCH_PLM_ALL \ (PERF_SAMPLE_BRANCH_USER|\ @@ -310,9 +313,9 @@ enum { * Values to determine ABI of the registers dump. */ enum perf_sample_regs_abi { - PERF_SAMPLE_REGS_ABI_NONE = 0, - PERF_SAMPLE_REGS_ABI_32 = 1, - PERF_SAMPLE_REGS_ABI_64 = 2, + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, }; /* @@ -320,21 +323,21 @@ enum perf_sample_regs_abi { * abort events. Multiple bits can be set. */ enum { - PERF_TXN_ELISION = (1 << 0), /* From elision */ - PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ - PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ - PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */ - PERF_TXN_RETRY = (1 << 4), /* Retry possible */ - PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ - PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ - PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ + PERF_TXN_ELISION = (1 << 0), /* From elision */ + PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ + PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ + PERF_TXN_ASYNC = (1 << 3), /* Instruction is not related */ + PERF_TXN_RETRY = (1 << 4), /* Retry possible */ + PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ + PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ + PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ - PERF_TXN_MAX = (1 << 8), /* non-ABI */ + PERF_TXN_MAX = (1 << 8), /* non-ABI */ - /* bits 32..63 are reserved for the abort code */ + /* Bits 32..63 are reserved for the abort code */ - PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), - PERF_TXN_ABORT_SHIFT = 32, + PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), + PERF_TXN_ABORT_SHIFT = 32, }; /* @@ -369,24 +372,22 @@ enum perf_event_read_format { PERF_FORMAT_MAX = 1U << 5, /* non-ABI */ }; -#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ -#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ -#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ -#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ - /* add: sample_stack_user */ -#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ -#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ -#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ 
-#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ -#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */ +#define PERF_ATTR_SIZE_VER0 64 /* Size of first published 'struct perf_event_attr' */ +#define PERF_ATTR_SIZE_VER1 72 /* Add: config2 */ +#define PERF_ATTR_SIZE_VER2 80 /* Add: branch_sample_type */ +#define PERF_ATTR_SIZE_VER3 96 /* Add: sample_regs_user */ + /* Add: sample_stack_user */ +#define PERF_ATTR_SIZE_VER4 104 /* Add: sample_regs_intr */ +#define PERF_ATTR_SIZE_VER5 112 /* Add: aux_watermark */ +#define PERF_ATTR_SIZE_VER6 120 /* Add: aux_sample_size */ +#define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */ +#define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */ /* - * Hardware event_id to monitor via a performance monitoring event: - * - * @sample_max_stack: Max number of frame pointers in a callchain, - * should be < /proc/sys/kernel/perf_event_max_stack - * Max number of entries of branch stack - * should be < hardware limit + * 'struct perf_event_attr' contains various attributes that define + * a performance event - most of them hardware related configuration + * details, but also a lot of behavioral switches and values implemented + * by the kernel. */ struct perf_event_attr { @@ -396,7 +397,7 @@ struct perf_event_attr { __u32 type; /* - * Size of the attr structure, for fwd/bwd compat. + * Size of the attr structure, for forward/backwards compatibility. */ __u32 size; @@ -451,21 +452,21 @@ struct perf_event_attr { comm_exec : 1, /* flag comm events that are due to an exec */ use_clockid : 1, /* use @clockid for time fields */ context_switch : 1, /* context switch data */ - write_backward : 1, /* Write ring buffer from end to beginning */ + write_backward : 1, /* write ring buffer from end to beginning */ namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ - bpf_event : 1, /* include bpf events */ + bpf_event : 1, /* include BPF events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ - build_id : 1, /* use build id in mmap2 events */ + build_id : 1, /* use build ID in mmap2 events */ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ remove_on_exec : 1, /* event is removed from task on exec */ sigtrap : 1, /* send synchronous SIGTRAP on event */ __reserved_1 : 26; union { - __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_events; /* wake up every n events */ __u32 wakeup_watermark; /* bytes before wakeup */ }; @@ -474,13 +475,13 @@ struct perf_event_attr { __u64 bp_addr; __u64 kprobe_func; /* for perf_kprobe */ __u64 uprobe_path; /* for perf_uprobe */ - __u64 config1; /* extension of config */ + __u64 config1; /* extension of config */ }; union { __u64 bp_len; - __u64 kprobe_addr; /* when kprobe_func == NULL */ + __u64 kprobe_addr; /* when kprobe_func == NULL */ __u64 probe_offset; /* for perf_[k,u]probe */ - __u64 config2; /* extension of config1 */ + __u64 config2; /* extension of config1 */ }; __u64 branch_sample_type; /* enum perf_branch_sample_type */ @@ -510,7 +511,16 @@ struct perf_event_attr { * Wakeup watermark for AUX area */ __u32 aux_watermark; + + /* + * Max number of frame pointers in a callchain, should be + * lower than /proc/sys/kernel/perf_event_max_stack. + * + * Max number of entries of branch stack should be lower + * than the hardware limit. 
+ */ __u16 sample_max_stack; + __u16 __reserved_2; __u32 aux_sample_size; @@ -537,7 +547,7 @@ struct perf_event_attr { /* * Structure used by below PERF_EVENT_IOC_QUERY_BPF command - * to query bpf programs attached to the same perf tracepoint + * to query BPF programs attached to the same perf tracepoint * as the given perf event. */ struct perf_event_query_bpf { @@ -559,21 +569,21 @@ struct perf_event_query_bpf { /* * Ioctls that can be done on a perf event fd: */ -#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) -#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) -#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) -#define PERF_EVENT_IOC_RESET _IO ('$', 3) -#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) -#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) -#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) -#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) -#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) -#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) +#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_RESET _IO ('$', 3) +#define PERF_EVENT_IOC_PERIOD _IOW ('$', 4, __u64) +#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW ('$', 6, char *) +#define PERF_EVENT_IOC_ID _IOR ('$', 7, __u64 *) +#define PERF_EVENT_IOC_SET_BPF _IOW ('$', 8, __u32) +#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW ('$', 9, __u32) #define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) -#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *) +#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW ('$', 11, struct perf_event_attr *) enum perf_event_ioc_flags { - PERF_IOC_FLAG_GROUP = 1U << 0, + PERF_IOC_FLAG_GROUP = 1U << 0, }; /* @@ -584,7 +594,7 @@ struct perf_event_mmap_page { __u32 compat_version; /* lowest version this is compat with */ /* - * Bits needed to read the hw events in user-space. + * Bits needed to read the HW events in user-space. * * u32 seq, time_mult, time_shift, index, width; * u64 count, enabled, running; @@ -622,7 +632,7 @@ struct perf_event_mmap_page { __u32 index; /* hardware event identifier */ __s64 offset; /* add to hardware event value */ __u64 time_enabled; /* time event active */ - __u64 time_running; /* time event on cpu */ + __u64 time_running; /* time event on CPU */ union { __u64 capabilities; struct { @@ -650,7 +660,7 @@ struct perf_event_mmap_page { /* * If cap_usr_time the below fields can be used to compute the time - * delta since time_enabled (in ns) using rdtsc or similar. + * delta since time_enabled (in ns) using RDTSC or similar. * * u64 quot, rem; * u64 delta; @@ -723,7 +733,7 @@ struct perf_event_mmap_page { * after reading this value. * * When the mapping is PROT_WRITE the @data_tail value should be - * written by userspace to reflect the last read data, after issueing + * written by user-space to reflect the last read data, after issuing * an smp_mb() to separate the data read from the ->data_tail store. * In this case the kernel will not over-write unread data. * @@ -739,7 +749,7 @@ struct perf_event_mmap_page { /* * AUX area is defined by aux_{offset,size} fields that should be set - * by the userspace, so that + * by the user-space, so that * * aux_offset >= data_offset + data_size * @@ -813,7 +823,7 @@ struct perf_event_mmap_page { * Indicates that thread was preempted in TASK_RUNNING state. * * PERF_RECORD_MISC_MMAP_BUILD_ID: - * Indicates that mmap2 event carries build id data. 
+ * Indicates that mmap2 event carries build ID data. */ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) @@ -824,26 +834,26 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15) struct perf_event_header { - __u32 type; - __u16 misc; - __u16 size; + __u32 type; + __u16 misc; + __u16 size; }; struct perf_ns_link_info { - __u64 dev; - __u64 ino; + __u64 dev; + __u64 ino; }; enum { - NET_NS_INDEX = 0, - UTS_NS_INDEX = 1, - IPC_NS_INDEX = 2, - PID_NS_INDEX = 3, - USER_NS_INDEX = 4, - MNT_NS_INDEX = 5, - CGROUP_NS_INDEX = 6, - - NR_NAMESPACES, /* number of available namespaces */ + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + + NR_NAMESPACES, /* number of available namespaces */ }; enum perf_event_type { @@ -859,11 +869,11 @@ enum perf_event_type { * optional fields being ignored. * * struct sample_id { - * { u32 pid, tid; } && PERF_SAMPLE_TID - * { u64 time; } && PERF_SAMPLE_TIME - * { u64 id; } && PERF_SAMPLE_ID - * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID - * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 id; } && PERF_SAMPLE_IDENTIFIER * } && perf_event_attr::sample_id_all * @@ -874,7 +884,7 @@ enum perf_event_type { /* * The MMAP events record the PROT_EXEC mappings so that we can - * correlate userspace IPs to code. They have the following structure: + * correlate user-space IPs to code. They have the following structure: * * struct { * struct perf_event_header header; @@ -884,7 +894,7 @@ enum perf_event_type { * u64 len; * u64 pgoff; * char filename[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP = 1, @@ -894,7 +904,7 @@ enum perf_event_type { * struct perf_event_header header; * u64 id; * u64 lost; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_LOST = 2, @@ -905,7 +915,7 @@ enum perf_event_type { * * u32 pid, tid; * char comm[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_COMM = 3, @@ -916,7 +926,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_EXIT = 4, @@ -927,7 +937,7 @@ enum perf_event_type { * u64 time; * u64 id; * u64 stream_id; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_THROTTLE = 5, @@ -939,7 +949,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_FORK = 7, @@ -950,7 +960,7 @@ enum perf_event_type { * u32 pid, tid; * * struct read_format values; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_READ = 8, @@ -1005,12 +1015,12 @@ enum perf_event_type { * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * - * { u64 abi; # enum perf_sample_regs_abi - * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER + * { u64 abi; # enum perf_sample_regs_abi + * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER * - * { u64 size; - * char data[size]; - * u64 dyn_size; } && PERF_SAMPLE_STACK_USER + * { u64 size; + * char data[size]; + * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * * { 
union perf_sample_weight * { @@ -1035,10 +1045,11 @@ enum perf_event_type { * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR - * { u64 size; - * char data[size]; } && PERF_SAMPLE_AUX + * { u64 cgroup;} && PERF_SAMPLE_CGROUP * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE * { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE + * { u64 size; + * char data[size]; } && PERF_SAMPLE_AUX * }; */ PERF_RECORD_SAMPLE = 9, @@ -1070,7 +1081,7 @@ enum perf_event_type { * }; * u32 prot, flags; * char filename[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP2 = 10, @@ -1079,12 +1090,12 @@ enum perf_event_type { * Records that new data landed in the AUX buffer part. * * struct { - * struct perf_event_header header; + * struct perf_event_header header; * - * u64 aux_offset; - * u64 aux_size; + * u64 aux_offset; + * u64 aux_size; * u64 flags; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_AUX = 11, @@ -1167,7 +1178,7 @@ enum perf_event_type { PERF_RECORD_KSYMBOL = 17, /* - * Record bpf events: + * Record BPF events: * enum perf_bpf_event_type { * PERF_BPF_EVENT_UNKNOWN = 0, * PERF_BPF_EVENT_PROG_LOAD = 1, @@ -1245,181 +1256,181 @@ enum perf_record_ksymbol_type { #define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0) enum perf_bpf_event_type { - PERF_BPF_EVENT_UNKNOWN = 0, - PERF_BPF_EVENT_PROG_LOAD = 1, - PERF_BPF_EVENT_PROG_UNLOAD = 2, - PERF_BPF_EVENT_MAX, /* non-ABI */ + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX, /* non-ABI */ }; -#define PERF_MAX_STACK_DEPTH 127 -#define PERF_MAX_CONTEXTS_PER_STACK 8 +#define PERF_MAX_STACK_DEPTH 127 +#define PERF_MAX_CONTEXTS_PER_STACK 8 enum perf_callchain_context { - PERF_CONTEXT_HV = (__u64)-32, - PERF_CONTEXT_KERNEL = (__u64)-128, - PERF_CONTEXT_USER = (__u64)-512, + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, - PERF_CONTEXT_GUEST = (__u64)-2048, - PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, - PERF_CONTEXT_GUEST_USER = (__u64)-2560, + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, - PERF_CONTEXT_MAX = (__u64)-4095, + PERF_CONTEXT_MAX = (__u64)-4095, }; /** * PERF_RECORD_AUX::flags bits */ -#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ -#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ -#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ -#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ +#define PERF_AUX_FLAG_TRUNCATED 0x0001 /* Record was truncated to fit */ +#define PERF_AUX_FLAG_OVERWRITE 0x0002 /* Snapshot from overwrite mode */ +#define PERF_AUX_FLAG_PARTIAL 0x0004 /* Record contains gaps */ +#define PERF_AUX_FLAG_COLLISION 0x0008 /* Sample collided with another */ #define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */ /* CoreSight PMU AUX buffer formats */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ -#define PERF_FLAG_FD_NO_GROUP (1UL << 0) -#define 
PERF_FLAG_FD_OUTPUT (1UL << 1) -#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */ -#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ +#define PERF_FLAG_FD_NO_GROUP (1UL << 0) +#define PERF_FLAG_FD_OUTPUT (1UL << 1) +#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup ID, per-CPU mode only */ +#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ #if defined(__LITTLE_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_op:5, /* type of opcode */ - mem_lvl:14, /* memory hierarchy level */ - mem_snoop:5, /* snoop mode */ - mem_lock:2, /* lock instr */ - mem_dtlb:7, /* tlb access */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_remote:1, /* remote */ - mem_snoopx:2, /* snoop mode, ext */ - mem_blk:3, /* access blocked */ - mem_hops:3, /* hop level */ - mem_rsvd:18; + __u64 mem_op : 5, /* Type of opcode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_snoop : 5, /* Snoop mode */ + mem_lock : 2, /* Lock instr */ + mem_dtlb : 7, /* TLB access */ + mem_lvl_num : 4, /* Memory hierarchy level number */ + mem_remote : 1, /* Remote */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_blk : 3, /* Access blocked */ + mem_hops : 3, /* Hop level */ + mem_rsvd : 18; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_rsvd:18, - mem_hops:3, /* hop level */ - mem_blk:3, /* access blocked */ - mem_snoopx:2, /* snoop mode, ext */ - mem_remote:1, /* remote */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_dtlb:7, /* tlb access */ - mem_lock:2, /* lock instr */ - mem_snoop:5, /* snoop mode */ - mem_lvl:14, /* memory hierarchy level */ - mem_op:5; /* type of opcode */ + __u64 mem_rsvd : 18, + mem_hops : 3, /* Hop level */ + mem_blk : 3, /* Access blocked */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_remote : 1, /* Remote */ + mem_lvl_num : 4, /* Memory hierarchy level number */ + mem_dtlb : 7, /* TLB access */ + mem_lock : 2, /* Lock instr */ + mem_snoop : 5, /* Snoop mode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_op : 5; /* Type of opcode */ }; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif -/* type of opcode (load/store/prefetch,code) */ -#define PERF_MEM_OP_NA 0x01 /* not available */ -#define PERF_MEM_OP_LOAD 0x02 /* load instruction */ -#define PERF_MEM_OP_STORE 0x04 /* store instruction */ -#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */ -#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */ -#define PERF_MEM_OP_SHIFT 0 +/* Type of memory opcode: */ +#define PERF_MEM_OP_NA 0x0001 /* Not available */ +#define PERF_MEM_OP_LOAD 0x0002 /* Load instruction */ +#define PERF_MEM_OP_STORE 0x0004 /* Store instruction */ +#define PERF_MEM_OP_PFETCH 0x0008 /* Prefetch */ +#define PERF_MEM_OP_EXEC 0x0010 /* Code (execution) */ +#define PERF_MEM_OP_SHIFT 0 /* - * PERF_MEM_LVL_* namespace being depricated to some extent in the + * The PERF_MEM_LVL_* namespace is being deprecated to some extent in * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields. - * Supporting this namespace inorder to not break defined ABIs. + * We support this namespace in order to not break defined ABIs. 
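The memory level, snoop, lock, TLB and hop fields that follow are meant to be OR-ed into a perf_mem_data_src value with the PERF_MEM_S() helper defined at the end of this group. A minimal sketch of the usual composition idiom; the function name is illustrative::

    #include <linux/perf_event.h>

    /* Describe an L1 load hit as a perf_mem_data_src value. */
    static __u64 mem_data_src_l1_load_hit(void)
    {
        return PERF_MEM_S(OP, LOAD) |    /* load instruction */
               PERF_MEM_S(LVL, HIT) |    /* hit ... */
               PERF_MEM_S(LVL, L1) |     /* ... in the L1 */
               PERF_MEM_S(LVLNUM, L1);   /* level number: L1 */
    }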
* - * memory hierarchy (memory level, hit or miss) + * Memory hierarchy (memory level, hit or miss) */ -#define PERF_MEM_LVL_NA 0x01 /* not available */ -#define PERF_MEM_LVL_HIT 0x02 /* hit level */ -#define PERF_MEM_LVL_MISS 0x04 /* miss level */ -#define PERF_MEM_LVL_L1 0x08 /* L1 */ -#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */ -#define PERF_MEM_LVL_L2 0x20 /* L2 */ -#define PERF_MEM_LVL_L3 0x40 /* L3 */ -#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */ -#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */ -#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */ -#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */ -#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */ -#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ -#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ -#define PERF_MEM_LVL_SHIFT 5 - -#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */ -#define PERF_MEM_REMOTE_SHIFT 37 - -#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */ -#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ -#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ -#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ -#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */ -/* 0x7 available */ -#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */ -#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */ -#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */ -#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ -#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ -#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ -#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ - -#define PERF_MEM_LVLNUM_SHIFT 33 - -/* snoop mode */ -#define PERF_MEM_SNOOP_NA 0x01 /* not available */ -#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */ -#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */ -#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */ -#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */ -#define PERF_MEM_SNOOP_SHIFT 19 - -#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ -#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */ -#define PERF_MEM_SNOOPX_SHIFT 38 - -/* locked instruction */ -#define PERF_MEM_LOCK_NA 0x01 /* not available */ -#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */ -#define PERF_MEM_LOCK_SHIFT 24 +#define PERF_MEM_LVL_NA 0x0001 /* Not available */ +#define PERF_MEM_LVL_HIT 0x0002 /* Hit level */ +#define PERF_MEM_LVL_MISS 0x0004 /* Miss level */ +#define PERF_MEM_LVL_L1 0x0008 /* L1 */ +#define PERF_MEM_LVL_LFB 0x0010 /* Line Fill Buffer */ +#define PERF_MEM_LVL_L2 0x0020 /* L2 */ +#define PERF_MEM_LVL_L3 0x0040 /* L3 */ +#define PERF_MEM_LVL_LOC_RAM 0x0080 /* Local DRAM */ +#define PERF_MEM_LVL_REM_RAM1 0x0100 /* Remote DRAM (1 hop) */ +#define PERF_MEM_LVL_REM_RAM2 0x0200 /* Remote DRAM (2 hops) */ +#define PERF_MEM_LVL_REM_CCE1 0x0400 /* Remote Cache (1 hop) */ +#define PERF_MEM_LVL_REM_CCE2 0x0800 /* Remote Cache (2 hops) */ +#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ +#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ +#define PERF_MEM_LVL_SHIFT 5 + +#define PERF_MEM_REMOTE_REMOTE 0x0001 /* Remote */ +#define PERF_MEM_REMOTE_SHIFT 37 + +#define PERF_MEM_LVLNUM_L1 0x0001 /* L1 */ +#define PERF_MEM_LVLNUM_L2 0x0002 /* L2 */ +#define PERF_MEM_LVLNUM_L3 0x0003 /* L3 */ +#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */ +#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */ +/* 0x007 available */ +#define PERF_MEM_LVLNUM_UNC 0x0008 /* 
Uncached */ +#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */ +#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */ +#define PERF_MEM_LVLNUM_ANY_CACHE 0x000b /* Any cache */ +#define PERF_MEM_LVLNUM_LFB 0x000c /* LFB / L1 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_RAM 0x000d /* RAM */ +#define PERF_MEM_LVLNUM_PMEM 0x000e /* PMEM */ +#define PERF_MEM_LVLNUM_NA 0x000f /* N/A */ + +#define PERF_MEM_LVLNUM_SHIFT 33 + +/* Snoop mode */ +#define PERF_MEM_SNOOP_NA 0x0001 /* Not available */ +#define PERF_MEM_SNOOP_NONE 0x0002 /* No snoop */ +#define PERF_MEM_SNOOP_HIT 0x0004 /* Snoop hit */ +#define PERF_MEM_SNOOP_MISS 0x0008 /* Snoop miss */ +#define PERF_MEM_SNOOP_HITM 0x0010 /* Snoop hit modified */ +#define PERF_MEM_SNOOP_SHIFT 19 + +#define PERF_MEM_SNOOPX_FWD 0x0001 /* Forward */ +#define PERF_MEM_SNOOPX_PEER 0x0002 /* Transfer from peer */ +#define PERF_MEM_SNOOPX_SHIFT 38 + +/* Locked instruction */ +#define PERF_MEM_LOCK_NA 0x0001 /* Not available */ +#define PERF_MEM_LOCK_LOCKED 0x0002 /* Locked transaction */ +#define PERF_MEM_LOCK_SHIFT 24 /* TLB access */ -#define PERF_MEM_TLB_NA 0x01 /* not available */ -#define PERF_MEM_TLB_HIT 0x02 /* hit level */ -#define PERF_MEM_TLB_MISS 0x04 /* miss level */ -#define PERF_MEM_TLB_L1 0x08 /* L1 */ -#define PERF_MEM_TLB_L2 0x10 /* L2 */ -#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/ -#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */ -#define PERF_MEM_TLB_SHIFT 26 +#define PERF_MEM_TLB_NA 0x0001 /* Not available */ +#define PERF_MEM_TLB_HIT 0x0002 /* Hit level */ +#define PERF_MEM_TLB_MISS 0x0004 /* Miss level */ +#define PERF_MEM_TLB_L1 0x0008 /* L1 */ +#define PERF_MEM_TLB_L2 0x0010 /* L2 */ +#define PERF_MEM_TLB_WK 0x0020 /* Hardware Walker*/ +#define PERF_MEM_TLB_OS 0x0040 /* OS fault handler */ +#define PERF_MEM_TLB_SHIFT 26 /* Access blocked */ -#define PERF_MEM_BLK_NA 0x01 /* not available */ -#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */ -#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */ -#define PERF_MEM_BLK_SHIFT 40 - -/* hop level */ -#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */ -#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ -#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ -#define PERF_MEM_HOPS_3 0x04 /* remote board */ +#define PERF_MEM_BLK_NA 0x0001 /* Not available */ +#define PERF_MEM_BLK_DATA 0x0002 /* Data could not be forwarded */ +#define PERF_MEM_BLK_ADDR 0x0004 /* Address conflict */ +#define PERF_MEM_BLK_SHIFT 40 + +/* Hop level */ +#define PERF_MEM_HOPS_0 0x0001 /* Remote core, same node */ +#define PERF_MEM_HOPS_1 0x0002 /* Remote node, same socket */ +#define PERF_MEM_HOPS_2 0x0003 /* Remote socket, same board */ +#define PERF_MEM_HOPS_3 0x0004 /* Remote board */ /* 5-7 available */ -#define PERF_MEM_HOPS_SHIFT 43 +#define PERF_MEM_HOPS_SHIFT 43 #define PERF_MEM_S(a, s) \ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) /* - * single taken branch record layout: + * Layout of single taken branch records: * * from: source instruction (may not always be a branch insn) * to: branch target @@ -1438,37 +1449,37 @@ union perf_mem_data_src { struct perf_branch_entry { __u64 from; __u64 to; - __u64 mispred:1, /* target mispredicted */ - predicted:1,/* target predicted */ - in_tx:1, /* in transaction */ - abort:1, /* transaction abort */ - cycles:16, /* cycle count to last branch */ - type:4, /* branch type */ - spec:2, /* branch speculation info */ - new_type:4, /* additional branch type */ - priv:3, /* privilege level */ - reserved:31; + __u64 mispred : 1, /* 
target mispredicted */ + predicted : 1, /* target predicted */ + in_tx : 1, /* in transaction */ + abort : 1, /* transaction abort */ + cycles : 16, /* cycle count to last branch */ + type : 4, /* branch type */ + spec : 2, /* branch speculation info */ + new_type : 4, /* additional branch type */ + priv : 3, /* privilege level */ + reserved : 31; }; /* Size of used info bits in struct perf_branch_entry */ #define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 union perf_sample_weight { - __u64 full; + __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) struct { - __u32 var1_dw; - __u16 var2_w; - __u16 var3_w; + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; }; #elif defined(__BIG_ENDIAN_BITFIELD) struct { - __u16 var3_w; - __u16 var2_w; - __u32 var1_dw; + __u16 var3_w; + __u16 var2_w; + __u32 var1_dw; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif }; diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h index 2970ef44655a..c27a4e238e4b 100644 --- a/include/uapi/linux/pidfd.h +++ b/include/uapi/linux/pidfd.h @@ -12,7 +12,7 @@ #define PIDFD_THREAD O_EXCL #ifdef __KERNEL__ #include <linux/sched.h> -#define PIDFD_CLONE CLONE_PIDFD +#define PIDFD_STALE CLONE_PIDFD #endif /* Flags for pidfd_send_signal(). */ @@ -25,10 +25,24 @@ #define PIDFD_INFO_CREDS (1UL << 1) /* Always returned, even if not requested */ #define PIDFD_INFO_CGROUPID (1UL << 2) /* Always returned if available, even if not requested */ #define PIDFD_INFO_EXIT (1UL << 3) /* Only returned if requested. */ +#define PIDFD_INFO_COREDUMP (1UL << 4) /* Only returned if requested. */ #define PIDFD_INFO_SIZE_VER0 64 /* sizeof first published struct */ /* + * Values for @coredump_mask in pidfd_info. + * Only valid if PIDFD_INFO_COREDUMP is set in @mask. + * + * Note, the @PIDFD_COREDUMP_ROOT flag indicates that the generated + * coredump should be treated as sensitive and access should only be + * granted to privileged users. + */ +#define PIDFD_COREDUMPED (1U << 0) /* Did crash and... */ +#define PIDFD_COREDUMP_SKIP (1U << 1) /* coredumping generation was skipped. */ +#define PIDFD_COREDUMP_USER (1U << 2) /* coredump was done as the user. */ +#define PIDFD_COREDUMP_ROOT (1U << 3) /* coredump was done as root. 
*/ + +/* * The concept of process and threads in userland and the kernel is a confusing * one - within the kernel every thread is a 'task' with its own individual PID, * however from userland's point of view threads are grouped by a single PID, @@ -92,6 +106,8 @@ struct pidfd_info { __u32 fsuid; __u32 fsgid; __s32 exit_code; + __u32 coredump_mask; + __u32 __spare1; }; #define PIDFS_IOCTL_MAGIC 0xFF diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 15c18ef4eb11..43dec6eed559 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -364,4 +364,11 @@ struct prctl_mm_map { # define PR_TIMER_CREATE_RESTORE_IDS_ON 1 # define PR_TIMER_CREATE_RESTORE_IDS_GET 2 +/* FUTEX hash management */ +#define PR_FUTEX_HASH 78 +# define PR_FUTEX_HASH_SET_SLOTS 1 +# define FH_FLAG_IMMUTABLE (1ULL << 0) +# define PR_FUTEX_HASH_GET_SLOTS 2 +# define PR_FUTEX_HASH_GET_IMMUTABLE 3 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index f78ee3670dd5..1686861aae20 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -182,8 +182,12 @@ struct statx { /* File offset alignment for direct I/O reads */ __u32 stx_dio_read_offset_align; - /* 0xb8 */ - __u64 __spare3[9]; /* Spare space for future expansion */ + /* Optimised max atomic write unit in bytes */ + __u32 stx_atomic_write_unit_max_opt; + __u32 __spare2[1]; + + /* 0xc0 */ + __u64 __spare3[8]; /* Spare space for future expansion */ /* 0x100 */ }; diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index 95762232e018..5929030d4e8b 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -34,7 +34,7 @@ */ -#define TASKSTATS_VERSION 15 +#define TASKSTATS_VERSION 16 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -72,8 +72,6 @@ struct taskstats { */ __u64 cpu_count __attribute__((aligned(8))); __u64 cpu_delay_total; - __u64 cpu_delay_max; - __u64 cpu_delay_min; /* Following four fields atomically updated using task->delays->lock */ @@ -82,14 +80,10 @@ struct taskstats { */ __u64 blkio_count; __u64 blkio_delay_total; - __u64 blkio_delay_max; - __u64 blkio_delay_min; /* Delay waiting for page fault I/O (swap in only) */ __u64 swapin_count; __u64 swapin_delay_total; - __u64 swapin_delay_max; - __u64 swapin_delay_min; /* cpu "wall-clock" running time * On some architectures, value will adjust for cpu time stolen @@ -172,14 +166,11 @@ struct taskstats { /* Delay waiting for memory reclaim */ __u64 freepages_count; __u64 freepages_delay_total; - __u64 freepages_delay_max; - __u64 freepages_delay_min; + /* Delay waiting for thrashing page */ __u64 thrashing_count; __u64 thrashing_delay_total; - __u64 thrashing_delay_max; - __u64 thrashing_delay_min; /* v10: 64-bit btime to avoid overflow */ __u64 ac_btime64; /* 64-bit begin time */ @@ -187,8 +178,6 @@ struct taskstats { /* v11: Delay waiting for memory compact */ __u64 compact_count; __u64 compact_delay_total; - __u64 compact_delay_max; - __u64 compact_delay_min; /* v12 begin */ __u32 ac_tgid; /* thread group ID */ @@ -210,15 +199,37 @@ struct taskstats { /* v13: Delay waiting for write-protect copy */ __u64 wpcopy_count; __u64 wpcopy_delay_total; - __u64 wpcopy_delay_max; - __u64 wpcopy_delay_min; /* v14: Delay waiting for IRQ/SOFTIRQ */ __u64 irq_count; __u64 irq_delay_total; - __u64 irq_delay_max; - __u64 irq_delay_min; - /* v15: add Delay max */ + + /* v15: add Delay max and Delay min */ + + /* v16: move Delay max and Delay min to the 
end of taskstat */ + __u64 cpu_delay_max; + __u64 cpu_delay_min; + + __u64 blkio_delay_max; + __u64 blkio_delay_min; + + __u64 swapin_delay_max; + __u64 swapin_delay_min; + + __u64 freepages_delay_max; + __u64 freepages_delay_min; + + __u64 thrashing_delay_max; + __u64 thrashing_delay_min; + + __u64 compact_delay_max; + __u64 compact_delay_min; + + __u64 wpcopy_delay_max; + __u64 wpcopy_delay_min; + + __u64 irq_delay_max; + __u64 irq_delay_min; }; diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h index 583b86681c93..56c7e3fc666f 100644 --- a/include/uapi/linux/ublk_cmd.h +++ b/include/uapi/linux/ublk_cmd.h @@ -51,6 +51,10 @@ _IOR('u', 0x13, struct ublksrv_ctrl_cmd) #define UBLK_U_CMD_DEL_DEV_ASYNC \ _IOR('u', 0x14, struct ublksrv_ctrl_cmd) +#define UBLK_U_CMD_UPDATE_SIZE \ + _IOWR('u', 0x15, struct ublksrv_ctrl_cmd) +#define UBLK_U_CMD_QUIESCE_DEV \ + _IOWR('u', 0x16, struct ublksrv_ctrl_cmd) /* * 64bits are enough now, and it should be easy to extend in case of @@ -211,6 +215,63 @@ */ #define UBLK_F_USER_RECOVERY_FAIL_IO (1ULL << 9) +/* + * Resizing a block device is possible with UBLK_U_CMD_UPDATE_SIZE + * New size is passed in cmd->data[0] and is in units of sectors + */ +#define UBLK_F_UPDATE_SIZE (1ULL << 10) + +/* + * request buffer is registered automatically to uring_cmd's io_uring + * context before delivering this io command to ublk server, meantime + * it is un-registered automatically when completing this io command. + * + * For using this feature: + * + * - ublk server has to create sparse buffer table on the same `io_ring_ctx` + * for issuing `UBLK_IO_FETCH_REQ` and `UBLK_IO_COMMIT_AND_FETCH_REQ`. + * If uring_cmd isn't issued on same `io_ring_ctx`, it is ublk server's + * responsibility to unregister the buffer by issuing `IO_UNREGISTER_IO_BUF` + * manually, otherwise this ublk request won't complete. + * + * - ublk server passes auto buf register data via uring_cmd's sqe->addr, + * `struct ublk_auto_buf_reg` is populated from sqe->addr, please see + * the definition of ublk_sqe_addr_to_auto_buf_reg() + * + * - pass buffer index from `ublk_auto_buf_reg.index` + * + * - all reserved fields in `ublk_auto_buf_reg` need to be zeroed + * + * - pass flags from `ublk_auto_buf_reg.flags` if needed + * + * This way avoids extra cost from two uring_cmd, but also simplifies backend + * implementation, such as, the dependency on IO_REGISTER_IO_BUF and + * IO_UNREGISTER_IO_BUF becomes not necessary. + * + * If wrong data or flags are provided, both IO_FETCH_REQ and + * IO_COMMIT_AND_FETCH_REQ are failed, for the latter, the ublk IO request + * won't be completed until new IO_COMMIT_AND_FETCH_REQ command is issued + * successfully + */ +#define UBLK_F_AUTO_BUF_REG (1ULL << 11) + +/* + * Control command `UBLK_U_CMD_QUIESCE_DEV` is added for quiescing device, + * which state can be transitioned to `UBLK_S_DEV_QUIESCED` or + * `UBLK_S_DEV_FAIL_IO` finally, and it needs ublk server cooperation for + * handling `UBLK_IO_RES_ABORT` correctly. + * + * Typical use case is for supporting to upgrade ublk server application, + * meantime keep ublk block device persistent during the period. + * + * This feature is only available when UBLK_F_USER_RECOVERY is enabled. + * + * Note, this command returns -EBUSY in case that all IO commands are being + * handled by ublk server and not completed in specified time period which + * is passed from the control command parameter. 
+ */ +#define UBLK_F_QUIESCE (1ULL << 12) + /* device state */ #define UBLK_S_DEV_DEAD 0 #define UBLK_S_DEV_LIVE 1 @@ -297,6 +358,17 @@ struct ublksrv_ctrl_dev_info { #define UBLK_IO_F_FUA (1U << 13) #define UBLK_IO_F_NOUNMAP (1U << 15) #define UBLK_IO_F_SWAP (1U << 16) +/* + * For UBLK_F_AUTO_BUF_REG & UBLK_AUTO_BUF_REG_FALLBACK only. + * + * This flag is set if auto buffer register is failed & ublk server passes + * UBLK_AUTO_BUF_REG_FALLBACK, and ublk server need to register buffer + * manually for handling the delivered IO command if this flag is observed + * + * ublk server has to check this flag if UBLK_AUTO_BUF_REG_FALLBACK is + * passed in. + */ +#define UBLK_IO_F_NEED_REG_BUF (1U << 17) /* * io cmd is described by this structure, and stored in share memory, indexed @@ -331,6 +403,62 @@ static inline __u32 ublksrv_get_flags(const struct ublksrv_io_desc *iod) return iod->op_flags >> 8; } +/* + * If this flag is set, fallback by completing the uring_cmd and setting + * `UBLK_IO_F_NEED_REG_BUF` in case of auto-buf-register failure; + * otherwise the client ublk request is failed silently + * + * If ublk server passes this flag, it has to check if UBLK_IO_F_NEED_REG_BUF + * is set in `ublksrv_io_desc.op_flags`. If UBLK_IO_F_NEED_REG_BUF is set, + * ublk server needs to register io buffer manually for handling IO command. + */ +#define UBLK_AUTO_BUF_REG_FALLBACK (1 << 0) +#define UBLK_AUTO_BUF_REG_F_MASK UBLK_AUTO_BUF_REG_FALLBACK + +struct ublk_auto_buf_reg { + /* index for registering the delivered request buffer */ + __u16 index; + __u8 flags; + __u8 reserved0; + + /* + * io_ring FD can be passed via the reserve field in future for + * supporting to register io buffer to external io_uring + */ + __u32 reserved1; +}; + +/* + * For UBLK_F_AUTO_BUF_REG, auto buffer register data is carried via + * uring_cmd's sqe->addr: + * + * - bit0 ~ bit15: buffer index + * - bit16 ~ bit23: flags + * - bit24 ~ bit31: reserved0 + * - bit32 ~ bit63: reserved1 + */ +static inline struct ublk_auto_buf_reg ublk_sqe_addr_to_auto_buf_reg( + __u64 sqe_addr) +{ + struct ublk_auto_buf_reg reg = { + .index = sqe_addr & 0xffff, + .flags = (sqe_addr >> 16) & 0xff, + .reserved0 = (sqe_addr >> 24) & 0xff, + .reserved1 = sqe_addr >> 32, + }; + + return reg; +} + +static inline __u64 +ublk_auto_buf_reg_to_sqe_addr(const struct ublk_auto_buf_reg *buf) +{ + __u64 addr = buf->index | (__u64)buf->flags << 16 | (__u64)buf->reserved0 << 24 | + (__u64)buf->reserved1 << 32; + + return addr; +} + /* issued to ublk driver via /dev/ublkcN */ struct ublksrv_io_cmd { __u16 q_id; diff --git a/init/Kconfig b/init/Kconfig index bf3a920064be..ae5d00baeafb 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -477,16 +477,6 @@ config CROSS_MEMORY_ATTACH to directly read from or write to another process' address space. See the man page for more details. -config USELIB - bool "uselib syscall (for libc5 and earlier)" - default ALPHA || M68K || SPARC - help - This option enables the uselib syscall, a system call used in the - dynamic linker from libc5 and earlier. glibc does not use this - system call. If you intend to run programs built on libc5 or - earlier, you may need to enable this syscall. Current systems - running glibc can safely disable this. - config AUDIT bool "Auditing support" depends on NET @@ -1085,6 +1075,17 @@ config RT_GROUP_SCHED realtime bandwidth for them. See Documentation/scheduler/sched-rt-group.rst for more information. 
+config RT_GROUP_SCHED_DEFAULT_DISABLED + bool "Require boot parameter to enable group scheduling for SCHED_RR/FIFO" + depends on RT_GROUP_SCHED + default n + help + When set, the RT group scheduling is disabled by default. The option + is in inverted form so that mere RT_GROUP_SCHED enables the group + scheduling. + + Say N if unsure. + config EXT_GROUP_SCHED bool depends on SCHED_CLASS_EXT && CGROUP_SCHED @@ -1697,6 +1698,16 @@ config FUTEX_PI depends on FUTEX && RT_MUTEXES default y +config FUTEX_PRIVATE_HASH + bool + depends on FUTEX && !BASE_SMALL && MMU + default y + +config FUTEX_MPOL + bool + depends on FUTEX && NUMA + default y + config EPOLL bool "Enable eventpoll support" if EXPERT default y diff --git a/io_uring/Makefile b/io_uring/Makefile index 3e28a741ca15..d97c6b51d584 100644 --- a/io_uring/Makefile +++ b/io_uring/Makefile @@ -7,11 +7,11 @@ GCOV_PROFILE := y endif obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \ - tctx.o filetable.o rw.o net.o poll.o \ + tctx.o filetable.o rw.o poll.o \ eventfd.o uring_cmd.o openclose.o \ sqpoll.o xattr.o nop.o fs.o splice.o \ sync.o msg_ring.o advise.o openclose.o \ - statx.o timeout.o fdinfo.o cancel.o \ + statx.o timeout.o cancel.o \ waitid.o register.o truncate.o \ memmap.o alloc_cache.o obj-$(CONFIG_IO_URING_ZCRX) += zcrx.o @@ -19,3 +19,5 @@ obj-$(CONFIG_IO_WQ) += io-wq.o obj-$(CONFIG_FUTEX) += futex.o obj-$(CONFIG_EPOLL) += epoll.o obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o +obj-$(CONFIG_NET) += net.o cmd_net.o +obj-$(CONFIG_PROC_FS) += fdinfo.o diff --git a/io_uring/advise.c b/io_uring/advise.c index cb7b881665e5..0073f74e3658 100644 --- a/io_uring/advise.c +++ b/io_uring/advise.c @@ -58,7 +58,7 @@ int io_madvise(struct io_kiocb *req, unsigned int issue_flags) ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; #else return -EOPNOTSUPP; #endif @@ -104,5 +104,5 @@ int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/cancel.c b/io_uring/cancel.c index 0870060bac7c..6d57602304df 100644 --- a/io_uring/cancel.c +++ b/io_uring/cancel.c @@ -229,7 +229,7 @@ done: if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } static int __io_sync_cancel(struct io_uring_task *tctx, diff --git a/io_uring/cmd_net.c b/io_uring/cmd_net.c new file mode 100644 index 000000000000..e99170c7d41a --- /dev/null +++ b/io_uring/cmd_net.c @@ -0,0 +1,83 @@ +#include <asm/ioctls.h> +#include <linux/io_uring/net.h> +#include <net/sock.h> + +#include "uring_cmd.h" + +static inline int io_uring_cmd_getsockopt(struct socket *sock, + struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + const struct io_uring_sqe *sqe = cmd->sqe; + bool compat = !!(issue_flags & IO_URING_F_COMPAT); + int optlen, optname, level, err; + void __user *optval; + + level = READ_ONCE(sqe->level); + if (level != SOL_SOCKET) + return -EOPNOTSUPP; + + optval = u64_to_user_ptr(READ_ONCE(sqe->optval)); + optname = READ_ONCE(sqe->optname); + optlen = READ_ONCE(sqe->optlen); + + err = do_sock_getsockopt(sock, compat, level, optname, + USER_SOCKPTR(optval), + KERNEL_SOCKPTR(&optlen)); + if (err) + return err; + + /* On success, return optlen */ + return optlen; +} + +static inline int io_uring_cmd_setsockopt(struct socket *sock, + struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + const struct io_uring_sqe *sqe = 
cmd->sqe; + bool compat = !!(issue_flags & IO_URING_F_COMPAT); + int optname, optlen, level; + void __user *optval; + sockptr_t optval_s; + + optval = u64_to_user_ptr(READ_ONCE(sqe->optval)); + optname = READ_ONCE(sqe->optname); + optlen = READ_ONCE(sqe->optlen); + level = READ_ONCE(sqe->level); + optval_s = USER_SOCKPTR(optval); + + return do_sock_setsockopt(sock, compat, level, optname, optval_s, + optlen); +} + +int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct socket *sock = cmd->file->private_data; + struct sock *sk = sock->sk; + struct proto *prot = READ_ONCE(sk->sk_prot); + int ret, arg = 0; + + if (!prot || !prot->ioctl) + return -EOPNOTSUPP; + + switch (cmd->cmd_op) { + case SOCKET_URING_OP_SIOCINQ: + ret = prot->ioctl(sk, SIOCINQ, &arg); + if (ret) + return ret; + return arg; + case SOCKET_URING_OP_SIOCOUTQ: + ret = prot->ioctl(sk, SIOCOUTQ, &arg); + if (ret) + return ret; + return arg; + case SOCKET_URING_OP_GETSOCKOPT: + return io_uring_cmd_getsockopt(sock, cmd, issue_flags); + case SOCKET_URING_OP_SETSOCKOPT: + return io_uring_cmd_setsockopt(sock, cmd, issue_flags); + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL_GPL(io_uring_cmd_sock); diff --git a/io_uring/epoll.c b/io_uring/epoll.c index 6d2c48ba1923..8d4610246ba0 100644 --- a/io_uring/epoll.c +++ b/io_uring/epoll.c @@ -61,7 +61,7 @@ int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -88,5 +88,5 @@ int io_epoll_wait(struct io_kiocb *req, unsigned int issue_flags) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c index 100d5da94cb9..78f8ab7db104 100644 --- a/io_uring/eventfd.c +++ b/io_uring/eventfd.c @@ -47,13 +47,6 @@ static void io_eventfd_do_signal(struct rcu_head *rcu) io_eventfd_put(ev_fd); } -static void io_eventfd_release(struct io_ev_fd *ev_fd, bool put_ref) -{ - if (put_ref) - io_eventfd_put(ev_fd); - rcu_read_unlock(); -} - /* * Returns true if the caller should put the ev_fd reference, false if not. */ @@ -72,63 +65,34 @@ static bool __io_eventfd_signal(struct io_ev_fd *ev_fd) /* * Trigger if eventfd_async isn't set, or if it's set and the caller is - * an async worker. If ev_fd isn't valid, obviously return false. + * an async worker. */ static bool io_eventfd_trigger(struct io_ev_fd *ev_fd) { - if (ev_fd) - return !ev_fd->eventfd_async || io_wq_current_is_worker(); - return false; + return !ev_fd->eventfd_async || io_wq_current_is_worker(); } -/* - * On success, returns with an ev_fd reference grabbed and the RCU read - * lock held. - */ -static struct io_ev_fd *io_eventfd_grab(struct io_ring_ctx *ctx) +void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event) { + bool skip = false; struct io_ev_fd *ev_fd; if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) - return NULL; - - rcu_read_lock(); + return; - /* - * rcu_dereference ctx->io_ev_fd once and use it for both for checking - * and eventfd_signal - */ + guard(rcu)(); ev_fd = rcu_dereference(ctx->io_ev_fd); - /* * Check again if ev_fd exists in case an io_eventfd_unregister call * completed between the NULL check of ctx->io_ev_fd at the start of * the function and rcu_read_lock. 
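Matching the io_uring_cmd_getsockopt() handler above, a rough user-space sketch of an IORING_OP_URING_CMD SQE for SOCKET_URING_OP_GETSOCKOPT, filling only the SQE fields that handler reads (level, optname, optval, optlen); the function name is illustrative, and obtaining and submitting the SQE, for example through liburing, is omitted::

    #include <linux/io_uring.h>
    #include <sys/socket.h>
    #include <stdint.h>
    #include <string.h>

    /* Prepare a SOL_SOCKET getsockopt (here SO_RCVBUF) as a socket uring_cmd. */
    static void prep_sock_getsockopt(struct io_uring_sqe *sqe, int sockfd,
                                     int *optval, int optlen)
    {
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode  = IORING_OP_URING_CMD;
        sqe->fd      = sockfd;
        sqe->cmd_op  = SOCKET_URING_OP_GETSOCKOPT;
        sqe->level   = SOL_SOCKET;               /* only SOL_SOCKET is accepted */
        sqe->optname = SO_RCVBUF;
        sqe->optval  = (__u64)(uintptr_t)optval; /* user buffer for the option value */
        sqe->optlen  = optlen;
        /* on success, the CQE res field carries the resulting optlen */
    }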
*/ - if (io_eventfd_trigger(ev_fd) && refcount_inc_not_zero(&ev_fd->refs)) - return ev_fd; - - rcu_read_unlock(); - return NULL; -} - -void io_eventfd_signal(struct io_ring_ctx *ctx) -{ - struct io_ev_fd *ev_fd; - - ev_fd = io_eventfd_grab(ctx); - if (ev_fd) - io_eventfd_release(ev_fd, __io_eventfd_signal(ev_fd)); -} - -void io_eventfd_flush_signal(struct io_ring_ctx *ctx) -{ - struct io_ev_fd *ev_fd; - - ev_fd = io_eventfd_grab(ctx); - if (ev_fd) { - bool skip, put_ref = true; + if (!ev_fd) + return; + if (!io_eventfd_trigger(ev_fd) || !refcount_inc_not_zero(&ev_fd->refs)) + return; + if (cqe_event) { /* * Eventfd should only get triggered when at least one event * has been posted. Some applications rely on the eventfd @@ -142,12 +106,10 @@ void io_eventfd_flush_signal(struct io_ring_ctx *ctx) skip = ctx->cached_cq_tail == ev_fd->last_cq_tail; ev_fd->last_cq_tail = ctx->cached_cq_tail; spin_unlock(&ctx->completion_lock); - - if (!skip) - put_ref = __io_eventfd_signal(ev_fd); - - io_eventfd_release(ev_fd, put_ref); } + + if (skip || __io_eventfd_signal(ev_fd)) + io_eventfd_put(ev_fd); } int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, diff --git a/io_uring/eventfd.h b/io_uring/eventfd.h index d394f49c6321..e2f1985c2cf9 100644 --- a/io_uring/eventfd.h +++ b/io_uring/eventfd.h @@ -4,5 +4,4 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, unsigned int eventfd_async); int io_eventfd_unregister(struct io_ring_ctx *ctx); -void io_eventfd_flush_signal(struct io_ring_ctx *ctx); -void io_eventfd_signal(struct io_ring_ctx *ctx); +void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event); diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index e0d6a59a89fa..e9355276ab5d 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -15,37 +15,6 @@ #include "cancel.h" #include "rsrc.h" -#ifdef CONFIG_PROC_FS -static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id, - const struct cred *cred) -{ - struct user_namespace *uns = seq_user_ns(m); - struct group_info *gi; - kernel_cap_t cap; - int g; - - seq_printf(m, "%5d\n", id); - seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid)); - seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid)); - seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid)); - seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid)); - seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid)); - seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid)); - seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid)); - seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid)); - seq_puts(m, "\n\tGroups:\t"); - gi = cred->group_info; - for (g = 0; g < gi->ngroups; g++) { - seq_put_decimal_ull(m, g ? 
" " : "", - from_kgid_munged(uns, gi->gid[g])); - } - seq_puts(m, "\n\tCapEff:\t"); - cap = cred->cap_effective; - seq_put_hex_ll(m, NULL, cap.val, 16); - seq_putc(m, '\n'); - return 0; -} - #ifdef CONFIG_NET_RX_BUSY_POLL static __cold void common_tracking_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m, @@ -214,14 +183,6 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) else seq_printf(m, "%5u: <none>\n", i); } - if (!xa_empty(&ctx->personalities)) { - unsigned long index; - const struct cred *cred; - - seq_printf(m, "Personalities:\n"); - xa_for_each(&ctx->personalities, index, cred) - io_uring_show_cred(m, index, cred); - } seq_puts(m, "PollList:\n"); for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) { @@ -264,4 +225,3 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) mutex_unlock(&ctx->uring_lock); } } -#endif diff --git a/io_uring/fs.c b/io_uring/fs.c index eccea851dd5a..37079a414eab 100644 --- a/io_uring/fs.c +++ b/io_uring/fs.c @@ -90,7 +90,7 @@ int io_renameat(struct io_kiocb *req, unsigned int issue_flags) req->flags &= ~REQ_F_NEED_CLEANUP; io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } void io_renameat_cleanup(struct io_kiocb *req) @@ -141,7 +141,7 @@ int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) req->flags &= ~REQ_F_NEED_CLEANUP; io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } void io_unlinkat_cleanup(struct io_kiocb *req) @@ -185,7 +185,7 @@ int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) req->flags &= ~REQ_F_NEED_CLEANUP; io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } void io_mkdirat_cleanup(struct io_kiocb *req) @@ -235,7 +235,7 @@ int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags) req->flags &= ~REQ_F_NEED_CLEANUP; io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -281,7 +281,7 @@ int io_linkat(struct io_kiocb *req, unsigned int issue_flags) req->flags &= ~REQ_F_NEED_CLEANUP; io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } void io_link_cleanup(struct io_kiocb *req) diff --git a/io_uring/futex.c b/io_uring/futex.c index 0ea4820cd8ff..fa374afbaa51 100644 --- a/io_uring/futex.c +++ b/io_uring/futex.c @@ -234,7 +234,7 @@ int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags) kfree(futexv); req->async_data = NULL; req->flags &= ~REQ_F_ASYNC_DATA; - return IOU_OK; + return IOU_COMPLETE; } /* @@ -273,7 +273,6 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex); struct io_ring_ctx *ctx = req->ctx; struct io_futex_data *ifd = NULL; - struct futex_hash_bucket *hb; int ret; if (!iof->futex_mask) { @@ -295,12 +294,11 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) ifd->req = req; ret = futex_wait_setup(iof->uaddr, iof->futex_val, iof->futex_flags, - &ifd->q, &hb); + &ifd->q, NULL, NULL); if (!ret) { hlist_add_head(&req->hash_node, &ctx->futex_list); io_ring_submit_unlock(ctx, issue_flags); - futex_queue(&ifd->q, hb, NULL); return IOU_ISSUE_SKIP_COMPLETE; } @@ -311,7 +309,7 @@ done: req_set_fail(req); io_req_set_res(req, ret, 0); kfree(ifd); - return IOU_OK; + return IOU_COMPLETE; } int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags) @@ -328,5 +326,5 @@ int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) req_set_fail(req); 
io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 04a75d666195..cd1fcb115739 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -114,9 +114,6 @@ enum { struct io_wq { unsigned long state; - free_work_fn *free_work; - io_wq_work_fn *do_work; - struct io_wq_hash *hash; atomic_t worker_refs; @@ -153,6 +150,16 @@ static bool io_acct_cancel_pending_work(struct io_wq *wq, static void create_worker_cb(struct callback_head *cb); static void io_wq_cancel_tw_create(struct io_wq *wq); +static inline unsigned int __io_get_work_hash(unsigned int work_flags) +{ + return work_flags >> IO_WQ_HASH_SHIFT; +} + +static inline unsigned int io_get_work_hash(struct io_wq_work *work) +{ + return __io_get_work_hash(atomic_read(&work->flags)); +} + static bool io_worker_get(struct io_worker *worker) { return refcount_inc_not_zero(&worker->ref); @@ -412,6 +419,30 @@ fail: return false; } +/* Defer if current and next work are both hashed to the same chain */ +static bool io_wq_hash_defer(struct io_wq_work *work, struct io_wq_acct *acct) +{ + unsigned int hash, work_flags; + struct io_wq_work *next; + + lockdep_assert_held(&acct->lock); + + work_flags = atomic_read(&work->flags); + if (!__io_wq_is_hashed(work_flags)) + return false; + + /* should not happen, io_acct_run_queue() said we had work */ + if (wq_list_empty(&acct->work_list)) + return true; + + hash = __io_get_work_hash(work_flags); + next = container_of(acct->work_list.first, struct io_wq_work, list); + work_flags = atomic_read(&next->flags); + if (!__io_wq_is_hashed(work_flags)) + return false; + return hash == __io_get_work_hash(work_flags); +} + static void io_wq_dec_running(struct io_worker *worker) { struct io_wq_acct *acct = io_wq_get_acct(worker); @@ -422,8 +453,14 @@ static void io_wq_dec_running(struct io_worker *worker) if (!atomic_dec_and_test(&acct->nr_running)) return; + if (!worker->cur_work) + return; if (!io_acct_run_queue(acct)) return; + if (io_wq_hash_defer(worker->cur_work, acct)) { + raw_spin_unlock(&acct->lock); + return; + } raw_spin_unlock(&acct->lock); atomic_inc(&acct->nr_running); @@ -457,16 +494,6 @@ static void __io_worker_idle(struct io_wq_acct *acct, struct io_worker *worker) } } -static inline unsigned int __io_get_work_hash(unsigned int work_flags) -{ - return work_flags >> IO_WQ_HASH_SHIFT; -} - -static inline unsigned int io_get_work_hash(struct io_wq_work *work) -{ - return __io_get_work_hash(atomic_read(&work->flags)); -} - static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash) { bool ret = false; @@ -612,10 +639,10 @@ static void io_worker_handle_work(struct io_wq_acct *acct, if (do_kill && (work_flags & IO_WQ_WORK_UNBOUND)) atomic_or(IO_WQ_WORK_CANCEL, &work->flags); - wq->do_work(work); + io_wq_submit_work(work); io_assign_current_work(worker, NULL); - linked = wq->free_work(work); + linked = io_wq_free_work(work); work = next_hashed; if (!work && linked && !io_wq_is_hashed(linked)) { work = linked; @@ -934,8 +961,8 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq) { do { atomic_or(IO_WQ_WORK_CANCEL, &work->flags); - wq->do_work(work); - work = wq->free_work(work); + io_wq_submit_work(work); + work = io_wq_free_work(work); } while (work); } @@ -1195,8 +1222,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) int ret, i; struct io_wq *wq; - if (WARN_ON_ONCE(!data->free_work || !data->do_work)) - return ERR_PTR(-EINVAL); if (WARN_ON_ONCE(!bounded)) return ERR_PTR(-EINVAL); @@ -1206,8 
+1231,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) refcount_inc(&data->hash->refs); wq->hash = data->hash; - wq->free_work = data->free_work; - wq->do_work = data->do_work; ret = -ENOMEM; diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h index d4fb2940e435..774abab54732 100644 --- a/io_uring/io-wq.h +++ b/io_uring/io-wq.h @@ -21,9 +21,6 @@ enum io_wq_cancel { IO_WQ_CANCEL_NOTFOUND, /* work not found */ }; -typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *); -typedef void (io_wq_work_fn)(struct io_wq_work *); - struct io_wq_hash { refcount_t refs; unsigned long map; @@ -39,8 +36,6 @@ static inline void io_wq_put_hash(struct io_wq_hash *hash) struct io_wq_data { struct io_wq_hash *hash; struct task_struct *task; - io_wq_work_fn *do_work; - free_work_fn *free_work; }; struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 541e65a1eebf..c7a9cecf528e 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -129,7 +129,6 @@ struct io_defer_entry { struct list_head list; struct io_kiocb *req; - u32 seq; }; /* requests with any of those set should undergo io_disarm_next() */ @@ -149,6 +148,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, bool is_sqpoll_thread); static void io_queue_sqe(struct io_kiocb *req); +static void __io_req_caches_free(struct io_ring_ctx *ctx); static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray); @@ -359,6 +359,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->tctx_list); ctx->submit_state.free_list.next = NULL; INIT_HLIST_HEAD(&ctx->waitid_list); + xa_init_flags(&ctx->zcrx_ctxs, XA_FLAGS_ALLOC); #ifdef CONFIG_FUTEX INIT_HLIST_HEAD(&ctx->futex_list); #endif @@ -380,25 +381,6 @@ err: return NULL; } -static void io_account_cq_overflow(struct io_ring_ctx *ctx) -{ - struct io_rings *r = ctx->rings; - - WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1); - ctx->cq_extra--; -} - -static bool req_need_defer(struct io_kiocb *req, u32 seq) -{ - if (unlikely(req->flags & REQ_F_IO_DRAIN)) { - struct io_ring_ctx *ctx = req->ctx; - - return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail; - } - - return false; -} - static void io_clean_op(struct io_kiocb *req) { if (unlikely(req->flags & REQ_F_BUFFER_SELECTED)) @@ -537,20 +519,37 @@ void io_req_queue_iowq(struct io_kiocb *req) io_req_task_work_add(req); } +static unsigned io_linked_nr(struct io_kiocb *req) +{ + struct io_kiocb *tmp; + unsigned nr = 0; + + io_for_each_link(tmp, req) + nr++; + return nr; +} + static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx) { - spin_lock(&ctx->completion_lock); + bool drain_seen = false, first = true; + + lockdep_assert_held(&ctx->uring_lock); + __io_req_caches_free(ctx); + while (!list_empty(&ctx->defer_list)) { struct io_defer_entry *de = list_first_entry(&ctx->defer_list, struct io_defer_entry, list); - if (req_need_defer(de->req, de->seq)) - break; + drain_seen |= de->req->flags & REQ_F_IO_DRAIN; + if ((drain_seen || first) && ctx->nr_req_allocated != ctx->nr_drained) + return; + list_del_init(&de->list); + ctx->nr_drained -= io_linked_nr(de->req); io_req_task_queue(de->req); kfree(de); + first = false; } - spin_unlock(&ctx->completion_lock); } void __io_commit_cqring_flush(struct io_ring_ctx *ctx) @@ -559,10 +558,8 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx) io_poll_wq_wake(ctx); if (ctx->off_timeout_used) io_flush_timeouts(ctx); - if 
(ctx->drain_active) - io_queue_deferred(ctx); if (ctx->has_evfd) - io_eventfd_flush_signal(ctx); + io_eventfd_signal(ctx, true); } static inline void __io_cq_lock(struct io_ring_ctx *ctx) @@ -636,6 +633,7 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying) * to care for a non-real case. */ if (need_resched()) { + ctx->cqe_sentinel = ctx->cqe_cached; io_cq_unlock_post(ctx); mutex_unlock(&ctx->uring_lock); cond_resched(); @@ -700,27 +698,20 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task) } } -static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, - s32 res, u32 cflags, u64 extra1, u64 extra2) +static __cold bool io_cqring_add_overflow(struct io_ring_ctx *ctx, + struct io_overflow_cqe *ocqe) { - struct io_overflow_cqe *ocqe; - size_t ocq_size = sizeof(struct io_overflow_cqe); - bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32); - lockdep_assert_held(&ctx->completion_lock); - if (is_cqe32) - ocq_size += sizeof(struct io_uring_cqe); - - ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT); - trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe); if (!ocqe) { + struct io_rings *r = ctx->rings; + /* * If we're in ring overflow flush mode, or in task cancel mode, * or cannot allocate an overflow entry, then we need to drop it * on the floor. */ - io_account_cq_overflow(ctx); + WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1); set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq); return false; } @@ -729,23 +720,35 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); } - ocqe->cqe.user_data = user_data; - ocqe->cqe.res = res; - ocqe->cqe.flags = cflags; - if (is_cqe32) { - ocqe->cqe.big_cqe[0] = extra1; - ocqe->cqe.big_cqe[1] = extra2; - } list_add_tail(&ocqe->list, &ctx->cq_overflow_list); return true; } -static void io_req_cqe_overflow(struct io_kiocb *req) +static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx, + struct io_cqe *cqe, + struct io_big_cqe *big_cqe, gfp_t gfp) { - io_cqring_event_overflow(req->ctx, req->cqe.user_data, - req->cqe.res, req->cqe.flags, - req->big_cqe.extra1, req->big_cqe.extra2); - memset(&req->big_cqe, 0, sizeof(req->big_cqe)); + struct io_overflow_cqe *ocqe; + size_t ocq_size = sizeof(struct io_overflow_cqe); + bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32); + + if (is_cqe32) + ocq_size += sizeof(struct io_uring_cqe); + + ocqe = kzalloc(ocq_size, gfp | __GFP_ACCOUNT); + trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe); + if (ocqe) { + ocqe->cqe.user_data = cqe->user_data; + ocqe->cqe.res = cqe->res; + ocqe->cqe.flags = cqe->flags; + if (is_cqe32 && big_cqe) { + ocqe->cqe.big_cqe[0] = big_cqe->extra1; + ocqe->cqe.big_cqe[1] = big_cqe->extra2; + } + } + if (big_cqe) + big_cqe->extra1 = big_cqe->extra2 = 0; + return ocqe; } /* @@ -790,13 +793,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, { struct io_uring_cqe *cqe; - ctx->cq_extra++; - - /* - * If we can't get a cq entry, userspace overflowed the - * submission (by quite a lot). Increment the overflow count in - * the ring. 
- */ if (likely(io_get_cqe(ctx, &cqe))) { WRITE_ONCE(cqe->user_data, user_data); WRITE_ONCE(cqe->res, res); @@ -813,14 +809,43 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, return false; } +static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags) +{ + return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags }; +} + +static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe, + struct io_big_cqe *big_cqe) +{ + struct io_overflow_cqe *ocqe; + + ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL); + spin_lock(&ctx->completion_lock); + io_cqring_add_overflow(ctx, ocqe); + spin_unlock(&ctx->completion_lock); +} + +static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx, + struct io_cqe *cqe, + struct io_big_cqe *big_cqe) +{ + struct io_overflow_cqe *ocqe; + + ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC); + return io_cqring_add_overflow(ctx, ocqe); +} + bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { bool filled; io_cq_lock(ctx); filled = io_fill_cqe_aux(ctx, user_data, res, cflags); - if (!filled) - filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); + if (unlikely(!filled)) { + struct io_cqe cqe = io_init_cqe(user_data, res, cflags); + + filled = io_cqe_overflow_locked(ctx, &cqe, NULL); + } io_cq_unlock_post(ctx); return filled; } @@ -831,10 +856,13 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags */ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { + lockdep_assert_held(&ctx->uring_lock); + lockdep_assert(ctx->lockless_cq); + if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) { - spin_lock(&ctx->completion_lock); - io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); - spin_unlock(&ctx->completion_lock); + struct io_cqe cqe = io_init_cqe(user_data, res, cflags); + + io_cqe_overflow(ctx, &cqe, NULL); } ctx->submit_state.cq_flush = true; } @@ -924,22 +952,6 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res) } /* - * Don't initialise the fields below on every allocation, but do that in - * advance and keep them valid across allocations. - */ -static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) -{ - req->ctx = ctx; - req->buf_node = NULL; - req->file_node = NULL; - req->link = NULL; - req->async_data = NULL; - /* not necessary, but safer to zero */ - memset(&req->cqe, 0, sizeof(req->cqe)); - memset(&req->big_cqe, 0, sizeof(req->big_cqe)); -} - -/* * A request might get retired back into the request caches even before opcode * handlers and io_issue_sqe() are done with it, e.g. inline completion path. 
* Because of that, io_alloc_req() should be called only under ->uring_lock @@ -948,7 +960,7 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { - gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; + gfp_t gfp = GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO; void *reqs[IO_REQ_ALLOC_BATCH]; int ret; @@ -966,10 +978,11 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) } percpu_ref_get_many(&ctx->refs, ret); + ctx->nr_req_allocated += ret; + while (ret--) { struct io_kiocb *req = reqs[ret]; - io_preinit_req(req, ctx); io_req_add_to_cache(req, ctx); } return true; @@ -1191,7 +1204,7 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags) if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); if (ctx->has_evfd) - io_eventfd_signal(ctx); + io_eventfd_signal(ctx, false); } nr_wait = atomic_read(&ctx->cq_wait_nr); @@ -1383,6 +1396,16 @@ void io_queue_next(struct io_kiocb *req) io_req_task_queue(nxt); } +static inline void io_req_put_rsrc_nodes(struct io_kiocb *req) +{ + if (req->file_node) { + io_put_rsrc_node(req->ctx, req->file_node); + req->file_node = NULL; + } + if (req->flags & REQ_F_BUF_NODE) + io_put_rsrc_node(req->ctx, req->buf_node); +} + static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node) __must_hold(&ctx->uring_lock) @@ -1443,13 +1466,10 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx) */ if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) && unlikely(!io_fill_cqe_req(ctx, req))) { - if (ctx->lockless_cq) { - spin_lock(&ctx->completion_lock); - io_req_cqe_overflow(req); - spin_unlock(&ctx->completion_lock); - } else { - io_req_cqe_overflow(req); - } + if (ctx->lockless_cq) + io_cqe_overflow(ctx, &req->cqe, &req->big_cqe); + else + io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe); } } __io_cq_unlock_post(ctx); @@ -1458,6 +1478,10 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx) io_free_batch_list(ctx, state->compl_reqs.first); INIT_WQ_LIST(&state->compl_reqs); } + + if (unlikely(ctx->drain_active)) + io_queue_deferred(ctx); + ctx->submit_state.cq_flush = false; } @@ -1645,56 +1669,28 @@ io_req_flags_t io_file_get_flags(struct file *file) return res; } -static u32 io_get_sequence(struct io_kiocb *req) -{ - u32 seq = req->ctx->cached_sq_head; - struct io_kiocb *cur; - - /* need original cached_sq_head, but it was increased for each req */ - io_for_each_link(cur, req) - seq--; - return seq; -} - static __cold void io_drain_req(struct io_kiocb *req) __must_hold(&ctx->uring_lock) { struct io_ring_ctx *ctx = req->ctx; + bool drain = req->flags & IOSQE_IO_DRAIN; struct io_defer_entry *de; - int ret; - u32 seq = io_get_sequence(req); - - /* Still need defer if there is pending req in defer list. 
*/ - spin_lock(&ctx->completion_lock); - if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) { - spin_unlock(&ctx->completion_lock); -queue: - ctx->drain_active = false; - io_req_task_queue(req); - return; - } - spin_unlock(&ctx->completion_lock); - io_prep_async_link(req); - de = kmalloc(sizeof(*de), GFP_KERNEL); + de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT); if (!de) { - ret = -ENOMEM; - io_req_defer_failed(req, ret); + io_req_defer_failed(req, -ENOMEM); return; } - spin_lock(&ctx->completion_lock); - if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { - spin_unlock(&ctx->completion_lock); - kfree(de); - goto queue; - } - + io_prep_async_link(req); trace_io_uring_defer(req); de->req = req; - de->seq = seq; + + ctx->nr_drained += io_linked_nr(req); list_add_tail(&de->list, &ctx->defer_list); - spin_unlock(&ctx->completion_lock); + io_queue_deferred(ctx); + if (!drain && list_empty(&ctx->defer_list)) + ctx->drain_active = false; } static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def, @@ -1756,7 +1752,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) ret = __io_issue_sqe(req, issue_flags, def); - if (ret == IOU_OK) { + if (ret == IOU_COMPLETE) { if (issue_flags & IO_URING_F_COMPLETE_DEFER) io_req_complete_defer(req); else @@ -1815,7 +1811,7 @@ void io_wq_submit_work(struct io_wq_work *work) bool needs_poll = false; int ret = 0, err = -ECANCELED; - /* one will be dropped by ->io_wq_free_work() after returning to io-wq */ + /* one will be dropped by io_wq_free_work() after returning to io-wq */ if (!(req->flags & REQ_F_REFCOUNT)) __io_req_set_refcount(req, 2); else @@ -1913,7 +1909,8 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, io_ring_submit_lock(ctx, issue_flags); node = io_rsrc_node_lookup(&ctx->file_table.data, fd); if (node) { - io_req_assign_rsrc_node(&req->file_node, node); + node->refs++; + req->file_node = node; req->flags |= io_slot_flags(node); file = io_slot_file(node); } @@ -2046,7 +2043,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, int personality; u8 opcode; - /* req is partially pre-initialised, see io_preinit_req() */ + req->ctx = ctx; req->opcode = opcode = READ_ONCE(sqe->opcode); /* same numerical values with corresponding REQ_F_*, safe to copy */ sqe_flags = READ_ONCE(sqe->flags); @@ -2277,10 +2274,6 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) (!(ctx->flags & IORING_SETUP_NO_SQARRAY))) { head = READ_ONCE(ctx->sq_array[head]); if (unlikely(head >= ctx->sq_entries)) { - /* drop invalid entries */ - spin_lock(&ctx->completion_lock); - ctx->cq_extra--; - spin_unlock(&ctx->completion_lock); WRITE_ONCE(ctx->rings->sq_dropped, READ_ONCE(ctx->rings->sq_dropped) + 1); return false; @@ -2698,21 +2691,26 @@ unsigned long rings_size(unsigned int flags, unsigned int sq_entries, return off; } -static void io_req_caches_free(struct io_ring_ctx *ctx) +static __cold void __io_req_caches_free(struct io_ring_ctx *ctx) { struct io_kiocb *req; int nr = 0; - mutex_lock(&ctx->uring_lock); - while (!io_req_cache_empty(ctx)) { req = io_extract_req(ctx); kmem_cache_free(req_cachep, req); nr++; } - if (nr) + if (nr) { + ctx->nr_req_allocated -= nr; percpu_ref_put_many(&ctx->refs, nr); - mutex_unlock(&ctx->uring_lock); + } +} + +static __cold void io_req_caches_free(struct io_ring_ctx *ctx) +{ + guard(mutex)(&ctx->uring_lock); + __io_req_caches_free(ctx); } static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) @@ 
-2748,6 +2746,9 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) percpu_ref_exit(&ctx->refs); free_uid(ctx->user); io_req_caches_free(ctx); + + WARN_ON_ONCE(ctx->nr_req_allocated); + if (ctx->hash_map) io_wq_put_hash(ctx->hash_map); io_napi_free(ctx); @@ -2882,7 +2883,7 @@ static __cold void io_ring_exit_work(struct work_struct *work) io_cqring_overflow_kill(ctx); mutex_unlock(&ctx->uring_lock); } - if (ctx->ifq) { + if (!xa_empty(&ctx->zcrx_ctxs)) { mutex_lock(&ctx->uring_lock); io_shutdown_zcrx_ifqs(ctx); mutex_unlock(&ctx->uring_lock); @@ -3014,20 +3015,19 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, struct io_defer_entry *de; LIST_HEAD(list); - spin_lock(&ctx->completion_lock); list_for_each_entry_reverse(de, &ctx->defer_list, list) { if (io_match_task_safe(de->req, tctx, cancel_all)) { list_cut_position(&list, &ctx->defer_list, &de->list); break; } } - spin_unlock(&ctx->completion_lock); if (list_empty(&list)) return false; while (!list_empty(&list)) { de = list_first_entry(&list, struct io_defer_entry, list); list_del_init(&de->list); + ctx->nr_drained -= io_linked_nr(de->req); io_req_task_queue_fail(de->req, -ECANCELED); kfree(de); } @@ -3102,8 +3102,8 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && io_allowed_defer_tw_run(ctx)) ret |= io_run_local_work(ctx, INT_MAX, INT_MAX) > 0; - ret |= io_cancel_defer_files(ctx, tctx, cancel_all); mutex_lock(&ctx->uring_lock); + ret |= io_cancel_defer_files(ctx, tctx, cancel_all); ret |= io_poll_remove_all(ctx, tctx, cancel_all); ret |= io_waitid_remove_all(ctx, tctx, cancel_all); ret |= io_futex_remove_all(ctx, tctx, cancel_all); @@ -3913,6 +3913,8 @@ static int __init io_uring_init(void) BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); BUILD_BUG_SQE_ELEM(44, __u32, file_index); BUILD_BUG_SQE_ELEM(44, __u16, addr_len); + BUILD_BUG_SQE_ELEM(44, __u8, write_stream); + BUILD_BUG_SQE_ELEM(45, __u8, __pad4[0]); BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]); BUILD_BUG_SQE_ELEM(48, __u64, addr3); BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd); diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index e4050b2d0821..0ea7a435d1de 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -19,7 +19,6 @@ #endif enum { - IOU_OK = 0, /* deprecated, use IOU_COMPLETE */ IOU_COMPLETE = 0, IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED, @@ -196,7 +195,6 @@ static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx, { io_lockdep_assert_cq_locked(ctx); - ctx->cq_extra++; ctx->submit_state.cq_flush = true; return io_get_cqe(ctx, cqe_ret); } @@ -414,7 +412,7 @@ static inline void io_req_complete_defer(struct io_kiocb *req) static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx) { - if (unlikely(ctx->off_timeout_used || ctx->drain_active || + if (unlikely(ctx->off_timeout_used || ctx->has_evfd || ctx->poll_activated)) __io_commit_cqring_flush(ctx); } diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 953d5e742569..8cce3ebd813f 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -92,7 +92,6 @@ void io_kbuf_drop_legacy(struct io_kiocb *req) { if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED))) return; - req->buf_index = req->kbuf->bgid; req->flags &= ~REQ_F_BUFFER_SELECTED; kfree(req->kbuf); req->kbuf = NULL; @@ -110,7 +109,6 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags) bl = io_buffer_get_list(ctx, buf->bgid); list_add(&buf->list, &bl->buf_list); req->flags &= ~REQ_F_BUFFER_SELECTED; - req->buf_index = buf->bgid; 
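/*
 * Illustrative userspace sketch, not part of the patch: the kbuf.c hunks in
 * this area stop round-tripping the buffer group id through req->buf_index
 * and instead carry it in the selection arguments, but the uapi flow below
 * is unchanged.  Assumes liburing is available; error handling is omitted
 * and BGID/NBUFS/BUFLEN are arbitrary values chosen for the example.
 */
#include <liburing.h>

#define BGID	7
#define NBUFS	8
#define BUFLEN	4096

static void provide_and_recv(struct io_uring *ring, int sockfd, void *bufs)
{
	struct io_uring_sqe *sqe;

	/* IORING_OP_PROVIDE_BUFFERS: hand NBUFS buffers of BUFLEN bytes to group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUFLEN, NBUFS, BGID, 0);

	/* a recv that lets the kernel pick a buffer from group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, BUFLEN, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;

	io_uring_submit(ring);
	/* the chosen buffer id arrives in cqe->flags >> IORING_CQE_BUFFER_SHIFT */
}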
io_ring_submit_unlock(ctx, issue_flags); return true; @@ -193,7 +191,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, } void __user *io_buffer_select(struct io_kiocb *req, size_t *len, - unsigned int issue_flags) + unsigned buf_group, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_buffer_list *bl; @@ -201,7 +199,7 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len, io_ring_submit_lock(req->ctx, issue_flags); - bl = io_buffer_get_list(ctx, req->buf_index); + bl = io_buffer_get_list(ctx, buf_group); if (likely(bl)) { if (bl->flags & IOBL_BUF_RING) ret = io_ring_buffer_select(req, len, bl, issue_flags); @@ -302,7 +300,7 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg, int ret = -ENOENT; io_ring_submit_lock(ctx, issue_flags); - bl = io_buffer_get_list(ctx, req->buf_index); + bl = io_buffer_get_list(ctx, arg->buf_group); if (unlikely(!bl)) goto out_unlock; @@ -335,7 +333,7 @@ int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg) lockdep_assert_held(&ctx->uring_lock); - bl = io_buffer_get_list(ctx, req->buf_index); + bl = io_buffer_get_list(ctx, arg->buf_group); if (unlikely(!bl)) return -ENOENT; @@ -355,10 +353,9 @@ static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr) struct io_buffer_list *bl = req->buf_list; bool ret = true; - if (bl) { + if (bl) ret = io_kbuf_commit(req, bl, len, nr); - req->buf_index = bl->bgid; - } + req->flags &= ~REQ_F_BUFFER_RING; return ret; } @@ -379,45 +376,33 @@ unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs) return ret; } -static int __io_remove_buffers(struct io_ring_ctx *ctx, - struct io_buffer_list *bl, unsigned nbufs) +static int io_remove_buffers_legacy(struct io_ring_ctx *ctx, + struct io_buffer_list *bl, + unsigned long nbufs) { - unsigned i = 0; - - /* shouldn't happen */ - if (!nbufs) - return 0; - - if (bl->flags & IOBL_BUF_RING) { - i = bl->buf_ring->tail - bl->head; - io_free_region(ctx, &bl->region); - /* make sure it's seen as empty */ - INIT_LIST_HEAD(&bl->buf_list); - bl->flags &= ~IOBL_BUF_RING; - return i; - } + unsigned long i = 0; + struct io_buffer *nxt; /* protects io_buffers_cache */ lockdep_assert_held(&ctx->uring_lock); + WARN_ON_ONCE(bl->flags & IOBL_BUF_RING); - while (!list_empty(&bl->buf_list)) { - struct io_buffer *nxt; - + for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) { nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); list_del(&nxt->list); kfree(nxt); - - if (++i == nbufs) - return i; cond_resched(); } - return i; } static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) { - __io_remove_buffers(ctx, bl, -1U); + if (bl->flags & IOBL_BUF_RING) + io_free_region(ctx, &bl->region); + else + io_remove_buffers_legacy(ctx, bl, -1U); + kfree(bl); } @@ -465,30 +450,6 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) -{ - struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf); - struct io_ring_ctx *ctx = req->ctx; - struct io_buffer_list *bl; - int ret = 0; - - io_ring_submit_lock(ctx, issue_flags); - - ret = -ENOENT; - bl = io_buffer_get_list(ctx, p->bgid); - if (bl) { - ret = -EINVAL; - /* can't use provide/remove buffers command on mapped buffers */ - if (!(bl->flags & IOBL_BUF_RING)) - ret = __io_remove_buffers(ctx, bl, p->nbufs); - } - io_ring_submit_unlock(ctx, issue_flags); - if (ret < 0) - req_set_fail(req); 
- io_req_set_res(req, ret, 0); - return IOU_OK; -} - int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { unsigned long size, tmp_check; @@ -512,8 +473,6 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe return -EOVERFLOW; if (check_add_overflow((unsigned long)p->addr, size, &tmp_check)) return -EOVERFLOW; - - size = (unsigned long)p->len * p->nbufs; if (!access_ok(u64_to_user_ptr(p->addr), size)) return -EFAULT; @@ -552,49 +511,56 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf, return i ? 0 : -ENOMEM; } -int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) +static int __io_manage_buffers_legacy(struct io_kiocb *req, + struct io_buffer_list *bl) { struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf); - struct io_ring_ctx *ctx = req->ctx; - struct io_buffer_list *bl; - int ret = 0; - - io_ring_submit_lock(ctx, issue_flags); + int ret; - bl = io_buffer_get_list(ctx, p->bgid); - if (unlikely(!bl)) { + if (!bl) { + if (req->opcode != IORING_OP_PROVIDE_BUFFERS) + return -ENOENT; bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); - if (!bl) { - ret = -ENOMEM; - goto err; - } + if (!bl) + return -ENOMEM; + INIT_LIST_HEAD(&bl->buf_list); - ret = io_buffer_add_list(ctx, bl, p->bgid); + ret = io_buffer_add_list(req->ctx, bl, p->bgid); if (ret) { kfree(bl); - goto err; + return ret; } } - /* can't add buffers via this command for a mapped buffer ring */ - if (bl->flags & IOBL_BUF_RING) { - ret = -EINVAL; - goto err; - } + /* can't use provide/remove buffers command on mapped buffers */ + if (bl->flags & IOBL_BUF_RING) + return -EINVAL; + if (req->opcode == IORING_OP_PROVIDE_BUFFERS) + return io_add_buffers(req->ctx, p, bl); + return io_remove_buffers_legacy(req->ctx, bl, p->nbufs); +} + +int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf); + struct io_ring_ctx *ctx = req->ctx; + struct io_buffer_list *bl; + int ret; - ret = io_add_buffers(ctx, p, bl); -err: + io_ring_submit_lock(ctx, issue_flags); + bl = io_buffer_get_list(ctx, p->bgid); + ret = __io_manage_buffers_legacy(req, bl); io_ring_submit_unlock(ctx, issue_flags); if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) { struct io_uring_buf_reg reg; - struct io_buffer_list *bl, *free_bl = NULL; + struct io_buffer_list *bl; struct io_uring_region_desc rd; struct io_uring_buf_ring *br; unsigned long mmap_offset; @@ -605,8 +571,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) if (copy_from_user(®, arg, sizeof(reg))) return -EFAULT; - - if (reg.resv[0] || reg.resv[1] || reg.resv[2]) + if (!mem_is_zero(reg.resv, sizeof(reg.resv))) return -EINVAL; if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC)) return -EINVAL; @@ -624,7 +589,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) io_destroy_bl(ctx, bl); } - free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL); + bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); if (!bl) return -ENOMEM; @@ -669,7 +634,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) return 0; fail: io_free_region(ctx, &bl->region); - kfree(free_bl); + kfree(bl); return ret; } @@ -682,9 +647,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) if (copy_from_user(®, arg, sizeof(reg))) return -EFAULT; 
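/*
 * Illustrative userspace sketch, not part of the patch: io_register_pbuf_ring()
 * above is the kernel side of IORING_REGISTER_PBUF_RING, which liburing wraps
 * as shown below.  Assumes liburing >= 2.4; the group id, entry count and
 * buffer size are arbitrary and error handling is omitted.
 */
#include <stdlib.h>
#include <liburing.h>

#define RING_BGID	3
#define RING_ENTRIES	64	/* must be a power of two */
#define RING_BUFLEN	4096

static struct io_uring_buf_ring *setup_provided_buf_ring(struct io_uring *ring)
{
	struct io_uring_buf_ring *br;
	int i, err;

	/* allocates, mmaps and registers the buffer ring in one call */
	br = io_uring_setup_buf_ring(ring, RING_ENTRIES, RING_BGID, 0, &err);
	if (!br)
		return NULL;

	for (i = 0; i < RING_ENTRIES; i++)
		io_uring_buf_ring_add(br, malloc(RING_BUFLEN), RING_BUFLEN, i,
				      io_uring_buf_ring_mask(RING_ENTRIES), i);
	io_uring_buf_ring_advance(br, RING_ENTRIES);
	return br;
}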
- if (reg.resv[0] || reg.resv[1] || reg.resv[2]) - return -EINVAL; - if (reg.flags) + if (!mem_is_zero(reg.resv, sizeof(reg.resv)) || reg.flags) return -EINVAL; bl = io_buffer_get_list(ctx, reg.bgid); @@ -704,14 +667,11 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg) { struct io_uring_buf_status buf_status; struct io_buffer_list *bl; - int i; if (copy_from_user(&buf_status, arg, sizeof(buf_status))) return -EFAULT; - - for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++) - if (buf_status.resv[i]) - return -EINVAL; + if (!mem_is_zero(buf_status.resv, sizeof(buf_status.resv))) + return -EINVAL; bl = io_buffer_get_list(ctx, buf_status.buf_group); if (!bl) diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index 2ec0b983ce24..4d2c209d1a41 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -55,20 +55,19 @@ struct buf_sel_arg { size_t max_len; unsigned short nr_iovs; unsigned short mode; + unsigned buf_group; }; void __user *io_buffer_select(struct io_kiocb *req, size_t *len, - unsigned int issue_flags); + unsigned buf_group, unsigned int issue_flags); int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg, unsigned int issue_flags); int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg); void io_destroy_buffers(struct io_ring_ctx *ctx); int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); -int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags); - int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); -int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags); +int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags); int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); @@ -94,7 +93,6 @@ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req) * to monopolize the buffer. 
*/ if (req->buf_list) { - req->buf_index = req->buf_list->bgid; req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT); return true; } diff --git a/io_uring/memmap.c b/io_uring/memmap.c index 07f8a5cbd37e..725dc0bec24c 100644 --- a/io_uring/memmap.c +++ b/io_uring/memmap.c @@ -13,6 +13,7 @@ #include "memmap.h" #include "kbuf.h" #include "rsrc.h" +#include "zcrx.h" static void *io_mem_alloc_compound(struct page **pages, int nr_pages, size_t size, gfp_t gfp) @@ -258,7 +259,8 @@ static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx, loff_t pgoff) { loff_t offset = pgoff << PAGE_SHIFT; - unsigned int bgid; + unsigned int id; + switch (offset & IORING_OFF_MMAP_MASK) { case IORING_OFF_SQ_RING: @@ -267,12 +269,13 @@ static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx, case IORING_OFF_SQES: return &ctx->sq_region; case IORING_OFF_PBUF_RING: - bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT; - return io_pbuf_get_region(ctx, bgid); + id = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT; + return io_pbuf_get_region(ctx, id); case IORING_MAP_OFF_PARAM_REGION: return &ctx->param_region; case IORING_MAP_OFF_ZCRX_REGION: - return &ctx->zcrx_region; + id = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_ZCRX_SHIFT; + return io_zcrx_get_region(ctx, id); } return NULL; } diff --git a/io_uring/memmap.h b/io_uring/memmap.h index dad0aa5b1b45..08419684e4bc 100644 --- a/io_uring/memmap.h +++ b/io_uring/memmap.h @@ -4,7 +4,9 @@ #define IORING_MAP_OFF_PARAM_REGION 0x20000000ULL #define IORING_MAP_OFF_ZCRX_REGION 0x30000000ULL -struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages); +#define IORING_OFF_ZCRX_SHIFT 16 + +struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages); #ifndef CONFIG_MMU unsigned int io_uring_nommu_mmap_capabilities(struct file *file); diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 50a958e9c921..71400d6cefc8 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -328,7 +328,7 @@ done: req_set_fail(req); } io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_uring_sync_msg_ring(struct io_uring_sqe *sqe) diff --git a/io_uring/net.c b/io_uring/net.c index 24040bc3916a..d13f3e8f6c72 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -18,7 +18,6 @@ #include "rsrc.h" #include "zcrx.h" -#if defined(CONFIG_NET) struct io_shutdown { struct file *file; int how; @@ -129,7 +128,7 @@ int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) ret = __sys_shutdown_sock(sock, shutdown->how); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } static bool io_net_retry(struct socket *sock, int flags) @@ -190,7 +189,6 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req, sr->done_io = 0; sr->retry = false; sr->len = 0; /* get from the provided buffer */ - req->buf_index = sr->buf_group; } static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg, @@ -359,15 +357,13 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) kmsg->msg.msg_name = &kmsg->addr; kmsg->msg.msg_namelen = addr_len; } - if (sr->flags & IORING_RECVSEND_FIXED_BUF) + if (sr->flags & IORING_RECVSEND_FIXED_BUF) { + req->flags |= REQ_F_IMPORT_BUFFER; return 0; - if (!io_do_buffer_select(req)) { - ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, - &kmsg->msg.msg_iter); - if (unlikely(ret < 0)) - return ret; } - return 0; + if (req->flags & REQ_F_BUFFER_SELECT) + return 0; + return 
import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); } static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -409,13 +405,12 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; if (sr->msg_flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; + if (req->flags & REQ_F_BUFFER_SELECT) + sr->buf_group = req->buf_index; if (sr->flags & IORING_RECVSEND_BUNDLE) { if (req->opcode == IORING_OP_SENDMSG) return -EINVAL; - if (!(req->flags & REQ_F_BUFFER_SELECT)) - return -EINVAL; sr->msg_flags |= MSG_WAITALL; - sr->buf_group = req->buf_index; req->buf_list = NULL; req->flags |= REQ_F_MULTISHOT; } @@ -507,7 +502,7 @@ static inline bool io_send_finish(struct io_kiocb *req, int *ret, /* Otherwise stop bundle and use the current result. */ finish: io_req_set_res(req, *ret, cflags); - *ret = IOU_OK; + *ret = IOU_COMPLETE; return true; } @@ -558,7 +553,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) else if (sr->done_io) ret = sr->done_io; io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags, @@ -571,6 +566,7 @@ static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags, .iovs = &kmsg->fast_iov, .max_len = min_not_zero(sr->len, INT_MAX), .nr_iovs = 1, + .buf_group = sr->buf_group, }; if (kmsg->vec.iovec) { @@ -723,7 +719,6 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req) { struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_async_msghdr *kmsg; - int ret; kmsg = io_msg_alloc_async(req); if (unlikely(!kmsg)) @@ -739,13 +734,10 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req) kmsg->msg.msg_iocb = NULL; kmsg->msg.msg_ubuf = NULL; - if (!io_do_buffer_select(req)) { - ret = import_ubuf(ITER_DEST, sr->buf, sr->len, - &kmsg->msg.msg_iter); - if (unlikely(ret)) - return ret; - } - return 0; + if (req->flags & REQ_F_BUFFER_SELECT) + return 0; + return import_ubuf(ITER_DEST, sr->buf, sr->len, + &kmsg->msg.msg_iter); } return io_recvmsg_copy_hdr(req, kmsg); @@ -827,18 +819,24 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret, cflags |= IORING_CQE_F_SOCK_NONEMPTY; if (sr->flags & IORING_RECVSEND_BUNDLE) { - cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), + size_t this_ret = *ret - sr->done_io; + + cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, this_ret), issue_flags); if (sr->retry) cflags = req->cqe.flags | (cflags & CQE_F_MASK); /* bundle with no more immediate buffers, we're done */ if (req->flags & REQ_F_BL_EMPTY) goto finish; - /* if more is available, retry and append to this one */ - if (!sr->retry && kmsg->msg.msg_inq > 0 && *ret > 0) { + /* + * If more is available AND it was a full transfer, retry and + * append to this one + */ + if (!sr->retry && kmsg->msg.msg_inq > 0 && this_ret > 0 && + !iov_iter_count(&kmsg->msg.msg_iter)) { req->cqe.flags = cflags & ~CQE_F_MASK; sr->len = kmsg->msg.msg_inq; - sr->done_io += *ret; + sr->done_io += this_ret; sr->retry = true; return false; } @@ -985,7 +983,7 @@ retry_multishot: void __user *buf; size_t len = sr->len; - buf = io_buffer_select(req, &len, issue_flags); + buf = io_buffer_select(req, &len, sr->buf_group, issue_flags); if (!buf) return -ENOBUFS; @@ -1063,6 +1061,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg .iovs = &kmsg->fast_iov, .nr_iovs = 1, .mode = KBUF_MODE_EXPAND, + .buf_group = 
sr->buf_group, }; if (kmsg->vec.iovec) { @@ -1095,7 +1094,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg void __user *buf; *len = sr->len; - buf = io_buffer_select(req, len, issue_flags); + buf = io_buffer_select(req, len, sr->buf_group, issue_flags); if (!buf) return -ENOBUFS; sr->buf = buf; @@ -1191,16 +1190,14 @@ int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc); unsigned ifq_idx; - if (unlikely(sqe->file_index || sqe->addr2 || sqe->addr || - sqe->addr3)) + if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3)) return -EINVAL; ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx); - if (ifq_idx != 0) - return -EINVAL; - zc->ifq = req->ctx->ifq; + zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx); if (!zc->ifq) return -EINVAL; + zc->len = READ_ONCE(sqe->len); zc->flags = READ_ONCE(sqe->ioprio); zc->msg_flags = READ_ONCE(sqe->msg_flags); @@ -1321,8 +1318,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -ENOMEM; if (req->opcode == IORING_OP_SEND_ZC) { - if (zc->flags & IORING_RECVSEND_FIXED_BUF) - req->flags |= REQ_F_IMPORT_BUFFER; ret = io_send_setup(req, sqe); } else { if (unlikely(sqe->addr2 || sqe->file_index)) @@ -1470,7 +1465,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) io_req_msg_cleanup(req, 0); } io_req_set_res(req, ret, IORING_CQE_F_MORE); - return IOU_OK; + return IOU_COMPLETE; } int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) @@ -1541,7 +1536,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) io_req_msg_cleanup(req, 0); } io_req_set_res(req, ret, IORING_CQE_F_MORE); - return IOU_OK; + return IOU_COMPLETE; } void io_sendrecv_fail(struct io_kiocb *req) @@ -1705,7 +1700,7 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags) sock->file_slot); } io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -1772,7 +1767,7 @@ out: req_set_fail(req); io_req_msg_cleanup(req, issue_flags); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -1846,4 +1841,3 @@ void io_netmsg_cache_free(const void *entry) io_vec_free(&kmsg->vec); kfree(kmsg); } -#endif diff --git a/io_uring/nop.c b/io_uring/nop.c index 28f06285fdc2..6ac2de761fd3 100644 --- a/io_uring/nop.c +++ b/io_uring/nop.c @@ -68,5 +68,5 @@ done: if (ret < 0) req_set_fail(req); io_req_set_res(req, nop->result, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/notif.c b/io_uring/notif.c index 7bd92538dccb..9a6f6e92d742 100644 --- a/io_uring/notif.c +++ b/io_uring/notif.c @@ -112,6 +112,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx) if (unlikely(!io_alloc_req(ctx, ¬if))) return NULL; + notif->ctx = ctx; notif->opcode = IORING_OP_NOP; notif->flags = 0; notif->file = NULL; diff --git a/io_uring/opdef.c b/io_uring/opdef.c index 489384c0438b..6e0882b051f9 100644 --- a/io_uring/opdef.c +++ b/io_uring/opdef.c @@ -333,13 +333,13 @@ const struct io_issue_def io_issue_defs[] = { .audit_skip = 1, .iopoll = 1, .prep = io_provide_buffers_prep, - .issue = io_provide_buffers, + .issue = io_manage_buffers_legacy, }, [IORING_OP_REMOVE_BUFFERS] = { .audit_skip = 1, .iopoll = 1, .prep = io_remove_buffers_prep, - .issue = io_remove_buffers, + .issue = io_manage_buffers_legacy, }, [IORING_OP_TEE] = { .needs_file = 1, @@ -569,6 +569,10 
@@ const struct io_issue_def io_issue_defs[] = { .prep = io_prep_writev_fixed, .issue = io_write, }, + [IORING_OP_PIPE] = { + .prep = io_pipe_prep, + .issue = io_pipe, + }, }; const struct io_cold_def io_cold_defs[] = { @@ -815,6 +819,9 @@ const struct io_cold_def io_cold_defs[] = { .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, + [IORING_OP_PIPE] = { + .name = "PIPE", + }, }; const char *io_uring_get_opcode(u8 opcode) diff --git a/io_uring/openclose.c b/io_uring/openclose.c index e3357dfa14ca..83e36ad4e31b 100644 --- a/io_uring/openclose.c +++ b/io_uring/openclose.c @@ -6,6 +6,8 @@ #include <linux/fdtable.h> #include <linux/fsnotify.h> #include <linux/namei.h> +#include <linux/pipe_fs_i.h> +#include <linux/watch_queue.h> #include <linux/io_uring.h> #include <uapi/linux/io_uring.h> @@ -169,7 +171,7 @@ err: if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_openat(struct io_kiocb *req, unsigned int issue_flags) @@ -257,7 +259,7 @@ err: if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -300,5 +302,136 @@ int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; +} + +struct io_pipe { + struct file *file; + int __user *fds; + int flags; + int file_slot; + unsigned long nofile; +}; + +int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe); + + if (sqe->fd || sqe->off || sqe->addr3) + return -EINVAL; + + p->fds = u64_to_user_ptr(READ_ONCE(sqe->addr)); + p->flags = READ_ONCE(sqe->pipe_flags); + if (p->flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE)) + return -EINVAL; + + p->file_slot = READ_ONCE(sqe->file_index); + p->nofile = rlimit(RLIMIT_NOFILE); + return 0; +} + +static int io_pipe_fixed(struct io_kiocb *req, struct file **files, + unsigned int issue_flags) +{ + struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe); + struct io_ring_ctx *ctx = req->ctx; + int ret, fds[2] = { -1, -1 }; + int slot = p->file_slot; + + if (p->flags & O_CLOEXEC) + return -EINVAL; + + io_ring_submit_lock(ctx, issue_flags); + + ret = __io_fixed_fd_install(ctx, files[0], slot); + if (ret < 0) + goto err; + fds[0] = ret; + files[0] = NULL; + + /* + * If a specific slot is given, next one will be used for + * the write side. 
+ */ + if (slot != IORING_FILE_INDEX_ALLOC) + slot++; + + ret = __io_fixed_fd_install(ctx, files[1], slot); + if (ret < 0) + goto err; + fds[1] = ret; + files[1] = NULL; + + io_ring_submit_unlock(ctx, issue_flags); + + if (!copy_to_user(p->fds, fds, sizeof(fds))) + return 0; + + ret = -EFAULT; + io_ring_submit_lock(ctx, issue_flags); +err: + if (fds[0] != -1) + io_fixed_fd_remove(ctx, fds[0]); + if (fds[1] != -1) + io_fixed_fd_remove(ctx, fds[1]); + io_ring_submit_unlock(ctx, issue_flags); + return ret; +} + +static int io_pipe_fd(struct io_kiocb *req, struct file **files) +{ + struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe); + int ret, fds[2] = { -1, -1 }; + + ret = __get_unused_fd_flags(p->flags, p->nofile); + if (ret < 0) + goto err; + fds[0] = ret; + + ret = __get_unused_fd_flags(p->flags, p->nofile); + if (ret < 0) + goto err; + fds[1] = ret; + + if (!copy_to_user(p->fds, fds, sizeof(fds))) { + fd_install(fds[0], files[0]); + fd_install(fds[1], files[1]); + return 0; + } + ret = -EFAULT; +err: + if (fds[0] != -1) + put_unused_fd(fds[0]); + if (fds[1] != -1) + put_unused_fd(fds[1]); + return ret; +} + +int io_pipe(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe); + struct file *files[2]; + int ret; + + ret = create_pipe_files(files, p->flags); + if (ret) + return ret; + files[0]->f_mode |= FMODE_NOWAIT; + files[1]->f_mode |= FMODE_NOWAIT; + + if (!!p->file_slot) + ret = io_pipe_fixed(req, files, issue_flags); + else + ret = io_pipe_fd(req, files); + + io_req_set_res(req, ret, 0); + if (!ret) + return IOU_COMPLETE; + + req_set_fail(req); + if (files[0]) + fput(files[0]); + if (files[1]) + fput(files[1]); + return ret; } diff --git a/io_uring/openclose.h b/io_uring/openclose.h index 8a93c98ad0ad..4ca2a9935abc 100644 --- a/io_uring/openclose.h +++ b/io_uring/openclose.h @@ -13,5 +13,8 @@ int io_openat2(struct io_kiocb *req, unsigned int issue_flags); int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_close(struct io_kiocb *req, unsigned int issue_flags); +int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); +int io_pipe(struct io_kiocb *req, unsigned int issue_flags); + int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags); diff --git a/io_uring/poll.c b/io_uring/poll.c index 8eb744eb9f4c..0526062e2f81 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -893,7 +893,7 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); if (ret > 0) { io_req_set_res(req, ipt.result_mask, 0); - return IOU_OK; + return IOU_COMPLETE; } return ret ?: IOU_ISSUE_SKIP_COMPLETE; } @@ -948,5 +948,5 @@ out: } /* complete update request, we're done with it */ io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index f80a77c4973f..c592ceace97d 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -80,10 +80,21 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) return 0; } -int io_buffer_validate(struct iovec *iov) +int io_validate_user_buf_range(u64 uaddr, u64 ulen) { - unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1); + unsigned long tmp, base = (unsigned long)uaddr; + unsigned long acct_len = (unsigned long)PAGE_ALIGN(ulen); + /* arbitrary limit, but we need something */ + if (ulen > SZ_1G || !ulen) + return 
-EFAULT; + if (check_add_overflow(base, acct_len, &tmp)) + return -EOVERFLOW; + return 0; +} + +static int io_buffer_validate(struct iovec *iov) +{ /* * Don't impose further limits on the size and buffer * constraints here, we'll -EINVAL later when IO is @@ -91,17 +102,9 @@ int io_buffer_validate(struct iovec *iov) */ if (!iov->iov_base) return iov->iov_len ? -EFAULT : 0; - if (!iov->iov_len) - return -EFAULT; - - /* arbitrary limit, but we need something */ - if (iov->iov_len > SZ_1G) - return -EFAULT; - if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp)) - return -EOVERFLOW; - - return 0; + return io_validate_user_buf_range((unsigned long)iov->iov_base, + iov->iov_len); } static void io_release_ubuf(void *priv) @@ -497,7 +500,7 @@ int io_files_update(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node) @@ -685,38 +688,34 @@ static bool io_coalesce_buffer(struct page ***pages, int *nr_pages, struct io_imu_folio_data *data) { struct page **page_array = *pages, **new_array = NULL; - int nr_pages_left = *nr_pages, i, j; - int nr_folios = data->nr_folios; + unsigned nr_pages_left = *nr_pages; + unsigned nr_folios = data->nr_folios; + unsigned i, j; /* Store head pages only*/ - new_array = kvmalloc_array(nr_folios, sizeof(struct page *), - GFP_KERNEL); + new_array = kvmalloc_array(nr_folios, sizeof(struct page *), GFP_KERNEL); if (!new_array) return false; - new_array[0] = compound_head(page_array[0]); - /* - * The pages are bound to the folio, it doesn't - * actually unpin them but drops all but one reference, - * which is usually put down by io_buffer_unmap(). - * Note, needs a better helper. - */ - if (data->nr_pages_head > 1) - unpin_user_pages(&page_array[1], data->nr_pages_head - 1); - - j = data->nr_pages_head; - nr_pages_left -= data->nr_pages_head; - for (i = 1; i < nr_folios; i++) { - unsigned int nr_unpin; - - new_array[i] = page_array[j]; - nr_unpin = min_t(unsigned int, nr_pages_left - 1, - data->nr_pages_mid - 1); - if (nr_unpin) - unpin_user_pages(&page_array[j+1], nr_unpin); - j += data->nr_pages_mid; - nr_pages_left -= data->nr_pages_mid; + for (i = 0, j = 0; i < nr_folios; i++) { + struct page *p = compound_head(page_array[j]); + struct folio *folio = page_folio(p); + unsigned int nr; + + WARN_ON_ONCE(i > 0 && p != page_array[j]); + + nr = i ? data->nr_pages_mid : data->nr_pages_head; + nr = min(nr, nr_pages_left); + /* Drop all but one ref, the entire folio will remain pinned. 
*/ + if (nr > 1) + unpin_user_folio(folio, nr - 1); + j += nr; + nr_pages_left -= nr; + new_array[i] = p; } + + WARN_ON_ONCE(j != *nr_pages); + kvfree(page_array); *pages = new_array; *nr_pages = nr_folios; @@ -1062,8 +1061,6 @@ static int io_import_fixed(int ddir, struct iov_iter *iter, size_t offset; int ret; - if (WARN_ON_ONCE(!imu)) - return -EFAULT; ret = validate_fixed_range(buf_addr, len, imu); if (unlikely(ret)) return ret; @@ -1110,13 +1107,19 @@ inline struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req, if (req->flags & REQ_F_BUF_NODE) return req->buf_node; + req->flags |= REQ_F_BUF_NODE; io_ring_submit_lock(ctx, issue_flags); node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index); - if (node) - io_req_assign_buf_node(req, node); + if (node) { + node->refs++; + req->buf_node = node; + io_ring_submit_unlock(ctx, issue_flags); + return node; + } + req->flags &= ~REQ_F_BUF_NODE; io_ring_submit_unlock(ctx, issue_flags); - return node; + return NULL; } int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter, diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h index b52242852ff3..0d2138f16322 100644 --- a/io_uring/rsrc.h +++ b/io_uring/rsrc.h @@ -83,7 +83,7 @@ int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg, unsigned size, unsigned type); int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, unsigned int size, unsigned int type); -int io_buffer_validate(struct iovec *iov); +int io_validate_user_buf_range(u64 uaddr, u64 ulen); bool io_check_coalesce_buffer(struct page **page_array, int nr_pages, struct io_imu_folio_data *data); @@ -115,32 +115,6 @@ static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx, return true; } -static inline void io_req_put_rsrc_nodes(struct io_kiocb *req) -{ - if (req->file_node) { - io_put_rsrc_node(req->ctx, req->file_node); - req->file_node = NULL; - } - if (req->flags & REQ_F_BUF_NODE) { - io_put_rsrc_node(req->ctx, req->buf_node); - req->buf_node = NULL; - } -} - -static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node, - struct io_rsrc_node *node) -{ - node->refs++; - *dst_node = node; -} - -static inline void io_req_assign_buf_node(struct io_kiocb *req, - struct io_rsrc_node *node) -{ - io_req_assign_rsrc_node(&req->buf_node, node); - req->flags |= REQ_F_BUF_NODE; -} - int io_files_update(struct io_kiocb *req, unsigned int issue_flags); int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); diff --git a/io_uring/rw.c b/io_uring/rw.c index 039e063f7091..710d8cd53ebb 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -119,7 +119,7 @@ static int __io_import_rw_buffer(int ddir, struct io_kiocb *req, return io_import_vec(ddir, req, io, buf, sqe_len); if (io_do_buffer_select(req)) { - buf = io_buffer_select(req, &sqe_len, issue_flags); + buf = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags); if (!buf) return -ENOBUFS; rw->addr = (unsigned long) buf; @@ -253,16 +253,19 @@ static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, int ddir) { struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); + struct io_async_rw *io; unsigned ioprio; u64 attr_type_mask; int ret; if (io_rw_alloc_async(req)) return -ENOMEM; + io = req->async_data; rw->kiocb.ki_pos = READ_ONCE(sqe->off); /* used for fixed read/write too - just read unconditionally */ req->buf_index = READ_ONCE(sqe->buf_index); + io->buf_group = req->buf_index; ioprio = READ_ONCE(sqe->ioprio); if (ioprio) { @@ -276,6 +279,7 @@ static int __io_prep_rw(struct io_kiocb *req, const 
struct io_uring_sqe *sqe, } rw->kiocb.dio_complete = NULL; rw->kiocb.ki_flags = 0; + rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream); if (req->ctx->flags & IORING_SETUP_IOPOLL) rw->kiocb.ki_complete = io_complete_rw_iopoll; @@ -657,7 +661,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, io_req_io_end(req); io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags)); io_req_rw_cleanup(req, issue_flags); - return IOU_OK; + return IOU_COMPLETE; } else { io_rw_done(req, ret); } diff --git a/io_uring/rw.h b/io_uring/rw.h index 81d6d9a8cf69..129a53fe5482 100644 --- a/io_uring/rw.h +++ b/io_uring/rw.h @@ -16,6 +16,8 @@ struct io_async_rw { struct iov_iter iter; struct iov_iter_state iter_state; struct iovec fast_iov; + unsigned buf_group; + /* * wpq is for buffered io, while meta fields are used with * direct io diff --git a/io_uring/splice.c b/io_uring/splice.c index 7b89bd84d486..35ce4e60b495 100644 --- a/io_uring/splice.c +++ b/io_uring/splice.c @@ -103,7 +103,7 @@ done: if (ret != sp->len) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -144,5 +144,5 @@ done: if (ret != sp->len) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/statx.c b/io_uring/statx.c index 6bc4651700a2..5111e9befbfe 100644 --- a/io_uring/statx.c +++ b/io_uring/statx.c @@ -59,7 +59,7 @@ int io_statx(struct io_kiocb *req, unsigned int issue_flags) ret = do_statx(sx->dfd, sx->filename, sx->flags, sx->mask, sx->buffer); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } void io_statx_cleanup(struct io_kiocb *req) diff --git a/io_uring/sync.c b/io_uring/sync.c index 255f68c37e55..cea2d381ffd2 100644 --- a/io_uring/sync.c +++ b/io_uring/sync.c @@ -47,7 +47,7 @@ int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) ret = sync_file_range(req->file, sync->off, sync->len, sync->flags); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -79,7 +79,7 @@ int io_fsync(struct io_kiocb *req, unsigned int issue_flags) ret = vfs_fsync_range(req->file, sync->off, end > 0 ? 
end : LLONG_MAX, sync->flags & IORING_FSYNC_DATASYNC); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -108,5 +108,5 @@ int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) if (ret >= 0) fsnotify_modify(req->file); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/tctx.c b/io_uring/tctx.c index adc6e42c14df..5b66755579c0 100644 --- a/io_uring/tctx.c +++ b/io_uring/tctx.c @@ -35,8 +35,6 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, data.hash = hash; data.task = task; - data.free_work = io_wq_free_work; - data.do_work = io_wq_submit_work; /* Do QD, or 4 * CPUS, whatever is smallest */ concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); diff --git a/io_uring/timeout.c b/io_uring/timeout.c index 2a107665230b..7f13bfa9f2b6 100644 --- a/io_uring/timeout.c +++ b/io_uring/timeout.c @@ -35,6 +35,9 @@ struct io_timeout_rem { bool ltimeout; }; +static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req, + struct io_kiocb *link); + static inline bool io_is_timeout_noseq(struct io_kiocb *req) { struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout); @@ -218,7 +221,9 @@ void io_disarm_next(struct io_kiocb *req) struct io_ring_ctx *ctx = req->ctx; raw_spin_lock_irq(&ctx->timeout_lock); - link = io_disarm_linked_timeout(req); + if (req->link && req->link->opcode == IORING_OP_LINK_TIMEOUT) + link = __io_disarm_linked_timeout(req, req->link); + raw_spin_unlock_irq(&ctx->timeout_lock); if (link) io_req_queue_tw_complete(link, -ECANCELED); @@ -228,8 +233,8 @@ void io_disarm_next(struct io_kiocb *req) io_fail_links(req); } -struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req, - struct io_kiocb *link) +static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req, + struct io_kiocb *link) __must_hold(&req->ctx->completion_lock) __must_hold(&req->ctx->timeout_lock) { @@ -500,7 +505,7 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } static int __io_timeout_prep(struct io_kiocb *req, diff --git a/io_uring/timeout.h b/io_uring/timeout.h index e91b32448dcf..2b7c9ad72992 100644 --- a/io_uring/timeout.h +++ b/io_uring/timeout.h @@ -8,19 +8,6 @@ struct io_timeout_data { u32 flags; }; -struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req, - struct io_kiocb *link); - -static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req) -{ - struct io_kiocb *link = req->link; - - if (link && link->opcode == IORING_OP_LINK_TIMEOUT) - return __io_disarm_linked_timeout(req, link); - - return NULL; -} - __cold void io_flush_timeouts(struct io_ring_ctx *ctx); struct io_cancel_data; int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd); diff --git a/io_uring/truncate.c b/io_uring/truncate.c index 62ee73d34d72..487baf23b44e 100644 --- a/io_uring/truncate.c +++ b/io_uring/truncate.c @@ -44,5 +44,5 @@ int io_ftruncate(struct io_kiocb *req, unsigned int issue_flags) ret = do_ftruncate(req->file, ft->len, 1); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 430ed620ddfe..929cad6ee326 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -3,13 +3,10 @@ #include <linux/errno.h> #include <linux/file.h> #include <linux/io_uring/cmd.h> -#include 
<linux/io_uring/net.h> #include <linux/security.h> #include <linux/nospec.h> -#include <net/sock.h> #include <uapi/linux/io_uring.h> -#include <asm/ioctls.h> #include "io_uring.h" #include "alloc_cache.h" @@ -268,7 +265,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) req_set_fail(req); io_req_uring_cleanup(req, issue_flags); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, @@ -278,6 +275,9 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, { struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); + if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED))) + return -EINVAL; + return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags); } EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed); @@ -292,6 +292,9 @@ int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd, struct io_async_cmd *ac = req->async_data; int ret; + if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED))) + return -EINVAL; + ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs); if (ret) return ret; @@ -307,83 +310,3 @@ void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd) io_req_queue_iowq(req); } - -static inline int io_uring_cmd_getsockopt(struct socket *sock, - struct io_uring_cmd *cmd, - unsigned int issue_flags) -{ - const struct io_uring_sqe *sqe = cmd->sqe; - bool compat = !!(issue_flags & IO_URING_F_COMPAT); - int optlen, optname, level, err; - void __user *optval; - - level = READ_ONCE(sqe->level); - if (level != SOL_SOCKET) - return -EOPNOTSUPP; - - optval = u64_to_user_ptr(READ_ONCE(sqe->optval)); - optname = READ_ONCE(sqe->optname); - optlen = READ_ONCE(sqe->optlen); - - err = do_sock_getsockopt(sock, compat, level, optname, - USER_SOCKPTR(optval), - KERNEL_SOCKPTR(&optlen)); - if (err) - return err; - - /* On success, return optlen */ - return optlen; -} - -static inline int io_uring_cmd_setsockopt(struct socket *sock, - struct io_uring_cmd *cmd, - unsigned int issue_flags) -{ - const struct io_uring_sqe *sqe = cmd->sqe; - bool compat = !!(issue_flags & IO_URING_F_COMPAT); - int optname, optlen, level; - void __user *optval; - sockptr_t optval_s; - - optval = u64_to_user_ptr(READ_ONCE(sqe->optval)); - optname = READ_ONCE(sqe->optname); - optlen = READ_ONCE(sqe->optlen); - level = READ_ONCE(sqe->level); - optval_s = USER_SOCKPTR(optval); - - return do_sock_setsockopt(sock, compat, level, optname, optval_s, - optlen); -} - -#if defined(CONFIG_NET) -int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) -{ - struct socket *sock = cmd->file->private_data; - struct sock *sk = sock->sk; - struct proto *prot = READ_ONCE(sk->sk_prot); - int ret, arg = 0; - - if (!prot || !prot->ioctl) - return -EOPNOTSUPP; - - switch (cmd->cmd_op) { - case SOCKET_URING_OP_SIOCINQ: - ret = prot->ioctl(sk, SIOCINQ, &arg); - if (ret) - return ret; - return arg; - case SOCKET_URING_OP_SIOCOUTQ: - ret = prot->ioctl(sk, SIOCOUTQ, &arg); - if (ret) - return ret; - return arg; - case SOCKET_URING_OP_GETSOCKOPT: - return io_uring_cmd_getsockopt(sock, cmd, issue_flags); - case SOCKET_URING_OP_SETSOCKOPT: - return io_uring_cmd_setsockopt(sock, cmd, issue_flags); - default: - return -EOPNOTSUPP; - } -} -EXPORT_SYMBOL_GPL(io_uring_cmd_sock); -#endif diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h index b04686b6b5d2..e6a5142c890e 100644 --- a/io_uring/uring_cmd.h +++ b/io_uring/uring_cmd.h @@ -17,9 +17,3 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx, struct 
io_uring_task *tctx, bool cancel_all); void io_cmd_cache_free(const void *entry); - -int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd, - const struct iovec __user *uvec, - size_t uvec_segs, - int ddir, struct iov_iter *iter, - unsigned issue_flags); diff --git a/io_uring/waitid.c b/io_uring/waitid.c index 54e69984cd8a..e07a94694397 100644 --- a/io_uring/waitid.c +++ b/io_uring/waitid.c @@ -323,5 +323,5 @@ done: if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/xattr.c b/io_uring/xattr.c index de5064fcae8a..322b94ff9e4b 100644 --- a/io_uring/xattr.c +++ b/io_uring/xattr.c @@ -109,7 +109,7 @@ int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags) ret = file_getxattr(req->file, &ix->ctx); io_xattr_finish(req, ret); - return IOU_OK; + return IOU_COMPLETE; } int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) @@ -122,7 +122,7 @@ int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) ret = filename_getxattr(AT_FDCWD, ix->filename, LOOKUP_FOLLOW, &ix->ctx); ix->filename = NULL; io_xattr_finish(req, ret); - return IOU_OK; + return IOU_COMPLETE; } static int __io_setxattr_prep(struct io_kiocb *req, @@ -190,7 +190,7 @@ int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags) ret = file_setxattr(req->file, &ix->ctx); io_xattr_finish(req, ret); - return IOU_OK; + return IOU_COMPLETE; } int io_setxattr(struct io_kiocb *req, unsigned int issue_flags) @@ -203,5 +203,5 @@ int io_setxattr(struct io_kiocb *req, unsigned int issue_flags) ret = filename_setxattr(AT_FDCWD, ix->filename, LOOKUP_FOLLOW, &ix->ctx); ix->filename = NULL; io_xattr_finish(req, ret); - return IOU_OK; + return IOU_COMPLETE; } diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index fe86606b9f30..9a568d049204 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -26,29 +26,207 @@ #include "zcrx.h" #include "rsrc.h" +#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp) { return pp->mp_priv; } -#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov) +{ + struct net_iov_area *owner = net_iov_owner(niov); -static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, - struct io_zcrx_area *area, int nr_mapped) + return container_of(owner, struct io_zcrx_area, nia); +} + +static inline struct page *io_zcrx_iov_page(const struct net_iov *niov) +{ + struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); + + return area->mem.pages[net_iov_idx(niov)]; +} + +static void io_release_dmabuf(struct io_zcrx_mem *mem) +{ + if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER)) + return; + + if (mem->sgt) + dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt, + DMA_FROM_DEVICE); + if (mem->attach) + dma_buf_detach(mem->dmabuf, mem->attach); + if (mem->dmabuf) + dma_buf_put(mem->dmabuf); + + mem->sgt = NULL; + mem->attach = NULL; + mem->dmabuf = NULL; +} + +static int io_import_dmabuf(struct io_zcrx_ifq *ifq, + struct io_zcrx_mem *mem, + struct io_uring_zcrx_area_reg *area_reg) +{ + unsigned long off = (unsigned long)area_reg->addr; + unsigned long len = (unsigned long)area_reg->len; + unsigned long total_size = 0; + struct scatterlist *sg; + int dmabuf_fd = area_reg->dmabuf_fd; + int i, ret; + + if (WARN_ON_ONCE(!ifq->dev)) + return -EFAULT; + if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER)) + return -EINVAL; + + mem->is_dmabuf = true; + mem->dmabuf = 
dma_buf_get(dmabuf_fd); + if (IS_ERR(mem->dmabuf)) { + ret = PTR_ERR(mem->dmabuf); + mem->dmabuf = NULL; + goto err; + } + + mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev); + if (IS_ERR(mem->attach)) { + ret = PTR_ERR(mem->attach); + mem->attach = NULL; + goto err; + } + + mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE); + if (IS_ERR(mem->sgt)) { + ret = PTR_ERR(mem->sgt); + mem->sgt = NULL; + goto err; + } + + for_each_sgtable_dma_sg(mem->sgt, sg, i) + total_size += sg_dma_len(sg); + + if (total_size < off + len) + return -EINVAL; + + mem->dmabuf_offset = off; + mem->size = len; + return 0; +err: + io_release_dmabuf(mem); + return ret; +} + +static int io_zcrx_map_area_dmabuf(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) +{ + unsigned long off = area->mem.dmabuf_offset; + struct scatterlist *sg; + unsigned i, niov_idx = 0; + + if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER)) + return -EINVAL; + + for_each_sgtable_dma_sg(area->mem.sgt, sg, i) { + dma_addr_t dma = sg_dma_address(sg); + unsigned long sg_len = sg_dma_len(sg); + unsigned long sg_off = min(sg_len, off); + + off -= sg_off; + sg_len -= sg_off; + dma += sg_off; + + while (sg_len && niov_idx < area->nia.num_niovs) { + struct net_iov *niov = &area->nia.niovs[niov_idx]; + + if (net_mp_niov_set_dma_addr(niov, dma)) + return 0; + sg_len -= PAGE_SIZE; + dma += PAGE_SIZE; + niov_idx++; + } + } + return niov_idx; +} + +static int io_import_umem(struct io_zcrx_ifq *ifq, + struct io_zcrx_mem *mem, + struct io_uring_zcrx_area_reg *area_reg) +{ + struct page **pages; + int nr_pages; + + if (area_reg->dmabuf_fd) + return -EINVAL; + if (!area_reg->addr) + return -EFAULT; + pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len, + &nr_pages); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + mem->pages = pages; + mem->nr_folios = nr_pages; + mem->size = area_reg->len; + return 0; +} + +static void io_release_area_mem(struct io_zcrx_mem *mem) +{ + if (mem->is_dmabuf) { + io_release_dmabuf(mem); + return; + } + if (mem->pages) { + unpin_user_pages(mem->pages, mem->nr_folios); + kvfree(mem->pages); + } +} + +static int io_import_area(struct io_zcrx_ifq *ifq, + struct io_zcrx_mem *mem, + struct io_uring_zcrx_area_reg *area_reg) +{ + int ret; + + ret = io_validate_user_buf_range(area_reg->addr, area_reg->len); + if (ret) + return ret; + if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK) + return -EINVAL; + + if (area_reg->flags & IORING_ZCRX_AREA_DMABUF) + return io_import_dmabuf(ifq, mem, area_reg); + return io_import_umem(ifq, mem, area_reg); +} + +static void io_zcrx_unmap_umem(struct io_zcrx_ifq *ifq, + struct io_zcrx_area *area, int nr_mapped) { int i; for (i = 0; i < nr_mapped; i++) { - struct net_iov *niov = &area->nia.niovs[i]; - dma_addr_t dma; + netmem_ref netmem = net_iov_to_netmem(&area->nia.niovs[i]); + dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem); - dma = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov)); dma_unmap_page_attrs(ifq->dev, dma, PAGE_SIZE, DMA_FROM_DEVICE, IO_DMA_ATTR); - net_mp_niov_set_dma_addr(niov, 0); } } +static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, + struct io_zcrx_area *area, int nr_mapped) +{ + int i; + + if (area->mem.is_dmabuf) + io_release_dmabuf(&area->mem); + else + io_zcrx_unmap_umem(ifq, area, nr_mapped); + + for (i = 0; i < area->nia.num_niovs; i++) + net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0); +} + static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) { guard(mutex)(&ifq->dma_lock); @@ -58,20 
+236,16 @@ static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *are area->is_mapped = false; } -static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) +static int io_zcrx_map_area_umem(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) { int i; - guard(mutex)(&ifq->dma_lock); - if (area->is_mapped) - return 0; - for (i = 0; i < area->nia.num_niovs; i++) { struct net_iov *niov = &area->nia.niovs[i]; dma_addr_t dma; - dma = dma_map_page_attrs(ifq->dev, area->pages[i], 0, PAGE_SIZE, - DMA_FROM_DEVICE, IO_DMA_ATTR); + dma = dma_map_page_attrs(ifq->dev, area->mem.pages[i], 0, + PAGE_SIZE, DMA_FROM_DEVICE, IO_DMA_ATTR); if (dma_mapping_error(ifq->dev, dma)) break; if (net_mp_niov_set_dma_addr(niov, dma)) { @@ -80,9 +254,24 @@ static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) break; } } + return i; +} + +static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) +{ + unsigned nr; + + guard(mutex)(&ifq->dma_lock); + if (area->is_mapped) + return 0; + + if (area->mem.is_dmabuf) + nr = io_zcrx_map_area_dmabuf(ifq, area); + else + nr = io_zcrx_map_area_umem(ifq, area); - if (i != area->nia.num_niovs) { - __io_zcrx_unmap_area(ifq, area, i); + if (nr != area->nia.num_niovs) { + __io_zcrx_unmap_area(ifq, area, nr); return -EINVAL; } @@ -118,13 +307,6 @@ struct io_zcrx_args { static const struct memory_provider_ops io_uring_pp_zc_ops; -static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov) -{ - struct net_iov_area *owner = net_iov_owner(niov); - - return container_of(owner, struct io_zcrx_area, nia); -} - static inline atomic_t *io_get_user_counter(struct net_iov *niov) { struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); @@ -147,17 +329,12 @@ static void io_zcrx_get_niov_uref(struct net_iov *niov) atomic_inc(io_get_user_counter(niov)); } -static inline struct page *io_zcrx_iov_page(const struct net_iov *niov) -{ - struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); - - return area->pages[net_iov_idx(niov)]; -} - static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq, struct io_uring_zcrx_ifq_reg *reg, - struct io_uring_region_desc *rd) + struct io_uring_region_desc *rd, + u32 id) { + u64 mmap_offset; size_t off, size; void *ptr; int ret; @@ -167,12 +344,14 @@ static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq, if (size > rd->size) return -EINVAL; - ret = io_create_region_mmap_safe(ifq->ctx, &ifq->ctx->zcrx_region, rd, - IORING_MAP_OFF_ZCRX_REGION); + mmap_offset = IORING_MAP_OFF_ZCRX_REGION; + mmap_offset += id << IORING_OFF_PBUF_SHIFT; + + ret = io_create_region(ifq->ctx, &ifq->region, rd, mmap_offset); if (ret < 0) return ret; - ptr = io_region_get_ptr(&ifq->ctx->zcrx_region); + ptr = io_region_get_ptr(&ifq->region); ifq->rq_ring = (struct io_uring *)ptr; ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off); return 0; @@ -180,7 +359,7 @@ static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq, static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq) { - io_free_region(ifq->ctx, &ifq->ctx->zcrx_region); + io_free_region(ifq->ctx, &ifq->region); ifq->rq_ring = NULL; ifq->rqes = NULL; } @@ -188,53 +367,44 @@ static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq) static void io_zcrx_free_area(struct io_zcrx_area *area) { io_zcrx_unmap_area(area->ifq, area); + io_release_area_mem(&area->mem); kvfree(area->freelist); kvfree(area->nia.niovs); kvfree(area->user_refs); - if (area->pages) { - unpin_user_pages(area->pages, area->nr_folios); - kvfree(area->pages); - } 
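The zero-copy RX area import above funnels both user-memory and dmabuf registrations through io_import_area(), which reuses io_validate_user_buf_range() from the io_uring/rsrc.c hunk at the top of this section and additionally insists on page-aligned address and length. The following standalone userspace sketch mirrors the shape of those checks; the function name, the size cap and the alignment flag are illustrative, not the kernel's exact code:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_BUF_LEN	(1ULL << 30)	/* assumed cap, for illustration */
	#define PAGE_SZ		4096ULL

	static int validate_user_buf_range(uint64_t uaddr, uint64_t ulen, int page_aligned)
	{
		uint64_t end;

		if (!ulen || ulen > MAX_BUF_LEN)
			return -EFAULT;
		/* reject ranges that wrap around the top of the address space */
		if (__builtin_add_overflow(uaddr, ulen, &end))
			return -EOVERFLOW;
		/* zcrx areas must be page aligned in both address and length */
		if (page_aligned && ((uaddr | ulen) & (PAGE_SZ - 1)))
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", validate_user_buf_range(0x1000, 0x2000, 1));		/* 0 */
		printf("%d\n", validate_user_buf_range(UINT64_MAX - 100, 4096, 0));	/* -EOVERFLOW */
		return 0;
	}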
kfree(area); } +#define IO_ZCRX_AREA_SUPPORTED_FLAGS (IORING_ZCRX_AREA_DMABUF) + static int io_zcrx_create_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area **res, struct io_uring_zcrx_area_reg *area_reg) { struct io_zcrx_area *area; - int i, ret, nr_pages, nr_iovs; - struct iovec iov; + unsigned nr_iovs; + int i, ret; - if (area_reg->flags || area_reg->rq_area_token) + if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS) return -EINVAL; - if (area_reg->__resv1 || area_reg->__resv2[0] || area_reg->__resv2[1]) + if (area_reg->rq_area_token) return -EINVAL; - if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK) + if (area_reg->__resv2[0] || area_reg->__resv2[1]) return -EINVAL; - iov.iov_base = u64_to_user_ptr(area_reg->addr); - iov.iov_len = area_reg->len; - ret = io_buffer_validate(&iov); - if (ret) - return ret; - ret = -ENOMEM; area = kzalloc(sizeof(*area), GFP_KERNEL); if (!area) goto err; - area->pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len, - &nr_pages); - if (IS_ERR(area->pages)) { - ret = PTR_ERR(area->pages); - area->pages = NULL; + ret = io_import_area(ifq, &area->mem, area_reg); + if (ret) goto err; - } - area->nr_folios = nr_iovs = nr_pages; + + nr_iovs = area->mem.size >> PAGE_SHIFT; area->nia.num_niovs = nr_iovs; + ret = -ENOMEM; area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]), GFP_KERNEL | __GFP_ZERO); if (!area->nia.niovs) @@ -245,9 +415,6 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq, if (!area->freelist) goto err; - for (i = 0; i < nr_iovs; i++) - area->freelist[i] = i; - area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]), GFP_KERNEL | __GFP_ZERO); if (!area->user_refs) @@ -341,6 +508,16 @@ static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq) kfree(ifq); } +struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx, + unsigned int id) +{ + struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id); + + lockdep_assert_held(&ctx->mmap_lock); + + return ifq ? &ifq->region : NULL; +} + int io_register_zcrx_ifq(struct io_ring_ctx *ctx, struct io_uring_zcrx_ifq_reg __user *arg) { @@ -350,6 +527,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, struct io_uring_region_desc rd; struct io_zcrx_ifq *ifq; int ret; + u32 id; /* * 1. Interface queue allocation. 
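io_zcrx_get_region() above resolves an interface queue from the new per-context xarray under mmap_lock, and the registration path in the hunks that follow reserves the id with xa_alloc() before publishing the fully initialised ifq with xa_store(). A kernel-style sketch of that reserve/publish/lookup pattern; struct and field names are placeholders:

	#include <linux/xarray.h>
	#include <linux/mutex.h>

	struct my_ifq { int dummy; };

	struct my_ctx {
		/* must be initialised with xa_init_flags(&ifqs, XA_FLAGS_ALLOC) */
		struct xarray	ifqs;
		struct mutex	mmap_lock;
	};

	static int my_register(struct my_ctx *ctx, struct my_ifq *ifq, u32 *idp)
	{
		u32 id;
		int ret;

		/* reserve an id first, so mmap offsets can be derived from it */
		ret = xa_alloc(&ctx->ifqs, &id, NULL, xa_limit_31b, GFP_KERNEL);
		if (ret)
			return ret;

		/* ... set up rings and areas using "id" ... */

		/* publish only once the object is fully initialised */
		if (xa_store(&ctx->ifqs, id, ifq, GFP_KERNEL)) {
			xa_erase(&ctx->ifqs, id);
			return -ENOMEM;
		}
		*idp = id;
		return 0;
	}

	static struct my_ifq *my_lookup(struct my_ctx *ctx, u32 id)
	{
		lockdep_assert_held(&ctx->mmap_lock);
		return xa_load(&ctx->ifqs, id);
	}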
@@ -362,8 +540,6 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN && ctx->flags & IORING_SETUP_CQE32)) return -EINVAL; - if (ctx->ifq) - return -EBUSY; if (copy_from_user(®, arg, sizeof(reg))) return -EFAULT; if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd))) @@ -386,29 +562,37 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, ifq = io_zcrx_ifq_alloc(ctx); if (!ifq) return -ENOMEM; + ifq->rq_entries = reg.rq_entries; - ret = io_allocate_rbuf_ring(ifq, ®, &rd); - if (ret) - goto err; + scoped_guard(mutex, &ctx->mmap_lock) { + /* preallocate id */ + ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL); + if (ret) + goto ifq_free; + } - ret = io_zcrx_create_area(ifq, &ifq->area, &area); + ret = io_allocate_rbuf_ring(ifq, ®, &rd, id); if (ret) goto err; - ifq->rq_entries = reg.rq_entries; - - ret = -ENODEV; ifq->netdev = netdev_get_by_index(current->nsproxy->net_ns, reg.if_idx, &ifq->netdev_tracker, GFP_KERNEL); - if (!ifq->netdev) + if (!ifq->netdev) { + ret = -ENODEV; goto err; + } ifq->dev = ifq->netdev->dev.parent; - ret = -EOPNOTSUPP; - if (!ifq->dev) + if (!ifq->dev) { + ret = -EOPNOTSUPP; goto err; + } get_device(ifq->dev); + ret = io_zcrx_create_area(ifq, &ifq->area, &area); + if (ret) + goto err; + mp_param.mp_ops = &io_uring_pp_zc_ops; mp_param.mp_priv = ifq; ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param); @@ -419,6 +603,14 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, reg.offsets.rqes = sizeof(struct io_uring); reg.offsets.head = offsetof(struct io_uring, head); reg.offsets.tail = offsetof(struct io_uring, tail); + reg.zcrx_id = id; + + scoped_guard(mutex, &ctx->mmap_lock) { + /* publish ifq */ + ret = -ENOMEM; + if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL)) + goto err; + } if (copy_to_user(arg, ®, sizeof(reg)) || copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) || @@ -426,24 +618,34 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, ret = -EFAULT; goto err; } - ctx->ifq = ifq; return 0; err: + scoped_guard(mutex, &ctx->mmap_lock) + xa_erase(&ctx->zcrx_ctxs, id); +ifq_free: io_zcrx_ifq_free(ifq); return ret; } void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx) { - struct io_zcrx_ifq *ifq = ctx->ifq; + struct io_zcrx_ifq *ifq; + unsigned long id; lockdep_assert_held(&ctx->uring_lock); - if (!ifq) - return; + while (1) { + scoped_guard(mutex, &ctx->mmap_lock) { + ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT); + if (ifq) + xa_erase(&ctx->zcrx_ctxs, id); + } + if (!ifq) + break; + io_zcrx_ifq_free(ifq); + } - ctx->ifq = NULL; - io_zcrx_ifq_free(ifq); + xa_destroy(&ctx->zcrx_ctxs); } static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area) @@ -500,12 +702,15 @@ static void io_zcrx_scrub(struct io_zcrx_ifq *ifq) void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx) { + struct io_zcrx_ifq *ifq; + unsigned long index; + lockdep_assert_held(&ctx->uring_lock); - if (!ctx->ifq) - return; - io_zcrx_scrub(ctx->ifq); - io_close_queue(ctx->ifq); + xa_for_each(&ctx->zcrx_ctxs, index, ifq) { + io_zcrx_scrub(ifq); + io_close_queue(ifq); + } } static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq) @@ -742,6 +947,9 @@ static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq, size_t copied = 0; int ret = 0; + if (area->mem.is_dmabuf) + return -EFAULT; + while (len) { size_t copy_size = min_t(size_t, PAGE_SIZE, len); const int dst_off = 0; diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h index 
f2bc811f022c..2f5e26389f22 100644 --- a/io_uring/zcrx.h +++ b/io_uring/zcrx.h @@ -3,10 +3,24 @@ #define IOU_ZC_RX_H #include <linux/io_uring_types.h> +#include <linux/dma-buf.h> #include <linux/socket.h> #include <net/page_pool/types.h> #include <net/net_trackers.h> +struct io_zcrx_mem { + unsigned long size; + bool is_dmabuf; + + struct page **pages; + unsigned long nr_folios; + + struct dma_buf_attachment *attach; + struct dma_buf *dmabuf; + struct sg_table *sgt; + unsigned long dmabuf_offset; +}; + struct io_zcrx_area { struct net_iov_area nia; struct io_zcrx_ifq *ifq; @@ -14,13 +28,13 @@ struct io_zcrx_area { bool is_mapped; u16 area_id; - struct page **pages; - unsigned long nr_folios; /* freelist */ spinlock_t freelist_lock ____cacheline_aligned_in_smp; u32 free_count; u32 *freelist; + + struct io_zcrx_mem mem; }; struct io_zcrx_ifq { @@ -39,6 +53,7 @@ struct io_zcrx_ifq { netdevice_tracker netdev_tracker; spinlock_t lock; struct mutex dma_lock; + struct io_mapped_region region; }; #if defined(CONFIG_IO_URING_ZCRX) @@ -49,6 +64,8 @@ void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx); int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq, struct socket *sock, unsigned int flags, unsigned issue_flags, unsigned int *len); +struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx, + unsigned int id); #else static inline int io_register_zcrx_ifq(struct io_ring_ctx *ctx, struct io_uring_zcrx_ifq_reg __user *arg) @@ -67,6 +84,11 @@ static inline int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq, { return -EOPNOTSUPP; } +static inline struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx, + unsigned int id) +{ + return NULL; +} #endif int io_recvzc(struct io_kiocb *req, unsigned int issue_flags); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 35b4f8659904..82ed2d3c9846 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -913,7 +913,7 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, ro = mnt_want_write(mnt); /* we'll drop it in any case */ inode_lock(d_inode(root)); - path.dentry = lookup_one_len(name->name, root, strlen(name->name)); + path.dentry = lookup_noperm(&QSTR(name->name), root); if (IS_ERR(path.dentry)) { error = PTR_ERR(path.dentry); goto out_putfd; @@ -969,8 +969,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) if (err) goto out_name; inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT); - dentry = lookup_one_len(name->name, mnt->mnt_root, - strlen(name->name)); + dentry = lookup_noperm(&QSTR(name->name), mnt->mnt_root); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index dc3aa91a6ba0..5c2e96b19392 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -421,7 +421,7 @@ static int bpf_iter_link_pin_kernel(struct dentry *parent, int ret; inode_lock(parent->d_inode); - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) { inode_unlock(parent->d_inode); return PTR_ERR(dentry); diff --git a/kernel/cpu.c b/kernel/cpu.c index b08bb34b1718..a59e009e0be4 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2069,11 +2069,6 @@ static struct cpuhp_step cpuhp_hp_states[] = { .teardown.single = NULL, .cant_stop = true, }, - [CPUHP_PERF_PREPARE] = { - .name = "perf:prepare", - .startup.single = perf_event_init_cpu, - .teardown.single = perf_event_exit_cpu, - }, [CPUHP_RANDOM_PREPARE] = { .name = "random:prepare", .startup.single = random_prepare_cpu, diff --git 
a/kernel/entry/common.c b/kernel/entry/common.c index 20154572ede9..a8dd1f27417c 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -146,7 +146,7 @@ static inline bool report_single_step(unsigned long work) return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP; } -static void syscall_exit_work(struct pt_regs *regs, unsigned long work) +void syscall_exit_work(struct pt_regs *regs, unsigned long work) { bool step; @@ -173,53 +173,6 @@ static void syscall_exit_work(struct pt_regs *regs, unsigned long work) ptrace_report_syscall_exit(regs, step); } -/* - * Syscall specific exit to user mode preparation. Runs with interrupts - * enabled. - */ -static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs) -{ - unsigned long work = READ_ONCE(current_thread_info()->syscall_work); - unsigned long nr = syscall_get_nr(current, regs); - - CT_WARN_ON(ct_state() != CT_STATE_KERNEL); - - if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { - if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr)) - local_irq_enable(); - } - - rseq_syscall(regs); - - /* - * Do one-time syscall specific work. If these work items are - * enabled, we want to run them exactly once per syscall exit with - * interrupts enabled. - */ - if (unlikely(work & SYSCALL_WORK_EXIT)) - syscall_exit_work(regs, work); -} - -static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs) -{ - syscall_exit_to_user_mode_prepare(regs); - local_irq_disable_exit_to_user(); - exit_to_user_mode_prepare(regs); -} - -void syscall_exit_to_user_mode_work(struct pt_regs *regs) -{ - __syscall_exit_to_user_mode_work(regs); -} - -__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs) -{ - instrumentation_begin(); - __syscall_exit_to_user_mode_work(regs); - instrumentation_end(); - exit_to_user_mode(); -} - noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs) { enter_from_user_mode(regs); diff --git a/kernel/events/core.c b/kernel/events/core.c index 95e703891b24..f34c99f8ce8f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1270,6 +1270,10 @@ static void put_ctx(struct perf_event_context *ctx) if (ctx->task && ctx->task != TASK_TOMBSTONE) put_task_struct(ctx->task); call_rcu(&ctx->rcu_head, free_ctx); + } else { + smp_mb__after_atomic(); /* pairs with wait_var_event() */ + if (ctx->task == TASK_TOMBSTONE) + wake_up_var(&ctx->refcount); } } @@ -2167,7 +2171,7 @@ static void perf_put_aux_event(struct perf_event *event) * If the event is an aux_event, tear down all links to * it from other events. */ - for_each_sibling_event(iter, event->group_leader) { + for_each_sibling_event(iter, event) { if (iter->aux_event != event) continue; @@ -2325,7 +2329,11 @@ static void perf_child_detach(struct perf_event *event) if (WARN_ON_ONCE(!parent_event)) return; + /* + * Can't check this from an IPI, the holder is likey another CPU. 
+ * lockdep_assert_held(&parent_event->child_mutex); + */ sync_child_event(event); list_del_init(&event->child_list); @@ -2343,6 +2351,11 @@ event_filter_match(struct perf_event *event) perf_cgroup_match(event); } +static inline bool is_event_in_freq_mode(struct perf_event *event) +{ + return event->attr.freq && event->attr.sample_freq; +} + static void event_sched_out(struct perf_event *event, struct perf_event_context *ctx) { @@ -2380,7 +2393,7 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx) if (!is_software_event(event)) cpc->active_oncpu--; - if (event->attr.freq && event->attr.sample_freq) { + if (is_event_in_freq_mode(event)) { ctx->nr_freq--; epc->nr_freq--; } @@ -2450,8 +2463,9 @@ ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) #define DETACH_GROUP 0x01UL #define DETACH_CHILD 0x02UL -#define DETACH_DEAD 0x04UL -#define DETACH_EXIT 0x08UL +#define DETACH_EXIT 0x04UL +#define DETACH_REVOKE 0x08UL +#define DETACH_DEAD 0x10UL /* * Cross CPU call to remove a performance event @@ -2477,12 +2491,15 @@ __perf_remove_from_context(struct perf_event *event, */ if (flags & DETACH_EXIT) state = PERF_EVENT_STATE_EXIT; + if (flags & DETACH_REVOKE) + state = PERF_EVENT_STATE_REVOKED; if (flags & DETACH_DEAD) { event->pending_disable = 1; state = PERF_EVENT_STATE_DEAD; } event_sched_out(event, ctx); perf_event_set_state(event, min(event->state, state)); + if (flags & DETACH_GROUP) perf_group_detach(event); if (flags & DETACH_CHILD) @@ -2628,6 +2645,41 @@ void perf_event_disable_inatomic(struct perf_event *event) static void perf_log_throttle(struct perf_event *event, int enable); static void perf_log_itrace_start(struct perf_event *event); +static void perf_event_unthrottle(struct perf_event *event, bool start) +{ + event->hw.interrupts = 0; + if (start) + event->pmu->start(event, 0); + if (event == event->group_leader) + perf_log_throttle(event, 1); +} + +static void perf_event_throttle(struct perf_event *event) +{ + event->pmu->stop(event, 0); + event->hw.interrupts = MAX_INTERRUPTS; + if (event == event->group_leader) + perf_log_throttle(event, 0); +} + +static void perf_event_unthrottle_group(struct perf_event *event, bool skip_start_event) +{ + struct perf_event *sibling, *leader = event->group_leader; + + perf_event_unthrottle(leader, skip_start_event ? leader != event : true); + for_each_sibling_event(sibling, leader) + perf_event_unthrottle(sibling, skip_start_event ? sibling != event : true); +} + +static void perf_event_throttle_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + + perf_event_throttle(leader); + for_each_sibling_event(sibling, leader) + perf_event_throttle(sibling); +} + static int event_sched_in(struct perf_event *event, struct perf_event_context *ctx) { @@ -2656,10 +2708,8 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx) * ticks already, also for a heavily scheduling task there is little * guarantee it'll get a tick in a timely manner. 
*/ - if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { - perf_log_throttle(event, 1); - event->hw.interrupts = 0; - } + if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) + perf_event_unthrottle(event, false); perf_pmu_disable(event->pmu); @@ -2674,7 +2724,7 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx) if (!is_software_event(event)) cpc->active_oncpu++; - if (event->attr.freq && event->attr.sample_freq) { + if (is_event_in_freq_mode(event)) { ctx->nr_freq++; epc->nr_freq++; } @@ -4237,14 +4287,10 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list) hwc = &event->hw; - if (hwc->interrupts == MAX_INTERRUPTS) { - hwc->interrupts = 0; - perf_log_throttle(event, 1); - if (!event->attr.freq || !event->attr.sample_freq) - event->pmu->start(event, 0); - } + if (hwc->interrupts == MAX_INTERRUPTS) + perf_event_unthrottle_group(event, is_event_in_freq_mode(event)); - if (!event->attr.freq || !event->attr.sample_freq) + if (!is_event_in_freq_mode(event)) continue; /* @@ -4516,7 +4562,8 @@ out: static void perf_remove_from_owner(struct perf_event *event); static void perf_event_exit_event(struct perf_event *event, - struct perf_event_context *ctx); + struct perf_event_context *ctx, + bool revoke); /* * Removes all events from the current task that have been marked @@ -4543,7 +4590,7 @@ static void perf_event_remove_on_exec(struct perf_event_context *ctx) modified = true; - perf_event_exit_event(event, ctx); + perf_event_exit_event(event, ctx, false); } raw_spin_lock_irqsave(&ctx->lock, flags); @@ -5125,6 +5172,7 @@ static bool is_sb_event(struct perf_event *event) attr->context_switch || attr->text_poke || attr->bpf_event) return true; + return false; } @@ -5521,6 +5569,8 @@ static void perf_free_addr_filters(struct perf_event *event); /* vs perf_event_alloc() error */ static void __free_event(struct perf_event *event) { + struct pmu *pmu = event->pmu; + if (event->attach_state & PERF_ATTACH_CALLCHAIN) put_callchain_buffers(); @@ -5550,6 +5600,7 @@ static void __free_event(struct perf_event *event) * put_pmu_ctx() needs an event->ctx reference, because of * epc->ctx. */ + WARN_ON_ONCE(!pmu); WARN_ON_ONCE(!event->ctx); WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx); put_pmu_ctx(event->pmu_ctx); @@ -5562,8 +5613,13 @@ static void __free_event(struct perf_event *event) if (event->ctx) put_ctx(event->ctx); - if (event->pmu) - module_put(event->pmu->module); + if (pmu) { + module_put(pmu->module); + scoped_guard (spinlock, &pmu->events_lock) { + list_del(&event->pmu_list); + wake_up_var(pmu); + } + } call_rcu(&event->rcu_head, free_event_rcu); } @@ -5600,13 +5656,13 @@ static void _free_event(struct perf_event *event) /* * Used to free events which have a known refcount of 1, such as in error paths - * where the event isn't exposed yet and inherited events. + * of inherited events. 
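__free_event() above now removes the event from pmu->events under events_lock and signals wake_up_var(pmu); the matching wait_var_event(pmu, pmu_empty(pmu)) sits in perf_pmu_unregister() further below, so unregistration blocks until the last in-flight free has finished. A minimal sketch of that pairing, with placeholder names:

	#include <linux/wait_bit.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct my_pmu {
		spinlock_t		lock;
		struct list_head	events;
	};

	static bool my_pmu_empty(struct my_pmu *pmu)
	{
		guard(spinlock)(&pmu->lock);
		return list_empty(&pmu->events);
	}

	/* free side: drop the event from the list, then kick any waiter */
	static void my_event_free(struct my_pmu *pmu, struct list_head *entry)
	{
		scoped_guard(spinlock, &pmu->lock) {
			list_del(entry);
			wake_up_var(pmu);	/* pairs with wait_var_event() below */
		}
	}

	/* unregister side: sleep until the last event is gone */
	static void my_pmu_drain(struct my_pmu *pmu)
	{
		wait_var_event(pmu, my_pmu_empty(pmu));
	}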
*/ static void free_event(struct perf_event *event) { if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, - "unexpected event refcount: %ld; ptr=%p\n", - atomic_long_read(&event->refcount), event)) { + "unexpected event refcount: %ld; ptr=%p\n", + atomic_long_read(&event->refcount), event)) { /* leak to avoid use-after-free */ return; } @@ -5689,7 +5745,6 @@ int perf_event_release_kernel(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; struct perf_event *child, *tmp; - LIST_HEAD(free_list); /* * If we got here through err_alloc: free_event(event); we will not @@ -5718,15 +5773,17 @@ int perf_event_release_kernel(struct perf_event *event) * Thus this guarantees that we will in fact observe and kill _ALL_ * child events. */ - perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); + if (event->state > PERF_EVENT_STATE_REVOKED) { + perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); + } else { + event->state = PERF_EVENT_STATE_DEAD; + } perf_event_ctx_unlock(event, ctx); again: mutex_lock(&event->child_mutex); list_for_each_entry(child, &event->child_list, child_list) { - void *var = NULL; - /* * Cannot change, child events are not migrated, see the * comment with perf_event_ctx_lock_nested(). @@ -5759,44 +5816,24 @@ again: tmp = list_first_entry_or_null(&event->child_list, struct perf_event, child_list); if (tmp == child) { - perf_remove_from_context(child, DETACH_GROUP); - list_move(&child->child_list, &free_list); + perf_remove_from_context(child, DETACH_GROUP | DETACH_CHILD); } else { - var = &ctx->refcount; + child = NULL; } mutex_unlock(&event->child_mutex); mutex_unlock(&ctx->mutex); - put_ctx(ctx); - if (var) { - /* - * If perf_event_free_task() has deleted all events from the - * ctx while the child_mutex got released above, make sure to - * notify about the preceding put_ctx(). - */ - smp_mb(); /* pairs with wait_var_event() */ - wake_up_var(var); + if (child) { + /* Last reference unless ->pending_task work is pending */ + put_event(child); } + put_ctx(ctx); + goto again; } mutex_unlock(&event->child_mutex); - list_for_each_entry_safe(child, tmp, &free_list, child_list) { - void *var = &child->ctx->refcount; - - list_del(&child->child_list); - /* Last reference unless ->pending_task work is pending */ - put_event(child); - - /* - * Wake any perf_event_free_task() waiting for this event to be - * freed. - */ - smp_mb(); /* pairs with wait_var_event() */ - wake_up_var(var); - } - no_ctx: /* * Last reference unless ->pending_task work is pending on this event @@ -6068,8 +6105,14 @@ static __poll_t perf_poll(struct file *file, poll_table *wait) struct perf_buffer *rb; __poll_t events = EPOLLHUP; + if (event->state <= PERF_EVENT_STATE_REVOKED) + return EPOLLERR; + poll_wait(file, &event->waitq, wait); + if (event->state <= PERF_EVENT_STATE_REVOKED) + return EPOLLERR; + if (is_event_hup(event)) return events; @@ -6167,14 +6210,6 @@ static void __perf_event_period(struct perf_event *event, active = (event->state == PERF_EVENT_STATE_ACTIVE); if (active) { perf_pmu_disable(event->pmu); - /* - * We could be throttled; unthrottle now to avoid the tick - * trying to unthrottle while we already re-started the event. 
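perf_poll() above rejects revoked events with EPOLLERR both before and after poll_wait(). The second check is the standard poll idiom: once the caller is registered on the waitqueue, a revocation that raced with the first check can no longer be missed, because the revoker issues a wakeup. A generic sketch using a hypothetical struct my_dev:

	#include <linux/fs.h>
	#include <linux/poll.h>
	#include <linux/wait.h>

	struct my_dev {
		wait_queue_head_t	waitq;
		bool			revoked;
		bool			data_ready;
	};

	static __poll_t my_poll(struct file *file, poll_table *wait)
	{
		struct my_dev *dev = file->private_data;

		if (READ_ONCE(dev->revoked))		/* cheap early out */
			return EPOLLERR;

		poll_wait(file, &dev->waitq, wait);	/* register on the waitqueue */

		/*
		 * Re-check after registering: a revoke that raced with the first
		 * check wakes dev->waitq, so it cannot be missed from here on.
		 */
		if (READ_ONCE(dev->revoked))
			return EPOLLERR;

		return READ_ONCE(dev->data_ready) ? EPOLLIN | EPOLLRDNORM : 0;
	}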
- */ - if (event->hw.interrupts == MAX_INTERRUPTS) { - event->hw.interrupts = 0; - perf_log_throttle(event, 1); - } event->pmu->stop(event, PERF_EF_UPDATE); } @@ -6182,6 +6217,14 @@ static void __perf_event_period(struct perf_event *event, if (active) { event->pmu->start(event, PERF_EF_RELOAD); + /* + * Once the period is force-reset, the event starts immediately. + * But the event/group could be throttled. Unthrottle the + * event/group now to avoid the next tick trying to unthrottle + * while we already re-started the event/group. + */ + if (event->hw.interrupts == MAX_INTERRUPTS) + perf_event_unthrottle_group(event, true); perf_pmu_enable(event->pmu); } } @@ -6239,12 +6282,18 @@ static int perf_event_set_output(struct perf_event *event, static int perf_event_set_filter(struct perf_event *event, void __user *arg); static int perf_copy_attr(struct perf_event_attr __user *uattr, struct perf_event_attr *attr); +static int __perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie); static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) { void (*func)(struct perf_event *); u32 flags = arg; + if (event->state <= PERF_EVENT_STATE_REVOKED) + return -ENODEV; + switch (cmd) { case PERF_EVENT_IOC_ENABLE: func = _perf_event_enable; @@ -6301,7 +6350,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon if (IS_ERR(prog)) return PTR_ERR(prog); - err = perf_event_set_bpf_prog(event, prog, 0); + err = __perf_event_set_bpf_prog(event, prog, 0); if (err) { bpf_prog_put(prog); return err; @@ -6620,9 +6669,22 @@ void ring_buffer_put(struct perf_buffer *rb) call_rcu(&rb->rcu_head, rb_free_rcu); } +typedef void (*mapped_f)(struct perf_event *event, struct mm_struct *mm); + +#define get_mapped(event, func) \ +({ struct pmu *pmu; \ + mapped_f f = NULL; \ + guard(rcu)(); \ + pmu = READ_ONCE(event->pmu); \ + if (pmu) \ + f = pmu->func; \ + f; \ +}) + static void perf_mmap_open(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; + mapped_f mapped = get_mapped(event, event_mapped); atomic_inc(&event->mmap_count); atomic_inc(&event->rb->mmap_count); @@ -6630,8 +6692,8 @@ static void perf_mmap_open(struct vm_area_struct *vma) if (vma->vm_pgoff) atomic_inc(&event->rb->aux_mmap_count); - if (event->pmu->event_mapped) - event->pmu->event_mapped(event, vma->vm_mm); + if (mapped) + mapped(event, vma->vm_mm); } static void perf_pmu_output_stop(struct perf_event *event); @@ -6647,14 +6709,16 @@ static void perf_pmu_output_stop(struct perf_event *event); static void perf_mmap_close(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; + mapped_f unmapped = get_mapped(event, event_unmapped); struct perf_buffer *rb = ring_buffer_get(event); struct user_struct *mmap_user = rb->mmap_user; int mmap_locked = rb->mmap_locked; unsigned long size = perf_data_size(rb); bool detach_rest = false; - if (event->pmu->event_unmapped) - event->pmu->event_unmapped(event, vma->vm_mm); + /* FIXIES vs perf_pmu_unregister() */ + if (unmapped) + unmapped(event, vma->vm_mm); /* * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex @@ -6847,6 +6911,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) unsigned long nr_pages; long user_extra = 0, extra = 0; int ret, flags = 0; + mapped_f mapped; /* * Don't allow mmap() of inherited per-task counters. 
This would @@ -6877,6 +6942,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) mutex_lock(&event->mmap_mutex); ret = -EINVAL; + /* + * This relies on __pmu_detach_event() taking mmap_mutex after marking + * the event REVOKED. Either we observe the state, or __pmu_detach_event() + * will detach the rb created here. + */ + if (event->state <= PERF_EVENT_STATE_REVOKED) { + ret = -ENODEV; + goto unlock; + } + if (vma->vm_pgoff == 0) { nr_pages -= 1; @@ -7055,8 +7130,9 @@ aux_unlock: if (!ret) ret = map_range(rb, vma); - if (!ret && event->pmu->event_mapped) - event->pmu->event_mapped(event, vma->vm_mm); + mapped = get_mapped(event, event_mapped); + if (mapped) + mapped(event, vma->vm_mm); return ret; } @@ -7067,6 +7143,9 @@ static int perf_fasync(int fd, struct file *filp, int on) struct perf_event *event = filp->private_data; int retval; + if (event->state <= PERF_EVENT_STATE_REVOKED) + return -ENODEV; + inode_lock(inode); retval = fasync_helper(fd, filp, on, &event->fasync); inode_unlock(inode); @@ -9946,7 +10025,7 @@ void perf_event_text_poke(const void *addr, const void *old_bytes, void perf_event_itrace_started(struct perf_event *event) { - event->attach_state |= PERF_ATTACH_ITRACE; + WRITE_ONCE(event->attach_state, event->attach_state | PERF_ATTACH_ITRACE); } static void perf_log_itrace_start(struct perf_event *event) @@ -10029,14 +10108,13 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle) hwc->interrupts = 1; } else { hwc->interrupts++; - if (unlikely(throttle && - hwc->interrupts > max_samples_per_tick)) { - __this_cpu_inc(perf_throttled_count); - tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); - hwc->interrupts = MAX_INTERRUPTS; - perf_log_throttle(event, 0); - ret = 1; - } + } + + if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) { + __this_cpu_inc(perf_throttled_count); + tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); + perf_event_throttle_group(event); + ret = 1; } if (event->attr.freq) { @@ -11069,11 +11147,15 @@ static inline bool perf_event_is_tracing(struct perf_event *event) return false; } -int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, - u64 bpf_cookie) +static int __perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) { bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp; + if (event->state <= PERF_EVENT_STATE_REVOKED) + return -ENODEV; + if (!perf_event_is_tracing(event)) return perf_event_set_bpf_handler(event, prog, bpf_cookie); @@ -11108,6 +11190,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, return perf_event_attach_bpf_prog(event, prog, bpf_cookie); } +int perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) +{ + struct perf_event_context *ctx; + int ret; + + ctx = perf_event_ctx_lock(event); + ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie); + perf_event_ctx_unlock(event, ctx); + + return ret; +} + void perf_event_free_bpf_prog(struct perf_event *event) { if (!event->prog) @@ -11130,7 +11226,15 @@ static void perf_event_free_filter(struct perf_event *event) { } -int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, +static int __perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, + u64 bpf_cookie) +{ + return -ENOENT; +} + +int perf_event_set_bpf_prog(struct perf_event *event, + struct bpf_prog *prog, u64 bpf_cookie) { return -ENOENT; @@ -12235,6 +12339,9 @@ int 
perf_pmu_register(struct pmu *_pmu, const char *name, int type) if (!pmu->event_idx) pmu->event_idx = perf_event_idx_default; + INIT_LIST_HEAD(&pmu->events); + spin_lock_init(&pmu->events_lock); + /* * Now that the PMU is complete, make it visible to perf_try_init_event(). */ @@ -12248,21 +12355,143 @@ int perf_pmu_register(struct pmu *_pmu, const char *name, int type) } EXPORT_SYMBOL_GPL(perf_pmu_register); -void perf_pmu_unregister(struct pmu *pmu) +static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event, + struct perf_event_context *ctx) +{ + /* + * De-schedule the event and mark it REVOKED. + */ + perf_event_exit_event(event, ctx, true); + + /* + * All _free_event() bits that rely on event->pmu: + * + * Notably, perf_mmap() relies on the ordering here. + */ + scoped_guard (mutex, &event->mmap_mutex) { + WARN_ON_ONCE(pmu->event_unmapped); + /* + * Mostly an empty lock sequence, such that perf_mmap(), which + * relies on mmap_mutex, is sure to observe the state change. + */ + } + + perf_event_free_bpf_prog(event); + perf_free_addr_filters(event); + + if (event->destroy) { + event->destroy(event); + event->destroy = NULL; + } + + if (event->pmu_ctx) { + put_pmu_ctx(event->pmu_ctx); + event->pmu_ctx = NULL; + } + + exclusive_event_destroy(event); + module_put(pmu->module); + + event->pmu = NULL; /* force fault instead of UAF */ +} + +static void pmu_detach_event(struct pmu *pmu, struct perf_event *event) +{ + struct perf_event_context *ctx; + + ctx = perf_event_ctx_lock(event); + __pmu_detach_event(pmu, event, ctx); + perf_event_ctx_unlock(event, ctx); + + scoped_guard (spinlock, &pmu->events_lock) + list_del(&event->pmu_list); +} + +static struct perf_event *pmu_get_event(struct pmu *pmu) +{ + struct perf_event *event; + + guard(spinlock)(&pmu->events_lock); + list_for_each_entry(event, &pmu->events, pmu_list) { + if (atomic_long_inc_not_zero(&event->refcount)) + return event; + } + + return NULL; +} + +static bool pmu_empty(struct pmu *pmu) +{ + guard(spinlock)(&pmu->events_lock); + return list_empty(&pmu->events); +} + +static void pmu_detach_events(struct pmu *pmu) +{ + struct perf_event *event; + + for (;;) { + event = pmu_get_event(pmu); + if (!event) + break; + + pmu_detach_event(pmu, event); + put_event(event); + } + + /* + * wait for pending _free_event()s + */ + wait_var_event(pmu, pmu_empty(pmu)); +} + +int perf_pmu_unregister(struct pmu *pmu) { scoped_guard (mutex, &pmus_lock) { + if (!idr_cmpxchg(&pmu_idr, pmu->type, pmu, NULL)) + return -EINVAL; + list_del_rcu(&pmu->entry); - idr_remove(&pmu_idr, pmu->type); } /* * We dereference the pmu list under both SRCU and regular RCU, so * synchronize against both of those. + * + * Notably, the entirety of event creation, from perf_init_event() + * (which will now fail, because of the above) until + * perf_install_in_context() should be under SRCU such that + * this synchronizes against event creation. This avoids trying to + * detach events that are not fully formed. */ synchronize_srcu(&pmus_srcu); synchronize_rcu(); + if (pmu->event_unmapped && !pmu_empty(pmu)) { + /* + * Can't force remove events when pmu::event_unmapped() + * is used in perf_mmap_close(). + */ + guard(mutex)(&pmus_lock); + idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu); + list_add_rcu(&pmu->entry, &pmus); + return -EBUSY; + } + + scoped_guard (mutex, &pmus_lock) + idr_remove(&pmu_idr, pmu->type); + + /* + * PMU is removed from the pmus list, so no new events will + * be created, now take care of the existing ones. 
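pmu_get_event() above walks pmu->events under events_lock and only returns an event whose refcount could still be raised with atomic_long_inc_not_zero(); an event past its final put may linger on the list until __free_event() takes the lock, so a plain reference bump could resurrect a dying object. A distilled sketch of that lookup pattern, with placeholder types:

	#include <linux/atomic.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct my_obj {
		atomic_long_t		refcount;
		struct list_head	node;
	};

	struct my_registry {
		spinlock_t		lock;
		struct list_head	objs;
	};

	/* Return a referenced object, or NULL if every object is already dying. */
	static struct my_obj *my_get_obj(struct my_registry *reg)
	{
		struct my_obj *obj;

		guard(spinlock)(&reg->lock);
		list_for_each_entry(obj, &reg->objs, node) {
			/* skip objects whose last reference is already gone */
			if (atomic_long_inc_not_zero(&obj->refcount))
				return obj;
		}
		return NULL;
	}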
+ */ + pmu_detach_events(pmu); + + /* + * PMU is unused, make it go away. + */ perf_pmu_free(pmu); + return 0; } EXPORT_SYMBOL_GPL(perf_pmu_unregister); @@ -12356,7 +12585,7 @@ static struct pmu *perf_init_event(struct perf_event *event) struct pmu *pmu; int type, ret; - guard(srcu)(&pmus_srcu); + guard(srcu)(&pmus_srcu); /* pmu idr/list access */ /* * Save original type before calling pmu->event_init() since certain @@ -12580,6 +12809,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, INIT_LIST_HEAD(&event->active_entry); INIT_LIST_HEAD(&event->addr_filters.list); INIT_HLIST_NODE(&event->hlist_entry); + INIT_LIST_HEAD(&event->pmu_list); init_waitqueue_head(&event->waitq); @@ -12651,7 +12881,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, hwc = &event->hw; hwc->sample_period = attr->sample_period; - if (attr->freq && attr->sample_freq) + if (is_event_in_freq_mode(event)) hwc->sample_period = 1; hwc->last_period = hwc->sample_period; @@ -12758,6 +12988,13 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, /* symmetric to unaccount_event() in _free_event() */ account_event(event); + /* + * Event creation should be under SRCU, see perf_pmu_unregister(). + */ + lockdep_assert_held(&pmus_srcu); + scoped_guard (spinlock, &pmu->events_lock) + list_add(&event->pmu_list, &pmu->events); + return_ptr(event); } @@ -12957,6 +13194,9 @@ set: goto unlock; if (output_event) { + if (output_event->state <= PERF_EVENT_STATE_REVOKED) + goto unlock; + /* get the rb we want to redirect to */ rb = ring_buffer_get(output_event); if (!rb) @@ -13138,6 +13378,11 @@ SYSCALL_DEFINE5(perf_event_open, if (event_fd < 0) return event_fd; + /* + * Event creation should be under SRCU, see perf_pmu_unregister(). + */ + guard(srcu)(&pmus_srcu); + CLASS(fd, group)(group_fd); // group_fd == -1 => empty if (group_fd != -1) { if (!is_perf_file(group)) { @@ -13145,6 +13390,10 @@ SYSCALL_DEFINE5(perf_event_open, goto err_fd; } group_leader = fd_file(group)->private_data; + if (group_leader->state <= PERF_EVENT_STATE_REVOKED) { + err = -ENODEV; + goto err_fd; + } if (flags & PERF_FLAG_FD_OUTPUT) output_event = group_leader; if (flags & PERF_FLAG_FD_NO_GROUP) @@ -13441,7 +13690,7 @@ err_cred: if (task) up_read(&task->signal->exec_update_lock); err_alloc: - free_event(event); + put_event(event); err_task: if (task) put_task_struct(task); @@ -13478,6 +13727,11 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, if (attr->aux_output || attr->aux_action) return ERR_PTR(-EINVAL); + /* + * Event creation should be under SRCU, see perf_pmu_unregister(). 
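Event creation (perf_event_open(), perf_event_create_kernel_counter() and inherit_event()) is now bracketed by guard(srcu)(&pmus_srcu), so the synchronize_srcu() in perf_pmu_unregister() above waits out any creation that has passed perf_init_event() but not yet reached perf_install_in_context(). A reduced sketch of the two sides of that handshake, with placeholder names and error handling trimmed:

	#include <linux/srcu.h>

	DEFINE_STATIC_SRCU(my_srcu);

	/* creation side: the whole create path runs inside the SRCU read section */
	static int my_create(void)
	{
		guard(srcu)(&my_srcu);

		/* look up the provider, allocate and install the new object ... */
		return 0;
	}

	/* teardown side: unpublish first, then wait for in-flight creations */
	static void my_unregister(void)
	{
		/* remove the provider from the lookup structures ... */

		synchronize_srcu(&my_srcu);	/* waits for every my_create() in flight */

		/* only now detach whatever objects were successfully created ... */
	}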
+ */ + guard(srcu)(&pmus_srcu); + event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler, context, -1); if (IS_ERR(event)) { @@ -13549,7 +13803,7 @@ err_unlock: perf_unpin_context(ctx); put_ctx(ctx); err_alloc: - free_event(event); + put_event(event); err: return ERR_PTR(err); } @@ -13689,10 +13943,12 @@ static void sync_child_event(struct perf_event *child_event) } static void -perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) +perf_event_exit_event(struct perf_event *event, + struct perf_event_context *ctx, bool revoke) { struct perf_event *parent_event = event->parent; - unsigned long detach_flags = 0; + unsigned long detach_flags = DETACH_EXIT; + unsigned int attach_state; if (parent_event) { /* @@ -13707,22 +13963,38 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) * Do destroy all inherited groups, we don't care about those * and being thorough is better. */ - detach_flags = DETACH_GROUP | DETACH_CHILD; + detach_flags |= DETACH_GROUP | DETACH_CHILD; mutex_lock(&parent_event->child_mutex); + /* PERF_ATTACH_ITRACE might be set concurrently */ + attach_state = READ_ONCE(event->attach_state); } - perf_remove_from_context(event, detach_flags | DETACH_EXIT); + if (revoke) + detach_flags |= DETACH_GROUP | DETACH_REVOKE; + perf_remove_from_context(event, detach_flags); /* * Child events can be freed. */ if (parent_event) { mutex_unlock(&parent_event->child_mutex); + /* - * Kick perf_poll() for is_event_hup(); + * Match the refcount initialization. Make sure it doesn't happen + * twice if pmu_detach_event() calls it on an already exited task. */ - perf_event_wakeup(parent_event); - put_event(event); + if (attach_state & PERF_ATTACH_CHILD) { + /* + * Kick perf_poll() for is_event_hup(); + */ + perf_event_wakeup(parent_event); + /* + * pmu_detach_event() will have an extra refcount. + * perf_pending_task() might have one too. + */ + put_event(event); + } + return; } @@ -13732,15 +14004,13 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) perf_event_wakeup(event); } -static void perf_event_exit_task_context(struct task_struct *child) +static void perf_event_exit_task_context(struct task_struct *task, bool exit) { - struct perf_event_context *child_ctx, *clone_ctx = NULL; + struct perf_event_context *ctx, *clone_ctx = NULL; struct perf_event *child_event, *next; - WARN_ON_ONCE(child != current); - - child_ctx = perf_pin_task_context(child); - if (!child_ctx) + ctx = perf_pin_task_context(task); + if (!ctx) return; /* @@ -13753,27 +14023,28 @@ static void perf_event_exit_task_context(struct task_struct *child) * without ctx::mutex (it cannot because of the move_group double mutex * lock thing). See the comments in perf_install_in_context(). */ - mutex_lock(&child_ctx->mutex); + mutex_lock(&ctx->mutex); /* * In a single ctx::lock section, de-schedule the events and detach the * context from the task such that we cannot ever get it scheduled back * in. */ - raw_spin_lock_irq(&child_ctx->lock); - task_ctx_sched_out(child_ctx, NULL, EVENT_ALL); + raw_spin_lock_irq(&ctx->lock); + if (exit) + task_ctx_sched_out(ctx, NULL, EVENT_ALL); /* * Now that the context is inactive, destroy the task <-> ctx relation * and mark the context dead. 
*/ - RCU_INIT_POINTER(child->perf_event_ctxp, NULL); - put_ctx(child_ctx); /* cannot be last */ - WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE); - put_task_struct(current); /* cannot be last */ + RCU_INIT_POINTER(task->perf_event_ctxp, NULL); + put_ctx(ctx); /* cannot be last */ + WRITE_ONCE(ctx->task, TASK_TOMBSTONE); + put_task_struct(task); /* cannot be last */ - clone_ctx = unclone_ctx(child_ctx); - raw_spin_unlock_irq(&child_ctx->lock); + clone_ctx = unclone_ctx(ctx); + raw_spin_unlock_irq(&ctx->lock); if (clone_ctx) put_ctx(clone_ctx); @@ -13783,28 +14054,48 @@ static void perf_event_exit_task_context(struct task_struct *child) * won't get any samples after PERF_RECORD_EXIT. We can however still * get a few PERF_RECORD_READ events. */ - perf_event_task(child, child_ctx, 0); + if (exit) + perf_event_task(task, ctx, 0); - list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) - perf_event_exit_event(child_event, child_ctx); + list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry) + perf_event_exit_event(child_event, ctx, false); - mutex_unlock(&child_ctx->mutex); + mutex_unlock(&ctx->mutex); - put_ctx(child_ctx); + if (!exit) { + /* + * perf_event_release_kernel() could still have a reference on + * this context. In that case we must wait for these events to + * have been freed (in particular all their references to this + * task must've been dropped). + * + * Without this copy_process() will unconditionally free this + * task (irrespective of its reference count) and + * _free_event()'s put_task_struct(event->hw.target) will be a + * use-after-free. + * + * Wait for all events to drop their context reference. + */ + wait_var_event(&ctx->refcount, + refcount_read(&ctx->refcount) == 1); + } + put_ctx(ctx); } /* - * When a child task exits, feed back event values to parent events. + * When a task exits, feed back event values to parent events. * * Can be called with exec_update_lock held when called from * setup_new_exec(). */ -void perf_event_exit_task(struct task_struct *child) +void perf_event_exit_task(struct task_struct *task) { struct perf_event *event, *tmp; - mutex_lock(&child->perf_event_mutex); - list_for_each_entry_safe(event, tmp, &child->perf_event_list, + WARN_ON_ONCE(task != current); + + mutex_lock(&task->perf_event_mutex); + list_for_each_entry_safe(event, tmp, &task->perf_event_list, owner_entry) { list_del_init(&event->owner_entry); @@ -13815,42 +14106,23 @@ void perf_event_exit_task(struct task_struct *child) */ smp_store_release(&event->owner, NULL); } - mutex_unlock(&child->perf_event_mutex); + mutex_unlock(&task->perf_event_mutex); - perf_event_exit_task_context(child); + perf_event_exit_task_context(task, true); /* * The perf_event_exit_task_context calls perf_event_task - * with child's task_ctx, which generates EXIT events for - * child contexts and sets child->perf_event_ctxp[] to NULL. + * with task's task_ctx, which generates EXIT events for + * task contexts and sets task->perf_event_ctxp[] to NULL. * At this point we need to send EXIT events to cpu contexts. */ - perf_event_task(child, NULL, 0); + perf_event_task(task, NULL, 0); /* * Detach the perf_ctx_data for the system-wide event. 
*/ guard(percpu_read)(&global_ctx_data_rwsem); - detach_task_ctx_data(child); -} - -static void perf_free_event(struct perf_event *event, - struct perf_event_context *ctx) -{ - struct perf_event *parent = event->parent; - - if (WARN_ON_ONCE(!parent)) - return; - - mutex_lock(&parent->child_mutex); - list_del_init(&event->child_list); - mutex_unlock(&parent->child_mutex); - - raw_spin_lock_irq(&ctx->lock); - perf_group_detach(event); - list_del_event(event, ctx); - raw_spin_unlock_irq(&ctx->lock); - put_event(event); + detach_task_ctx_data(task); } /* @@ -13862,48 +14134,7 @@ static void perf_free_event(struct perf_event *event, */ void perf_event_free_task(struct task_struct *task) { - struct perf_event_context *ctx; - struct perf_event *event, *tmp; - - ctx = rcu_access_pointer(task->perf_event_ctxp); - if (!ctx) - return; - - mutex_lock(&ctx->mutex); - raw_spin_lock_irq(&ctx->lock); - /* - * Destroy the task <-> ctx relation and mark the context dead. - * - * This is important because even though the task hasn't been - * exposed yet the context has been (through child_list). - */ - RCU_INIT_POINTER(task->perf_event_ctxp, NULL); - WRITE_ONCE(ctx->task, TASK_TOMBSTONE); - put_task_struct(task); /* cannot be last */ - raw_spin_unlock_irq(&ctx->lock); - - - list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) - perf_free_event(event, ctx); - - mutex_unlock(&ctx->mutex); - - /* - * perf_event_release_kernel() could've stolen some of our - * child events and still have them on its free_list. In that - * case we must wait for these events to have been freed (in - * particular all their references to this task must've been - * dropped). - * - * Without this copy_process() will unconditionally free this - * task (irrespective of its reference count) and - * _free_event()'s put_task_struct(event->hw.target) will be a - * use-after-free. - * - * Wait for all events to drop their context reference. - */ - wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1); - put_ctx(ctx); /* must be last */ + perf_event_exit_task_context(task, false); } void perf_event_delayed_put(struct task_struct *task) @@ -13980,6 +14211,14 @@ inherit_event(struct perf_event *parent_event, if (parent_event->parent) parent_event = parent_event->parent; + if (parent_event->state <= PERF_EVENT_STATE_REVOKED) + return NULL; + + /* + * Event creation should be under SRCU, see perf_pmu_unregister(). + */ + guard(srcu)(&pmus_srcu); + child_event = perf_event_alloc(&parent_event->attr, parent_event->cpu, child, diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 5130b119d0ae..d2aef87c7e9f 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -679,7 +679,15 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event, { bool overwrite = !(flags & RING_BUFFER_WRITABLE); int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu); - int ret = -ENOMEM, max_order; + bool use_contiguous_pages = event->pmu->capabilities & ( + PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_PREFER_LARGE); + /* + * Initialize max_order to 0 for page allocation. This allocates single + * pages to minimize memory fragmentation. This is overridden if the + * PMU needs or prefers contiguous pages (use_contiguous_pages = true). 
+ */ + int max_order = 0; + int ret = -ENOMEM; if (!has_aux(event)) return -EOPNOTSUPP; @@ -689,8 +697,8 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event, if (!overwrite) { /* - * Watermark defaults to half the buffer, and so does the - * max_order, to aid PMU drivers in double buffering. + * Watermark defaults to half the buffer, to aid PMU drivers + * in double buffering. */ if (!watermark) watermark = min_t(unsigned long, @@ -698,16 +706,19 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event, (unsigned long)nr_pages << (PAGE_SHIFT - 1)); /* - * Use aux_watermark as the basis for chunking to - * help PMU drivers honor the watermark. + * If using contiguous pages, use aux_watermark as the basis + * for chunking to help PMU drivers honor the watermark. */ - max_order = get_order(watermark); + if (use_contiguous_pages) + max_order = get_order(watermark); } else { /* - * We need to start with the max_order that fits in nr_pages, - * not the other way around, hence ilog2() and not get_order. + * If using contiguous pages, we need to start with the + * max_order that fits in nr_pages, not the other way around, + * hence ilog2() and not get_order. */ - max_order = ilog2(nr_pages); + if (use_contiguous_pages) + max_order = ilog2(nr_pages); watermark = 0; } diff --git a/kernel/exit.c b/kernel/exit.c index 1b51dc099f1e..38645039dd8f 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -133,8 +133,13 @@ struct release_task_post { static void __unhash_process(struct release_task_post *post, struct task_struct *p, bool group_dead) { + struct pid *pid = task_pid(p); + nr_threads--; + detach_pid(post->pids, p, PIDTYPE_PID); + wake_up_all(&pid->wait_pidfd); + if (group_dead) { detach_pid(post->pids, p, PIDTYPE_TGID); detach_pid(post->pids, p, PIDTYPE_PGID); @@ -253,7 +258,8 @@ repeat: pidfs_exit(p); cgroup_release(p); - thread_pid = get_pid(p->thread_pid); + /* Retrieve @thread_pid before __unhash_process() may set it to NULL. 
*/ + thread_pid = task_pid(p); write_lock_irq(&tasklist_lock); ptrace_release_task(p); @@ -282,8 +288,8 @@ repeat: } write_unlock_irq(&tasklist_lock); + /* @thread_pid can't go away until free_pids() below */ proc_flush_pid(thread_pid); - put_pid(thread_pid); add_device_randomness(&p->se.sum_exec_runtime, sizeof(p->se.sum_exec_runtime)); free_pids(post.pids); @@ -936,12 +942,12 @@ void __noreturn do_exit(long code) tsk->exit_code = code; taskstats_exit(tsk, group_dead); + trace_sched_process_exit(tsk, group_dead); exit_mm(); if (group_dead) acct_process(); - trace_sched_process_exit(tsk); exit_sem(tsk); exit_shm(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index 168681fc4b25..85afccfdf3b1 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1306,6 +1306,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, RCU_INIT_POINTER(mm->exe_file, NULL); mmu_notifier_subscriptions_init(mm); init_tlb_flush_pending(mm); + futex_mm_init(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS) mm->pmd_huge_pte = NULL; #endif @@ -1388,6 +1389,7 @@ static inline void __mmput(struct mm_struct *mm) if (mm->binfmt) module_put(mm->binfmt->module); lru_gen_del_mm(mm); + futex_hash_free(mm); mmdrop(mm); } @@ -2037,17 +2039,16 @@ static inline void rcu_copy_process(struct task_struct *p) } /** - * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd + * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd * @pid: the struct pid for which to create a pidfd * @flags: flags of the new @pidfd - * @ret: Where to return the file for the pidfd. + * @ret_file: return the new pidfs file * * Allocate a new file that stashes @pid and reserve a new pidfd number in the * caller's file descriptor table. The pidfd is reserved but not installed yet. * - * The helper doesn't perform checks on @pid which makes it useful for pidfds - * created via CLONE_PIDFD where @pid has no task attached when the pidfd and - * pidfd file are prepared. + * The helper verifies that @pid is still in use, without PIDFD_THREAD the + * task identified by @pid must be a thread-group leader. * * If this function returns successfully the caller is responsible to either * call fd_install() passing the returned pidfd and pidfd file as arguments in @@ -2064,59 +2065,50 @@ static inline void rcu_copy_process(struct task_struct *p) * error, a negative error code is returned from the function and the * last argument remains unchanged. */ -static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) +int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file) { - struct file *pidfd_file; + struct file *pidfs_file; + + /* + * PIDFD_STALE is only allowed to be passed if the caller knows + * that @pid is already registered in pidfs and thus + * PIDFD_INFO_EXIT information is guaranteed to be available. + */ + if (!(flags & PIDFD_STALE)) { + /* + * While holding the pidfd waitqueue lock removing the + * task linkage for the thread-group leader pid + * (PIDTYPE_TGID) isn't possible. Thus, if there's still + * task linkage for PIDTYPE_PID not having thread-group + * leader linkage for the pid means it wasn't a + * thread-group leader in the first place. + */ + guard(spinlock_irq)(&pid->wait_pidfd.lock); + + /* Task has already been reaped. */ + if (!pid_has_task(pid, PIDTYPE_PID)) + return -ESRCH; + /* + * If this struct pid isn't used as a thread-group + * leader but the caller requested to create a + * thread-group leader pidfd then report ENOENT. 
+ */ + if (!(flags & PIDFD_THREAD) && !pid_has_task(pid, PIDTYPE_TGID)) + return -ENOENT; + } CLASS(get_unused_fd, pidfd)(O_CLOEXEC); if (pidfd < 0) return pidfd; - pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR); - if (IS_ERR(pidfd_file)) - return PTR_ERR(pidfd_file); + pidfs_file = pidfs_alloc_file(pid, flags | O_RDWR); + if (IS_ERR(pidfs_file)) + return PTR_ERR(pidfs_file); - *ret = pidfd_file; + *ret_file = pidfs_file; return take_fd(pidfd); } -/** - * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd - * @pid: the struct pid for which to create a pidfd - * @flags: flags of the new @pidfd - * @ret: Where to return the pidfd. - * - * Allocate a new file that stashes @pid and reserve a new pidfd number in the - * caller's file descriptor table. The pidfd is reserved but not installed yet. - * - * The helper verifies that @pid is still in use, without PIDFD_THREAD the - * task identified by @pid must be a thread-group leader. - * - * If this function returns successfully the caller is responsible to either - * call fd_install() passing the returned pidfd and pidfd file as arguments in - * order to install the pidfd into its file descriptor table or they must use - * put_unused_fd() and fput() on the returned pidfd and pidfd file - * respectively. - * - * This function is useful when a pidfd must already be reserved but there - * might still be points of failure afterwards and the caller wants to ensure - * that no pidfd is leaked into its file descriptor table. - * - * Return: On success, a reserved pidfd is returned from the function and a new - * pidfd file is returned in the last argument to the function. On - * error, a negative error code is returned from the function and the - * last argument remains unchanged. - */ -int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) -{ - bool thread = flags & PIDFD_THREAD; - - if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID)) - return -EINVAL; - - return __pidfd_prepare(pid, flags, ret); -} - static void __delayed_free_task(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); @@ -2163,6 +2155,13 @@ static void rv_task_fork(struct task_struct *p) #define rv_task_fork(p) do {} while (0) #endif +static bool need_futex_hash_allocate_default(u64 clone_flags) +{ + if ((clone_flags & (CLONE_THREAD | CLONE_VM)) != (CLONE_THREAD | CLONE_VM)) + return false; + return true; +} + /* * This creates a new process as a copy of the old one, * but does not actually start it yet. @@ -2463,7 +2462,7 @@ __latent_entropy struct task_struct *copy_process( * Note that no task has been attached to @pid yet indicate * that via CLONE_PIDFD. */ - retval = __pidfd_prepare(pid, flags | PIDFD_CLONE, &pidfile); + retval = pidfd_prepare(pid, flags | PIDFD_STALE, &pidfile); if (retval < 0) goto bad_fork_free_pid; pidfd = retval; @@ -2544,6 +2543,21 @@ __latent_entropy struct task_struct *copy_process( goto bad_fork_cancel_cgroup; /* + * Allocate a default futex hash for the user process once the first + * thread spawns. + */ + if (need_futex_hash_allocate_default(clone_flags)) { + retval = futex_hash_allocate_default(); + if (retval) + goto bad_fork_core_free; + /* + * If we fail beyond this point we don't free the allocated + * futex hash map. We assume that another thread will be created + * and makes use of it. The hash map will be freed once the main + * thread terminates. 
+ */ + } + /* * From this point on we must avoid any synchronous user-space * communication until we take the tasklist-lock. In particular, we do * not want user-space to be able to predict the process start-time by diff --git a/kernel/futex/core.c b/kernel/futex/core.c index cca15859a50b..19a2c65f3d37 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -36,9 +36,15 @@ #include <linux/pagemap.h> #include <linux/debugfs.h> #include <linux/plist.h> +#include <linux/gfp.h> +#include <linux/vmalloc.h> #include <linux/memblock.h> #include <linux/fault-inject.h> #include <linux/slab.h> +#include <linux/prctl.h> +#include <linux/rcuref.h> +#include <linux/mempolicy.h> +#include <linux/mmap_lock.h> #include "futex.h" #include "../locking/rtmutex_common.h" @@ -49,12 +55,24 @@ * reside in the same cacheline. */ static struct { - struct futex_hash_bucket *queues; unsigned long hashmask; + unsigned int hashshift; + struct futex_hash_bucket *queues[MAX_NUMNODES]; } __futex_data __read_mostly __aligned(2*sizeof(long)); -#define futex_queues (__futex_data.queues) -#define futex_hashmask (__futex_data.hashmask) +#define futex_hashmask (__futex_data.hashmask) +#define futex_hashshift (__futex_data.hashshift) +#define futex_queues (__futex_data.queues) + +struct futex_private_hash { + rcuref_t users; + unsigned int hash_mask; + struct rcu_head rcu; + void *mm; + bool custom; + bool immutable; + struct futex_hash_bucket queues[]; +}; /* * Fault injections for futexes. @@ -107,21 +125,328 @@ late_initcall(fail_futex_debugfs); #endif /* CONFIG_FAIL_FUTEX */ +static struct futex_hash_bucket * +__futex_hash(union futex_key *key, struct futex_private_hash *fph); + +#ifdef CONFIG_FUTEX_PRIVATE_HASH +static inline bool futex_key_is_private(union futex_key *key) +{ + /* + * Relies on get_futex_key() to set either bit for shared + * futexes -- see comment with union futex_key. + */ + return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED)); +} + +bool futex_private_hash_get(struct futex_private_hash *fph) +{ + if (fph->immutable) + return true; + return rcuref_get(&fph->users); +} + +void futex_private_hash_put(struct futex_private_hash *fph) +{ + /* Ignore return value, last put is verified via rcuref_is_dead() */ + if (fph->immutable) + return; + if (rcuref_put(&fph->users)) + wake_up_var(fph->mm); +} + /** - * futex_hash - Return the hash bucket in the global hash - * @key: Pointer to the futex key for which the hash is calculated + * futex_hash_get - Get an additional reference for the local hash. + * @hb: ptr to the private local hash. * - * We hash on the keys returned from get_futex_key (see below) and return the - * corresponding hash bucket in the global hash. + * Obtain an additional reference for the already obtained hash bucket. The + * caller must already own an reference. 
*/ +void futex_hash_get(struct futex_hash_bucket *hb) +{ + struct futex_private_hash *fph = hb->priv; + + if (!fph) + return; + WARN_ON_ONCE(!futex_private_hash_get(fph)); +} + +void futex_hash_put(struct futex_hash_bucket *hb) +{ + struct futex_private_hash *fph = hb->priv; + + if (!fph) + return; + futex_private_hash_put(fph); +} + +static struct futex_hash_bucket * +__futex_hash_private(union futex_key *key, struct futex_private_hash *fph) +{ + u32 hash; + + if (!futex_key_is_private(key)) + return NULL; + + if (!fph) + fph = rcu_dereference(key->private.mm->futex_phash); + if (!fph || !fph->hash_mask) + return NULL; + + hash = jhash2((void *)&key->private.address, + sizeof(key->private.address) / 4, + key->both.offset); + return &fph->queues[hash & fph->hash_mask]; +} + +static void futex_rehash_private(struct futex_private_hash *old, + struct futex_private_hash *new) +{ + struct futex_hash_bucket *hb_old, *hb_new; + unsigned int slots = old->hash_mask + 1; + unsigned int i; + + for (i = 0; i < slots; i++) { + struct futex_q *this, *tmp; + + hb_old = &old->queues[i]; + + spin_lock(&hb_old->lock); + plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) { + + plist_del(&this->list, &hb_old->chain); + futex_hb_waiters_dec(hb_old); + + WARN_ON_ONCE(this->lock_ptr != &hb_old->lock); + + hb_new = __futex_hash(&this->key, new); + futex_hb_waiters_inc(hb_new); + /* + * The new pointer isn't published yet but an already + * moved user can be unqueued due to timeout or signal. + */ + spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING); + plist_add(&this->list, &hb_new->chain); + this->lock_ptr = &hb_new->lock; + spin_unlock(&hb_new->lock); + } + spin_unlock(&hb_old->lock); + } +} + +static bool __futex_pivot_hash(struct mm_struct *mm, + struct futex_private_hash *new) +{ + struct futex_private_hash *fph; + + WARN_ON_ONCE(mm->futex_phash_new); + + fph = rcu_dereference_protected(mm->futex_phash, + lockdep_is_held(&mm->futex_hash_lock)); + if (fph) { + if (!rcuref_is_dead(&fph->users)) { + mm->futex_phash_new = new; + return false; + } + + futex_rehash_private(fph, new); + } + rcu_assign_pointer(mm->futex_phash, new); + kvfree_rcu(fph, rcu); + return true; +} + +static void futex_pivot_hash(struct mm_struct *mm) +{ + scoped_guard(mutex, &mm->futex_hash_lock) { + struct futex_private_hash *fph; + + fph = mm->futex_phash_new; + if (fph) { + mm->futex_phash_new = NULL; + __futex_pivot_hash(mm, fph); + } + } +} + +struct futex_private_hash *futex_private_hash(void) +{ + struct mm_struct *mm = current->mm; + /* + * Ideally we don't loop. If there is a replacement in progress + * then a new private hash is already prepared and a reference can't be + * obtained once the last user dropped it's. + * In that case we block on mm_struct::futex_hash_lock and either have + * to perform the replacement or wait while someone else is doing the + * job. Eitherway, on the second iteration we acquire a reference on the + * new private hash or loop again because a new replacement has been + * requested. 
+ */ +again: + scoped_guard(rcu) { + struct futex_private_hash *fph; + + fph = rcu_dereference(mm->futex_phash); + if (!fph) + return NULL; + + if (fph->immutable) + return fph; + if (rcuref_get(&fph->users)) + return fph; + } + futex_pivot_hash(mm); + goto again; +} + struct futex_hash_bucket *futex_hash(union futex_key *key) { - u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4, - key->both.offset); + struct futex_private_hash *fph; + struct futex_hash_bucket *hb; + +again: + scoped_guard(rcu) { + hb = __futex_hash(key, NULL); + fph = hb->priv; + + if (!fph || futex_private_hash_get(fph)) + return hb; + } + futex_pivot_hash(key->private.mm); + goto again; +} + +#else /* !CONFIG_FUTEX_PRIVATE_HASH */ + +static struct futex_hash_bucket * +__futex_hash_private(union futex_key *key, struct futex_private_hash *fph) +{ + return NULL; +} + +struct futex_hash_bucket *futex_hash(union futex_key *key) +{ + return __futex_hash(key, NULL); +} + +#endif /* CONFIG_FUTEX_PRIVATE_HASH */ + +#ifdef CONFIG_FUTEX_MPOL + +static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = vma_lookup(mm, addr); + struct mempolicy *mpol; + int node = FUTEX_NO_NODE; + + if (!vma) + return FUTEX_NO_NODE; + + mpol = vma_policy(vma); + if (!mpol) + return FUTEX_NO_NODE; + + switch (mpol->mode) { + case MPOL_PREFERRED: + node = first_node(mpol->nodes); + break; + case MPOL_PREFERRED_MANY: + case MPOL_BIND: + if (mpol->home_node != NUMA_NO_NODE) + node = mpol->home_node; + break; + default: + break; + } + + return node; +} + +static int futex_key_to_node_opt(struct mm_struct *mm, unsigned long addr) +{ + int seq, node; + + guard(rcu)(); + + if (!mmap_lock_speculate_try_begin(mm, &seq)) + return -EBUSY; + + node = __futex_key_to_node(mm, addr); + + if (mmap_lock_speculate_retry(mm, seq)) + return -EAGAIN; + + return node; +} + +static int futex_mpol(struct mm_struct *mm, unsigned long addr) +{ + int node; + + node = futex_key_to_node_opt(mm, addr); + if (node >= FUTEX_NO_NODE) + return node; - return &futex_queues[hash & futex_hashmask]; + guard(mmap_read_lock)(mm); + return __futex_key_to_node(mm, addr); } +#else /* !CONFIG_FUTEX_MPOL */ + +static int futex_mpol(struct mm_struct *mm, unsigned long addr) +{ + return FUTEX_NO_NODE; +} + +#endif /* CONFIG_FUTEX_MPOL */ + +/** + * __futex_hash - Return the hash bucket + * @key: Pointer to the futex key for which the hash is calculated + * @fph: Pointer to private hash if known + * + * We hash on the keys returned from get_futex_key (see below) and return the + * corresponding hash bucket. + * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the + * private hash) is returned if existing. Otherwise a hash bucket from the + * global hash is returned. + */ +static struct futex_hash_bucket * +__futex_hash(union futex_key *key, struct futex_private_hash *fph) +{ + int node = key->both.node; + u32 hash; + + if (node == FUTEX_NO_NODE) { + struct futex_hash_bucket *hb; + + hb = __futex_hash_private(key, fph); + if (hb) + return hb; + } + + hash = jhash2((u32 *)key, + offsetof(typeof(*key), both.offset) / sizeof(u32), + key->both.offset); + + if (node == FUTEX_NO_NODE) { + /* + * In case of !FLAGS_NUMA, use some unused hash bits to pick a + * node -- this ensures regular futexes are interleaved across + * the nodes and avoids having to allocate multiple + * hash-tables. + * + * NOTE: this isn't perfectly uniform, but it is fast and + * handles sparse node masks. 
+ */ + node = (hash >> futex_hashshift) % nr_node_ids; + if (!node_possible(node)) { + node = find_next_bit_wrap(node_possible_map.bits, + nr_node_ids, node); + } + } + + return &futex_queues[node][hash & futex_hashmask]; +} /** * futex_setup_timer - set up the sleeping hrtimer. @@ -227,25 +552,60 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key, struct page *page; struct folio *folio; struct address_space *mapping; - int err, ro = 0; + int node, err, size, ro = 0; + bool node_updated = false; bool fshared; fshared = flags & FLAGS_SHARED; + size = futex_size(flags); + if (flags & FLAGS_NUMA) + size *= 2; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; - if (unlikely((address % sizeof(u32)) != 0)) + if (unlikely((address % size) != 0)) return -EINVAL; address -= key->both.offset; - if (unlikely(!access_ok(uaddr, sizeof(u32)))) + if (unlikely(!access_ok(uaddr, size))) return -EFAULT; if (unlikely(should_fail_futex(fshared))) return -EFAULT; + node = FUTEX_NO_NODE; + + if (flags & FLAGS_NUMA) { + u32 __user *naddr = (void *)uaddr + size / 2; + + if (futex_get_value(&node, naddr)) + return -EFAULT; + + if (node != FUTEX_NO_NODE && + (node >= MAX_NUMNODES || !node_possible(node))) + return -EINVAL; + } + + if (node == FUTEX_NO_NODE && (flags & FLAGS_MPOL)) { + node = futex_mpol(mm, address); + node_updated = true; + } + + if (flags & FLAGS_NUMA) { + u32 __user *naddr = (void *)uaddr + size / 2; + + if (node == FUTEX_NO_NODE) { + node = numa_node_id(); + node_updated = true; + } + if (node_updated && futex_put_value(node, naddr)) + return -EFAULT; + } + + key->both.node = node; + /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs @@ -502,13 +862,9 @@ void __futex_unqueue(struct futex_q *q) } /* The key must be already stored in q->key. */ -struct futex_hash_bucket *futex_q_lock(struct futex_q *q) +void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb) __acquires(&hb->lock) { - struct futex_hash_bucket *hb; - - hb = futex_hash(&q->key); - /* * Increment the counter before taking the lock so that * a potential waker won't miss a to-be-slept task that is @@ -522,14 +878,13 @@ struct futex_hash_bucket *futex_q_lock(struct futex_q *q) q->lock_ptr = &hb->lock; spin_lock(&hb->lock); - return hb; } void futex_q_unlock(struct futex_hash_bucket *hb) __releases(&hb->lock) { - spin_unlock(&hb->lock); futex_hb_waiters_dec(hb); + spin_unlock(&hb->lock); } void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb, @@ -568,6 +923,8 @@ int futex_unqueue(struct futex_q *q) spinlock_t *lock_ptr; int ret = 0; + /* RCU so lock_ptr is not going away during locking. */ + guard(rcu)(); /* In the common case we don't take the spinlock, which is nice. */ retry: /* @@ -606,6 +963,24 @@ retry: return ret; } +void futex_q_lockptr_lock(struct futex_q *q) +{ + spinlock_t *lock_ptr; + + /* + * See futex_unqueue() why lock_ptr can change. + */ + guard(rcu)(); +retry: + lock_ptr = READ_ONCE(q->lock_ptr); + spin_lock(lock_ptr); + + if (unlikely(lock_ptr != q->lock_ptr)) { + spin_unlock(lock_ptr); + goto retry; + } +} + /* * PI futexes can not be requeued and must remove themselves from the hash * bucket. The hash bucket lock (i.e. lock_ptr) is held. 
@@ -949,10 +1324,20 @@ static void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; - struct futex_hash_bucket *hb; union futex_key key = FUTEX_KEY_INIT; /* + * The mutex mm_struct::futex_hash_lock might be acquired. + */ + might_sleep(); + /* + * Ensure the hash remains stable (no resize) during the while loop + * below. The hb pointer is acquired under the pi_lock so we can't block + * on the mutex. + */ + WARN_ON(curr != current); + guard(private_hash)(); + /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: @@ -962,50 +1347,52 @@ static void exit_pi_state_list(struct task_struct *curr) next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; - hb = futex_hash(&key); - - /* - * We can race against put_pi_state() removing itself from the - * list (a waiter going away). put_pi_state() will first - * decrement the reference count and then modify the list, so - * its possible to see the list entry but fail this reference - * acquire. - * - * In that case; drop the locks to let put_pi_state() make - * progress and retry the loop. - */ - if (!refcount_inc_not_zero(&pi_state->refcount)) { + if (1) { + CLASS(hb, hb)(&key); + + /* + * We can race against put_pi_state() removing itself from the + * list (a waiter going away). put_pi_state() will first + * decrement the reference count and then modify the list, so + * its possible to see the list entry but fail this reference + * acquire. + * + * In that case; drop the locks to let put_pi_state() make + * progress and retry the loop. + */ + if (!refcount_inc_not_zero(&pi_state->refcount)) { + raw_spin_unlock_irq(&curr->pi_lock); + cpu_relax(); + raw_spin_lock_irq(&curr->pi_lock); + continue; + } raw_spin_unlock_irq(&curr->pi_lock); - cpu_relax(); - raw_spin_lock_irq(&curr->pi_lock); - continue; - } - raw_spin_unlock_irq(&curr->pi_lock); - spin_lock(&hb->lock); - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - raw_spin_lock(&curr->pi_lock); - /* - * We dropped the pi-lock, so re-check whether this - * task still owns the PI-state: - */ - if (head->next != next) { - /* retain curr->pi_lock for the loop invariant */ - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); + spin_lock(&hb->lock); + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + raw_spin_lock(&curr->pi_lock); + /* + * We dropped the pi-lock, so re-check whether this + * task still owns the PI-state: + */ + if (head->next != next) { + /* retain curr->pi_lock for the loop invariant */ + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); + spin_unlock(&hb->lock); + put_pi_state(pi_state); + continue; + } + + WARN_ON(pi_state->owner != curr); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + pi_state->owner = NULL; + + raw_spin_unlock(&curr->pi_lock); + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); spin_unlock(&hb->lock); - put_pi_state(pi_state); - continue; } - WARN_ON(pi_state->owner != curr); - WARN_ON(list_empty(&pi_state->list)); - list_del_init(&pi_state->list); - pi_state->owner = NULL; - - raw_spin_unlock(&curr->pi_lock); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - spin_unlock(&hb->lock); - rt_mutex_futex_unlock(&pi_state->pi_mutex); put_pi_state(pi_state); @@ -1125,30 +1512,304 @@ void futex_exit_release(struct task_struct *tsk) futex_cleanup_end(tsk, FUTEX_STATE_DEAD); } +static void futex_hash_bucket_init(struct 
futex_hash_bucket *fhb, + struct futex_private_hash *fph) +{ +#ifdef CONFIG_FUTEX_PRIVATE_HASH + fhb->priv = fph; +#endif + atomic_set(&fhb->waiters, 0); + plist_head_init(&fhb->chain); + spin_lock_init(&fhb->lock); +} + +#define FH_CUSTOM 0x01 +#define FH_IMMUTABLE 0x02 + +#ifdef CONFIG_FUTEX_PRIVATE_HASH +void futex_hash_free(struct mm_struct *mm) +{ + struct futex_private_hash *fph; + + kvfree(mm->futex_phash_new); + fph = rcu_dereference_raw(mm->futex_phash); + if (fph) { + WARN_ON_ONCE(rcuref_read(&fph->users) > 1); + kvfree(fph); + } +} + +static bool futex_pivot_pending(struct mm_struct *mm) +{ + struct futex_private_hash *fph; + + guard(rcu)(); + + if (!mm->futex_phash_new) + return true; + + fph = rcu_dereference(mm->futex_phash); + return rcuref_is_dead(&fph->users); +} + +static bool futex_hash_less(struct futex_private_hash *a, + struct futex_private_hash *b) +{ + /* user provided always wins */ + if (!a->custom && b->custom) + return true; + if (a->custom && !b->custom) + return false; + + /* zero-sized hash wins */ + if (!b->hash_mask) + return true; + if (!a->hash_mask) + return false; + + /* keep the biggest */ + if (a->hash_mask < b->hash_mask) + return true; + if (a->hash_mask > b->hash_mask) + return false; + + return false; /* equal */ +} + +static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags) +{ + struct mm_struct *mm = current->mm; + struct futex_private_hash *fph; + bool custom = flags & FH_CUSTOM; + int i; + + if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots))) + return -EINVAL; + + /* + * Once we've disabled the global hash there is no way back. + */ + scoped_guard(rcu) { + fph = rcu_dereference(mm->futex_phash); + if (fph && (!fph->hash_mask || fph->immutable)) { + if (custom) + return -EBUSY; + return 0; + } + } + + fph = kvzalloc(struct_size(fph, queues, hash_slots), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); + if (!fph) + return -ENOMEM; + + rcuref_init(&fph->users, 1); + fph->hash_mask = hash_slots ? hash_slots - 1 : 0; + fph->custom = custom; + fph->immutable = !!(flags & FH_IMMUTABLE); + fph->mm = mm; + + for (i = 0; i < hash_slots; i++) + futex_hash_bucket_init(&fph->queues[i], fph); + + if (custom) { + /* + * Only let prctl() wait / retry; don't unduly delay clone(). + */ +again: + wait_var_event(mm, futex_pivot_pending(mm)); + } + + scoped_guard(mutex, &mm->futex_hash_lock) { + struct futex_private_hash *free __free(kvfree) = NULL; + struct futex_private_hash *cur, *new; + + cur = rcu_dereference_protected(mm->futex_phash, + lockdep_is_held(&mm->futex_hash_lock)); + new = mm->futex_phash_new; + mm->futex_phash_new = NULL; + + if (fph) { + if (cur && !new) { + /* + * If we have an existing hash, but do not yet have + * allocated a replacement hash, drop the initial + * reference on the existing hash. + */ + futex_private_hash_put(cur); + } + + if (new) { + /* + * Two updates raced; throw out the lesser one. + */ + if (futex_hash_less(new, fph)) { + free = new; + new = fph; + } else { + free = fph; + } + } else { + new = fph; + } + fph = NULL; + } + + if (new) { + /* + * Will set mm->futex_phash_new on failure; + * futex_private_hash_get() will try again. 
+ */ + if (!__futex_pivot_hash(mm, new) && custom) + goto again; + } + } + return 0; +} + +int futex_hash_allocate_default(void) +{ + unsigned int threads, buckets, current_buckets = 0; + struct futex_private_hash *fph; + + if (!current->mm) + return 0; + + scoped_guard(rcu) { + threads = min_t(unsigned int, + get_nr_threads(current), + num_online_cpus()); + + fph = rcu_dereference(current->mm->futex_phash); + if (fph) { + if (fph->custom) + return 0; + + current_buckets = fph->hash_mask + 1; + } + } + + /* + * The default allocation will remain within + * 16 <= threads * 4 <= global hash size + */ + buckets = roundup_pow_of_two(4 * threads); + buckets = clamp(buckets, 16, futex_hashmask + 1); + + if (current_buckets >= buckets) + return 0; + + return futex_hash_allocate(buckets, 0); +} + +static int futex_hash_get_slots(void) +{ + struct futex_private_hash *fph; + + guard(rcu)(); + fph = rcu_dereference(current->mm->futex_phash); + if (fph && fph->hash_mask) + return fph->hash_mask + 1; + return 0; +} + +static int futex_hash_get_immutable(void) +{ + struct futex_private_hash *fph; + + guard(rcu)(); + fph = rcu_dereference(current->mm->futex_phash); + if (fph && fph->immutable) + return 1; + if (fph && !fph->hash_mask) + return 1; + return 0; +} + +#else + +static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags) +{ + return -EINVAL; +} + +static int futex_hash_get_slots(void) +{ + return 0; +} + +static int futex_hash_get_immutable(void) +{ + return 0; +} +#endif + +int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4) +{ + unsigned int flags = FH_CUSTOM; + int ret; + + switch (arg2) { + case PR_FUTEX_HASH_SET_SLOTS: + if (arg4 & ~FH_FLAG_IMMUTABLE) + return -EINVAL; + if (arg4 & FH_FLAG_IMMUTABLE) + flags |= FH_IMMUTABLE; + ret = futex_hash_allocate(arg3, flags); + break; + + case PR_FUTEX_HASH_GET_SLOTS: + ret = futex_hash_get_slots(); + break; + + case PR_FUTEX_HASH_GET_IMMUTABLE: + ret = futex_hash_get_immutable(); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + static int __init futex_init(void) { unsigned long hashsize, i; - unsigned int futex_shift; + unsigned int order, n; + unsigned long size; #ifdef CONFIG_BASE_SMALL hashsize = 16; #else - hashsize = roundup_pow_of_two(256 * num_possible_cpus()); + hashsize = 256 * num_possible_cpus(); + hashsize /= num_possible_nodes(); + hashsize = max(4, hashsize); + hashsize = roundup_pow_of_two(hashsize); #endif + futex_hashshift = ilog2(hashsize); + size = sizeof(struct futex_hash_bucket) * hashsize; + order = get_order(size); + + for_each_node(n) { + struct futex_hash_bucket *table; + + if (order > MAX_PAGE_ORDER) + table = vmalloc_huge_node(size, GFP_KERNEL, n); + else + table = alloc_pages_exact_nid(n, size, GFP_KERNEL); + + BUG_ON(!table); - futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), - hashsize, 0, 0, - &futex_shift, NULL, - hashsize, hashsize); - hashsize = 1UL << futex_shift; + for (i = 0; i < hashsize; i++) + futex_hash_bucket_init(&table[i], NULL); - for (i = 0; i < hashsize; i++) { - atomic_set(&futex_queues[i].waiters, 0); - plist_head_init(&futex_queues[i].chain); - spin_lock_init(&futex_queues[i].lock); + futex_queues[n] = table; } futex_hashmask = hashsize - 1; + pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n", + hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024, + order > MAX_PAGE_ORDER ? 
"vmalloc" : "linear"); return 0; } core_initcall(futex_init); diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h index 6b2f4c7eb720..fcd1617212ee 100644 --- a/kernel/futex/futex.h +++ b/kernel/futex/futex.h @@ -7,6 +7,7 @@ #include <linux/sched/wake_q.h> #include <linux/compat.h> #include <linux/uaccess.h> +#include <linux/cleanup.h> #ifdef CONFIG_PREEMPT_RT #include <linux/rcuwait.h> @@ -38,6 +39,7 @@ #define FLAGS_HAS_TIMEOUT 0x0040 #define FLAGS_NUMA 0x0080 #define FLAGS_STRICT 0x0100 +#define FLAGS_MPOL 0x0200 /* FUTEX_ to FLAGS_ */ static inline unsigned int futex_to_flags(unsigned int op) @@ -53,7 +55,7 @@ static inline unsigned int futex_to_flags(unsigned int op) return flags; } -#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_PRIVATE) +#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_NUMA | FUTEX2_MPOL | FUTEX2_PRIVATE) /* FUTEX2_ to FLAGS_ */ static inline unsigned int futex2_to_flags(unsigned int flags2) @@ -66,6 +68,9 @@ static inline unsigned int futex2_to_flags(unsigned int flags2) if (flags2 & FUTEX2_NUMA) flags |= FLAGS_NUMA; + if (flags2 & FUTEX2_MPOL) + flags |= FLAGS_MPOL; + return flags; } @@ -86,6 +91,19 @@ static inline bool futex_flags_valid(unsigned int flags) if ((flags & FLAGS_SIZE_MASK) != FLAGS_SIZE_32) return false; + /* + * Must be able to represent both FUTEX_NO_NODE and every valid nodeid + * in a futex word. + */ + if (flags & FLAGS_NUMA) { + int bits = 8 * futex_size(flags); + u64 max = ~0ULL; + + max >>= 64 - bits; + if (nr_node_ids >= max) + return false; + } + return true; } @@ -117,6 +135,7 @@ struct futex_hash_bucket { atomic_t waiters; spinlock_t lock; struct plist_head chain; + struct futex_private_hash *priv; } ____cacheline_aligned_in_smp; /* @@ -156,6 +175,7 @@ typedef void (futex_wake_fn)(struct wake_q_head *wake_q, struct futex_q *q); * @requeue_pi_key: the requeue_pi target futex key * @bitset: bitset for the optional bitmasked wakeup * @requeue_state: State field for futex_requeue_pi() + * @drop_hb_ref: Waiter should drop the extra hash bucket reference if true * @requeue_wait: RCU wait for futex_requeue_pi() (RT only) * * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so @@ -182,6 +202,7 @@ struct futex_q { union futex_key *requeue_pi_key; u32 bitset; atomic_t requeue_state; + bool drop_hb_ref; #ifdef CONFIG_PREEMPT_RT struct rcuwait requeue_wait; #endif @@ -196,12 +217,35 @@ enum futex_access { extern int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key, enum futex_access rw); - +extern void futex_q_lockptr_lock(struct futex_q *q); extern struct hrtimer_sleeper * futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, int flags, u64 range_ns); extern struct futex_hash_bucket *futex_hash(union futex_key *key); +#ifdef CONFIG_FUTEX_PRIVATE_HASH +extern void futex_hash_get(struct futex_hash_bucket *hb); +extern void futex_hash_put(struct futex_hash_bucket *hb); + +extern struct futex_private_hash *futex_private_hash(void); +extern bool futex_private_hash_get(struct futex_private_hash *fph); +extern void futex_private_hash_put(struct futex_private_hash *fph); + +#else /* !CONFIG_FUTEX_PRIVATE_HASH */ +static inline void futex_hash_get(struct futex_hash_bucket *hb) { } +static inline void futex_hash_put(struct futex_hash_bucket *hb) { } +static inline struct futex_private_hash *futex_private_hash(void) { return NULL; } +static inline bool futex_private_hash_get(void) { return false; } +static inline void futex_private_hash_put(struct futex_private_hash *fph) { } +#endif + 
+DEFINE_CLASS(hb, struct futex_hash_bucket *, + if (_T) futex_hash_put(_T), + futex_hash(key), union futex_key *key); + +DEFINE_CLASS(private_hash, struct futex_private_hash *, + if (_T) futex_private_hash_put(_T), + futex_private_hash(), void); /** * futex_match - Check whether two futex keys are equal @@ -219,9 +263,9 @@ static inline int futex_match(union futex_key *key1, union futex_key *key2) } extern int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, - struct futex_q *q, struct futex_hash_bucket **hb); -extern void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q, - struct hrtimer_sleeper *timeout); + struct futex_q *q, union futex_key *key2, + struct task_struct *task); +extern void futex_do_wait(struct futex_q *q, struct hrtimer_sleeper *timeout); extern bool __futex_wake_mark(struct futex_q *q); extern void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q); @@ -256,7 +300,7 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 * This looks a bit overkill, but generally just results in a couple * of instructions. */ -static __always_inline int futex_read_inatomic(u32 *dest, u32 __user *from) +static __always_inline int futex_get_value(u32 *dest, u32 __user *from) { u32 val; @@ -273,12 +317,26 @@ Efault: return -EFAULT; } +static __always_inline int futex_put_value(u32 val, u32 __user *to) +{ + if (can_do_masked_user_access()) + to = masked_user_access_begin(to); + else if (!user_read_access_begin(to, sizeof(*to))) + return -EFAULT; + unsafe_put_user(val, to, Efault); + user_read_access_end(); + return 0; +Efault: + user_read_access_end(); + return -EFAULT; +} + static inline int futex_get_value_locked(u32 *dest, u32 __user *from) { int ret; pagefault_disable(); - ret = futex_read_inatomic(dest, from); + ret = futex_get_value(dest, from); pagefault_enable(); return ret; @@ -354,7 +412,7 @@ static inline int futex_hb_waiters_pending(struct futex_hash_bucket *hb) #endif } -extern struct futex_hash_bucket *futex_q_lock(struct futex_q *q); +extern void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb); extern void futex_q_unlock(struct futex_hash_bucket *hb); diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c index 7a941845f7ee..dacb2330f1fb 100644 --- a/kernel/futex/pi.c +++ b/kernel/futex/pi.c @@ -806,7 +806,7 @@ handle_err: break; } - spin_lock(q->lock_ptr); + futex_q_lockptr_lock(q); raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); /* @@ -920,7 +920,6 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl struct hrtimer_sleeper timeout, *to; struct task_struct *exiting = NULL; struct rt_mutex_waiter rt_waiter; - struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; DEFINE_WAKE_Q(wake_q); int res, ret; @@ -939,151 +938,183 @@ retry: goto out; retry_private: - hb = futex_q_lock(&q); + if (1) { + CLASS(hb, hb)(&q.key); - ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, - &exiting, 0); - if (unlikely(ret)) { - /* - * Atomic work succeeded and we got the lock, - * or failed. Either way, we do _not_ block. - */ - switch (ret) { - case 1: - /* We got the lock. */ - ret = 0; - goto out_unlock_put_key; - case -EFAULT: - goto uaddr_faulted; - case -EBUSY: - case -EAGAIN: - /* - * Two reasons for this: - * - EBUSY: Task is exiting and we just wait for the - * exit to complete. - * - EAGAIN: The user space value changed. 
- */ - futex_q_unlock(hb); + futex_q_lock(&q, hb); + + ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, + &exiting, 0); + if (unlikely(ret)) { /* - * Handle the case where the owner is in the middle of - * exiting. Wait for the exit to complete otherwise - * this task might loop forever, aka. live lock. + * Atomic work succeeded and we got the lock, + * or failed. Either way, we do _not_ block. */ - wait_for_owner_exiting(ret, exiting); - cond_resched(); - goto retry; - default: - goto out_unlock_put_key; + switch (ret) { + case 1: + /* We got the lock. */ + ret = 0; + goto out_unlock_put_key; + case -EFAULT: + goto uaddr_faulted; + case -EBUSY: + case -EAGAIN: + /* + * Two reasons for this: + * - EBUSY: Task is exiting and we just wait for the + * exit to complete. + * - EAGAIN: The user space value changed. + */ + futex_q_unlock(hb); + /* + * Handle the case where the owner is in the middle of + * exiting. Wait for the exit to complete otherwise + * this task might loop forever, aka. live lock. + */ + wait_for_owner_exiting(ret, exiting); + cond_resched(); + goto retry; + default: + goto out_unlock_put_key; + } } - } - WARN_ON(!q.pi_state); + WARN_ON(!q.pi_state); - /* - * Only actually queue now that the atomic ops are done: - */ - __futex_queue(&q, hb, current); + /* + * Only actually queue now that the atomic ops are done: + */ + __futex_queue(&q, hb, current); - if (trylock) { - ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); - /* Fixup the trylock return value: */ - ret = ret ? 0 : -EWOULDBLOCK; - goto no_block; - } + if (trylock) { + ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); + /* Fixup the trylock return value: */ + ret = ret ? 0 : -EWOULDBLOCK; + goto no_block; + } - /* - * Must be done before we enqueue the waiter, here is unfortunately - * under the hb lock, but that *should* work because it does nothing. - */ - rt_mutex_pre_schedule(); + /* + * Caution; releasing @hb in-scope. The hb->lock is still locked + * while the reference is dropped. The reference can not be dropped + * after the unlock because if a user initiated resize is in progress + * then we might need to wake him. This can not be done after the + * rt_mutex_pre_schedule() invocation. The hb will remain valid because + * the thread, performing resize, will block on hb->lock during + * the requeue. + */ + futex_hash_put(no_free_ptr(hb)); + /* + * Must be done before we enqueue the waiter, here is unfortunately + * under the hb lock, but that *should* work because it does nothing. + */ + rt_mutex_pre_schedule(); - rt_mutex_init_waiter(&rt_waiter); + rt_mutex_init_waiter(&rt_waiter); - /* - * On PREEMPT_RT, when hb->lock becomes an rt_mutex, we must not - * hold it while doing rt_mutex_start_proxy(), because then it will - * include hb->lock in the blocking chain, even through we'll not in - * fact hold it while blocking. This will lead it to report -EDEADLK - * and BUG when futex_unlock_pi() interleaves with this. - * - * Therefore acquire wait_lock while holding hb->lock, but drop the - * latter before calling __rt_mutex_start_proxy_lock(). This - * interleaves with futex_unlock_pi() -- which does a similar lock - * handoff -- such that the latter can observe the futex_q::pi_state - * before __rt_mutex_start_proxy_lock() is done. 
- */ - raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); - spin_unlock(q.lock_ptr); - /* - * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter - * such that futex_unlock_pi() is guaranteed to observe the waiter when - * it sees the futex_q::pi_state. - */ - ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q); - raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q); + /* + * On PREEMPT_RT, when hb->lock becomes an rt_mutex, we must not + * hold it while doing rt_mutex_start_proxy(), because then it will + * include hb->lock in the blocking chain, even through we'll not in + * fact hold it while blocking. This will lead it to report -EDEADLK + * and BUG when futex_unlock_pi() interleaves with this. + * + * Therefore acquire wait_lock while holding hb->lock, but drop the + * latter before calling __rt_mutex_start_proxy_lock(). This + * interleaves with futex_unlock_pi() -- which does a similar lock + * handoff -- such that the latter can observe the futex_q::pi_state + * before __rt_mutex_start_proxy_lock() is done. + */ + raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); + spin_unlock(q.lock_ptr); + /* + * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter + * such that futex_unlock_pi() is guaranteed to observe the waiter when + * it sees the futex_q::pi_state. + */ + ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q); + raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q); - if (ret) { - if (ret == 1) - ret = 0; - goto cleanup; - } + if (ret) { + if (ret == 1) + ret = 0; + goto cleanup; + } - if (unlikely(to)) - hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS); + if (unlikely(to)) + hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS); - ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); + ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); cleanup: - /* - * If we failed to acquire the lock (deadlock/signal/timeout), we must - * must unwind the above, however we canont lock hb->lock because - * rt_mutex already has a waiter enqueued and hb->lock can itself try - * and enqueue an rt_waiter through rtlock. - * - * Doing the cleanup without holding hb->lock can cause inconsistent - * state between hb and pi_state, but only in the direction of not - * seeing a waiter that is leaving. - * - * See futex_unlock_pi(), it deals with this inconsistency. - * - * There be dragons here, since we must deal with the inconsistency on - * the way out (here), it is impossible to detect/warn about the race - * the other way around (missing an incoming waiter). - * - * What could possibly go wrong... - */ - if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) - ret = 0; + /* + * If we failed to acquire the lock (deadlock/signal/timeout), we must + * unwind the above, however we canont lock hb->lock because + * rt_mutex already has a waiter enqueued and hb->lock can itself try + * and enqueue an rt_waiter through rtlock. + * + * Doing the cleanup without holding hb->lock can cause inconsistent + * state between hb and pi_state, but only in the direction of not + * seeing a waiter that is leaving. + * + * See futex_unlock_pi(), it deals with this inconsistency. + * + * There be dragons here, since we must deal with the inconsistency on + * the way out (here), it is impossible to detect/warn about the race + * the other way around (missing an incoming waiter). + * + * What could possibly go wrong... 
+ */ + if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) + ret = 0; - /* - * Now that the rt_waiter has been dequeued, it is safe to use - * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up - * the - */ - spin_lock(q.lock_ptr); - /* - * Waiter is unqueued. - */ - rt_mutex_post_schedule(); + /* + * Now that the rt_waiter has been dequeued, it is safe to use + * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up + * the + */ + futex_q_lockptr_lock(&q); + /* + * Waiter is unqueued. + */ + rt_mutex_post_schedule(); no_block: - /* - * Fixup the pi_state owner and possibly acquire the lock if we - * haven't already. - */ - res = fixup_pi_owner(uaddr, &q, !ret); - /* - * If fixup_pi_owner() returned an error, propagate that. If it acquired - * the lock, clear our -ETIMEDOUT or -EINTR. - */ - if (res) - ret = (res < 0) ? res : 0; - - futex_unqueue_pi(&q); - spin_unlock(q.lock_ptr); - goto out; + /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. + */ + res = fixup_pi_owner(uaddr, &q, !ret); + /* + * If fixup_pi_owner() returned an error, propagate that. If it acquired + * the lock, clear our -ETIMEDOUT or -EINTR. + */ + if (res) + ret = (res < 0) ? res : 0; + + futex_unqueue_pi(&q); + spin_unlock(q.lock_ptr); + if (q.drop_hb_ref) { + CLASS(hb, hb)(&q.key); + /* Additional reference from futex_unlock_pi() */ + futex_hash_put(hb); + } + goto out; out_unlock_put_key: - futex_q_unlock(hb); + futex_q_unlock(hb); + goto out; + +uaddr_faulted: + futex_q_unlock(hb); + + ret = fault_in_user_writeable(uaddr); + if (ret) + goto out; + + if (!(flags & FLAGS_SHARED)) + goto retry_private; + + goto retry; + } out: if (to) { @@ -1091,18 +1122,6 @@ out: destroy_hrtimer_on_stack(&to->timer); } return ret != -EINTR ? ret : -ERESTARTNOINTR; - -uaddr_faulted: - futex_q_unlock(hb); - - ret = fault_in_user_writeable(uaddr); - if (ret) - goto out; - - if (!(flags & FLAGS_SHARED)) - goto retry_private; - - goto retry; } /* @@ -1114,7 +1133,6 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { u32 curval, uval, vpid = task_pid_vnr(current); union futex_key key = FUTEX_KEY_INIT; - struct futex_hash_bucket *hb; struct futex_q *top_waiter; int ret; @@ -1134,7 +1152,7 @@ retry: if (ret) return ret; - hb = futex_hash(&key); + CLASS(hb, hb)(&key); spin_lock(&hb->lock); retry_hb: @@ -1187,6 +1205,12 @@ retry_hb: */ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex); if (!rt_waiter) { + /* + * Acquire a reference for the leaving waiter to ensure + * valid futex_q::lock_ptr. + */ + futex_hash_get(hb); + top_waiter->drop_hb_ref = true; __futex_unqueue(top_waiter); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); goto retry_hb; diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c index b47bb764b352..c716a66f8692 100644 --- a/kernel/futex/requeue.c +++ b/kernel/futex/requeue.c @@ -87,6 +87,11 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, futex_hb_waiters_inc(hb2); plist_add(&q->list, &hb2->chain); q->lock_ptr = &hb2->lock; + /* + * hb1 and hb2 belong to the same futex_hash_bucket_private + * because if we managed get a reference on hb1 then it can't be + * replaced. Therefore we avoid put(hb1)+get(hb2) here. + */ } q->key = *key2; } @@ -231,7 +236,12 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; - + /* + * Acquire a reference for the waiter to ensure valid + * futex_q::lock_ptr. 
+ */ + futex_hash_get(hb); + q->drop_hb_ref = true; q->lock_ptr = &hb->lock; /* Signal locked state to the waiter */ @@ -371,7 +381,6 @@ int futex_requeue(u32 __user *uaddr1, unsigned int flags1, union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; int task_count = 0, ret; struct futex_pi_state *pi_state = NULL; - struct futex_hash_bucket *hb1, *hb2; struct futex_q *this, *next; DEFINE_WAKE_Q(wake_q); @@ -443,240 +452,242 @@ retry: if (requeue_pi && futex_match(&key1, &key2)) return -EINVAL; - hb1 = futex_hash(&key1); - hb2 = futex_hash(&key2); - retry_private: - futex_hb_waiters_inc(hb2); - double_lock_hb(hb1, hb2); + if (1) { + CLASS(hb, hb1)(&key1); + CLASS(hb, hb2)(&key2); - if (likely(cmpval != NULL)) { - u32 curval; + futex_hb_waiters_inc(hb2); + double_lock_hb(hb1, hb2); - ret = futex_get_value_locked(&curval, uaddr1); + if (likely(cmpval != NULL)) { + u32 curval; - if (unlikely(ret)) { - double_unlock_hb(hb1, hb2); - futex_hb_waiters_dec(hb2); + ret = futex_get_value_locked(&curval, uaddr1); - ret = get_user(curval, uaddr1); - if (ret) - return ret; + if (unlikely(ret)) { + futex_hb_waiters_dec(hb2); + double_unlock_hb(hb1, hb2); - if (!(flags1 & FLAGS_SHARED)) - goto retry_private; + ret = get_user(curval, uaddr1); + if (ret) + return ret; - goto retry; - } - if (curval != *cmpval) { - ret = -EAGAIN; - goto out_unlock; - } - } + if (!(flags1 & FLAGS_SHARED)) + goto retry_private; - if (requeue_pi) { - struct task_struct *exiting = NULL; + goto retry; + } + if (curval != *cmpval) { + ret = -EAGAIN; + goto out_unlock; + } + } - /* - * Attempt to acquire uaddr2 and wake the top waiter. If we - * intend to requeue waiters, force setting the FUTEX_WAITERS - * bit. We force this here where we are able to easily handle - * faults rather in the requeue loop below. - * - * Updates topwaiter::requeue_state if a top waiter exists. - */ - ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, - &key2, &pi_state, - &exiting, nr_requeue); + if (requeue_pi) { + struct task_struct *exiting = NULL; - /* - * At this point the top_waiter has either taken uaddr2 or - * is waiting on it. In both cases pi_state has been - * established and an initial refcount on it. In case of an - * error there's nothing. - * - * The top waiter's requeue_state is up to date: - * - * - If the lock was acquired atomically (ret == 1), then - * the state is Q_REQUEUE_PI_LOCKED. - * - * The top waiter has been dequeued and woken up and can - * return to user space immediately. The kernel/user - * space state is consistent. In case that there must be - * more waiters requeued the WAITERS bit in the user - * space futex is set so the top waiter task has to go - * into the syscall slowpath to unlock the futex. This - * will block until this requeue operation has been - * completed and the hash bucket locks have been - * dropped. - * - * - If the trylock failed with an error (ret < 0) then - * the state is either Q_REQUEUE_PI_NONE, i.e. "nothing - * happened", or Q_REQUEUE_PI_IGNORE when there was an - * interleaved early wakeup. - * - * - If the trylock did not succeed (ret == 0) then the - * state is either Q_REQUEUE_PI_IN_PROGRESS or - * Q_REQUEUE_PI_WAIT if an early wakeup interleaved. - * This will be cleaned up in the loop below, which - * cannot fail because futex_proxy_trylock_atomic() did - * the same sanity checks for requeue_pi as the loop - * below does. - */ - switch (ret) { - case 0: - /* We hold a reference on the pi state. 
*/ - break; - - case 1: /* - * futex_proxy_trylock_atomic() acquired the user space - * futex. Adjust task_count. + * Attempt to acquire uaddr2 and wake the top waiter. If we + * intend to requeue waiters, force setting the FUTEX_WAITERS + * bit. We force this here where we are able to easily handle + * faults rather in the requeue loop below. + * + * Updates topwaiter::requeue_state if a top waiter exists. */ - task_count++; - ret = 0; - break; + ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, + &key2, &pi_state, + &exiting, nr_requeue); - /* - * If the above failed, then pi_state is NULL and - * waiter::requeue_state is correct. - */ - case -EFAULT: - double_unlock_hb(hb1, hb2); - futex_hb_waiters_dec(hb2); - ret = fault_in_user_writeable(uaddr2); - if (!ret) - goto retry; - return ret; - case -EBUSY: - case -EAGAIN: - /* - * Two reasons for this: - * - EBUSY: Owner is exiting and we just wait for the - * exit to complete. - * - EAGAIN: The user space value changed. - */ - double_unlock_hb(hb1, hb2); - futex_hb_waiters_dec(hb2); /* - * Handle the case where the owner is in the middle of - * exiting. Wait for the exit to complete otherwise - * this task might loop forever, aka. live lock. + * At this point the top_waiter has either taken uaddr2 or + * is waiting on it. In both cases pi_state has been + * established and an initial refcount on it. In case of an + * error there's nothing. + * + * The top waiter's requeue_state is up to date: + * + * - If the lock was acquired atomically (ret == 1), then + * the state is Q_REQUEUE_PI_LOCKED. + * + * The top waiter has been dequeued and woken up and can + * return to user space immediately. The kernel/user + * space state is consistent. In case that there must be + * more waiters requeued the WAITERS bit in the user + * space futex is set so the top waiter task has to go + * into the syscall slowpath to unlock the futex. This + * will block until this requeue operation has been + * completed and the hash bucket locks have been + * dropped. + * + * - If the trylock failed with an error (ret < 0) then + * the state is either Q_REQUEUE_PI_NONE, i.e. "nothing + * happened", or Q_REQUEUE_PI_IGNORE when there was an + * interleaved early wakeup. + * + * - If the trylock did not succeed (ret == 0) then the + * state is either Q_REQUEUE_PI_IN_PROGRESS or + * Q_REQUEUE_PI_WAIT if an early wakeup interleaved. + * This will be cleaned up in the loop below, which + * cannot fail because futex_proxy_trylock_atomic() did + * the same sanity checks for requeue_pi as the loop + * below does. */ - wait_for_owner_exiting(ret, exiting); - cond_resched(); - goto retry; - default: - goto out_unlock; - } - } - - plist_for_each_entry_safe(this, next, &hb1->chain, list) { - if (task_count - nr_wake >= nr_requeue) - break; - - if (!futex_match(&this->key, &key1)) - continue; - - /* - * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always - * be paired with each other and no other futex ops. - * - * We should never be requeueing a futex_q with a pi_state, - * which is awaiting a futex_unlock_pi(). - */ - if ((requeue_pi && !this->rt_waiter) || - (!requeue_pi && this->rt_waiter) || - this->pi_state) { - ret = -EINVAL; - break; + switch (ret) { + case 0: + /* We hold a reference on the pi state. */ + break; + + case 1: + /* + * futex_proxy_trylock_atomic() acquired the user space + * futex. Adjust task_count. + */ + task_count++; + ret = 0; + break; + + /* + * If the above failed, then pi_state is NULL and + * waiter::requeue_state is correct. 
+ */ + case -EFAULT: + futex_hb_waiters_dec(hb2); + double_unlock_hb(hb1, hb2); + ret = fault_in_user_writeable(uaddr2); + if (!ret) + goto retry; + return ret; + case -EBUSY: + case -EAGAIN: + /* + * Two reasons for this: + * - EBUSY: Owner is exiting and we just wait for the + * exit to complete. + * - EAGAIN: The user space value changed. + */ + futex_hb_waiters_dec(hb2); + double_unlock_hb(hb1, hb2); + /* + * Handle the case where the owner is in the middle of + * exiting. Wait for the exit to complete otherwise + * this task might loop forever, aka. live lock. + */ + wait_for_owner_exiting(ret, exiting); + cond_resched(); + goto retry; + default: + goto out_unlock; + } } - /* Plain futexes just wake or requeue and are done */ - if (!requeue_pi) { - if (++task_count <= nr_wake) - this->wake(&wake_q, this); - else - requeue_futex(this, hb1, hb2, &key2); - continue; - } + plist_for_each_entry_safe(this, next, &hb1->chain, list) { + if (task_count - nr_wake >= nr_requeue) + break; - /* Ensure we requeue to the expected futex for requeue_pi. */ - if (!futex_match(this->requeue_pi_key, &key2)) { - ret = -EINVAL; - break; - } + if (!futex_match(&this->key, &key1)) + continue; - /* - * Requeue nr_requeue waiters and possibly one more in the case - * of requeue_pi if we couldn't acquire the lock atomically. - * - * Prepare the waiter to take the rt_mutex. Take a refcount - * on the pi_state and store the pointer in the futex_q - * object of the waiter. - */ - get_pi_state(pi_state); - - /* Don't requeue when the waiter is already on the way out. */ - if (!futex_requeue_pi_prepare(this, pi_state)) { /* - * Early woken waiter signaled that it is on the - * way out. Drop the pi_state reference and try the - * next waiter. @this->pi_state is still NULL. + * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always + * be paired with each other and no other futex ops. + * + * We should never be requeueing a futex_q with a pi_state, + * which is awaiting a futex_unlock_pi(). */ - put_pi_state(pi_state); - continue; - } + if ((requeue_pi && !this->rt_waiter) || + (!requeue_pi && this->rt_waiter) || + this->pi_state) { + ret = -EINVAL; + break; + } + + /* Plain futexes just wake or requeue and are done */ + if (!requeue_pi) { + if (++task_count <= nr_wake) + this->wake(&wake_q, this); + else + requeue_futex(this, hb1, hb2, &key2); + continue; + } + + /* Ensure we requeue to the expected futex for requeue_pi. */ + if (!futex_match(this->requeue_pi_key, &key2)) { + ret = -EINVAL; + break; + } - ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, - this->rt_waiter, - this->task); - - if (ret == 1) { - /* - * We got the lock. We do neither drop the refcount - * on pi_state nor clear this->pi_state because the - * waiter needs the pi_state for cleaning up the - * user space value. It will drop the refcount - * after doing so. this::requeue_state is updated - * in the wakeup as well. - */ - requeue_pi_wake_futex(this, &key2, hb2); - task_count++; - } else if (!ret) { - /* Waiter is queued, move it to hb2 */ - requeue_futex(this, hb1, hb2, &key2); - futex_requeue_pi_complete(this, 0); - task_count++; - } else { /* - * rt_mutex_start_proxy_lock() detected a potential - * deadlock when we tried to queue that waiter. - * Drop the pi_state reference which we took above - * and remove the pointer to the state from the - * waiters futex_q object. + * Requeue nr_requeue waiters and possibly one more in the case + * of requeue_pi if we couldn't acquire the lock atomically. 
+ * + * Prepare the waiter to take the rt_mutex. Take a refcount + * on the pi_state and store the pointer in the futex_q + * object of the waiter. */ - this->pi_state = NULL; - put_pi_state(pi_state); - futex_requeue_pi_complete(this, ret); - /* - * We stop queueing more waiters and let user space - * deal with the mess. - */ - break; + get_pi_state(pi_state); + + /* Don't requeue when the waiter is already on the way out. */ + if (!futex_requeue_pi_prepare(this, pi_state)) { + /* + * Early woken waiter signaled that it is on the + * way out. Drop the pi_state reference and try the + * next waiter. @this->pi_state is still NULL. + */ + put_pi_state(pi_state); + continue; + } + + ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, + this->rt_waiter, + this->task); + + if (ret == 1) { + /* + * We got the lock. We do neither drop the refcount + * on pi_state nor clear this->pi_state because the + * waiter needs the pi_state for cleaning up the + * user space value. It will drop the refcount + * after doing so. this::requeue_state is updated + * in the wakeup as well. + */ + requeue_pi_wake_futex(this, &key2, hb2); + task_count++; + } else if (!ret) { + /* Waiter is queued, move it to hb2 */ + requeue_futex(this, hb1, hb2, &key2); + futex_requeue_pi_complete(this, 0); + task_count++; + } else { + /* + * rt_mutex_start_proxy_lock() detected a potential + * deadlock when we tried to queue that waiter. + * Drop the pi_state reference which we took above + * and remove the pointer to the state from the + * waiters futex_q object. + */ + this->pi_state = NULL; + put_pi_state(pi_state); + futex_requeue_pi_complete(this, ret); + /* + * We stop queueing more waiters and let user space + * deal with the mess. + */ + break; + } } - } - /* - * We took an extra initial reference to the pi_state in - * futex_proxy_trylock_atomic(). We need to drop it here again. - */ - put_pi_state(pi_state); + /* + * We took an extra initial reference to the pi_state in + * futex_proxy_trylock_atomic(). We need to drop it here again. + */ + put_pi_state(pi_state); out_unlock: - double_unlock_hb(hb1, hb2); + futex_hb_waiters_dec(hb2); + double_unlock_hb(hb1, hb2); + } wake_up_q(&wake_q); - futex_hb_waiters_dec(hb2); return ret ? ret : task_count; } @@ -769,7 +780,6 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, { struct hrtimer_sleeper timeout, *to; struct rt_mutex_waiter rt_waiter; - struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; struct rt_mutex_base *pi_mutex; @@ -805,35 +815,28 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * Prepare to wait on uaddr. On success, it holds hb->lock and q * is initialized. */ - ret = futex_wait_setup(uaddr, val, flags, &q, &hb); + ret = futex_wait_setup(uaddr, val, flags, &q, &key2, current); if (ret) goto out; - /* - * The check above which compares uaddrs is not sufficient for - * shared futexes. We need to compare the keys: - */ - if (futex_match(&q.key, &key2)) { - futex_q_unlock(hb); - ret = -EINVAL; - goto out; - } - /* Queue the futex_q, drop the hb lock, wait for wakeup. 
*/ - futex_wait_queue(hb, &q, to); + futex_do_wait(&q, to); switch (futex_requeue_pi_wakeup_sync(&q)) { case Q_REQUEUE_PI_IGNORE: - /* The waiter is still on uaddr1 */ - spin_lock(&hb->lock); - ret = handle_early_requeue_pi_wakeup(hb, &q, to); - spin_unlock(&hb->lock); + { + CLASS(hb, hb)(&q.key); + /* The waiter is still on uaddr1 */ + spin_lock(&hb->lock); + ret = handle_early_requeue_pi_wakeup(hb, &q, to); + spin_unlock(&hb->lock); + } break; case Q_REQUEUE_PI_LOCKED: /* The requeue acquired the lock */ if (q.pi_state && (q.pi_state->owner != current)) { - spin_lock(q.lock_ptr); + futex_q_lockptr_lock(&q); ret = fixup_pi_owner(uaddr2, &q, true); /* * Drop the reference to the pi state which the @@ -860,7 +863,7 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) ret = 0; - spin_lock(q.lock_ptr); + futex_q_lockptr_lock(&q); debug_rt_mutex_free_waiter(&rt_waiter); /* * Fixup the pi_state owner and possibly acquire the lock if we @@ -892,6 +895,11 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, default: BUG(); } + if (q.drop_hb_ref) { + CLASS(hb, hb)(&q.key); + /* Additional reference from requeue_pi_wake_futex() */ + futex_hash_put(hb); + } out: if (to) { diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c index 25877d4f2f8f..e2bbe5509ec2 100644 --- a/kernel/futex/waitwake.c +++ b/kernel/futex/waitwake.c @@ -154,7 +154,6 @@ void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q) */ int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) { - struct futex_hash_bucket *hb; struct futex_q *this, *next; union futex_key key = FUTEX_KEY_INIT; DEFINE_WAKE_Q(wake_q); @@ -170,7 +169,7 @@ int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) if ((flags & FLAGS_STRICT) && !nr_wake) return 0; - hb = futex_hash(&key); + CLASS(hb, hb)(&key); /* Make sure we really have tasks to wakeup */ if (!futex_hb_waiters_pending(hb)) @@ -253,7 +252,6 @@ int futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_wake2, int op) { union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; - struct futex_hash_bucket *hb1, *hb2; struct futex_q *this, *next; int ret, op_ret; DEFINE_WAKE_Q(wake_q); @@ -266,67 +264,69 @@ retry: if (unlikely(ret != 0)) return ret; - hb1 = futex_hash(&key1); - hb2 = futex_hash(&key2); - retry_private: - double_lock_hb(hb1, hb2); - op_ret = futex_atomic_op_inuser(op, uaddr2); - if (unlikely(op_ret < 0)) { - double_unlock_hb(hb1, hb2); - - if (!IS_ENABLED(CONFIG_MMU) || - unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) { - /* - * we don't get EFAULT from MMU faults if we don't have - * an MMU, but we might get them from range checking - */ - ret = op_ret; - return ret; - } - - if (op_ret == -EFAULT) { - ret = fault_in_user_writeable(uaddr2); - if (ret) + if (1) { + CLASS(hb, hb1)(&key1); + CLASS(hb, hb2)(&key2); + + double_lock_hb(hb1, hb2); + op_ret = futex_atomic_op_inuser(op, uaddr2); + if (unlikely(op_ret < 0)) { + double_unlock_hb(hb1, hb2); + + if (!IS_ENABLED(CONFIG_MMU) || + unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) { + /* + * we don't get EFAULT from MMU faults if we don't have + * an MMU, but we might get them from range checking + */ + ret = op_ret; return ret; - } - - cond_resched(); - if (!(flags & FLAGS_SHARED)) - goto retry_private; - goto retry; - } + } - plist_for_each_entry_safe(this, next, &hb1->chain, list) { - if (futex_match (&this->key, &key1)) { - if 
(this->pi_state || this->rt_waiter) { - ret = -EINVAL; - goto out_unlock; + if (op_ret == -EFAULT) { + ret = fault_in_user_writeable(uaddr2); + if (ret) + return ret; } - this->wake(&wake_q, this); - if (++ret >= nr_wake) - break; + + cond_resched(); + if (!(flags & FLAGS_SHARED)) + goto retry_private; + goto retry; } - } - if (op_ret > 0) { - op_ret = 0; - plist_for_each_entry_safe(this, next, &hb2->chain, list) { - if (futex_match (&this->key, &key2)) { + plist_for_each_entry_safe(this, next, &hb1->chain, list) { + if (futex_match(&this->key, &key1)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; goto out_unlock; } this->wake(&wake_q, this); - if (++op_ret >= nr_wake2) + if (++ret >= nr_wake) break; } } - ret += op_ret; - } + + if (op_ret > 0) { + op_ret = 0; + plist_for_each_entry_safe(this, next, &hb2->chain, list) { + if (futex_match(&this->key, &key2)) { + if (this->pi_state || this->rt_waiter) { + ret = -EINVAL; + goto out_unlock; + } + this->wake(&wake_q, this); + if (++op_ret >= nr_wake2) + break; + } + } + ret += op_ret; + } out_unlock: - double_unlock_hb(hb1, hb2); + double_unlock_hb(hb1, hb2); + } wake_up_q(&wake_q); return ret; } @@ -334,23 +334,12 @@ out_unlock: static long futex_wait_restart(struct restart_block *restart); /** - * futex_wait_queue() - futex_queue() and wait for wakeup, timeout, or signal - * @hb: the futex hash bucket, must be locked by the caller + * futex_do_wait() - wait for wakeup, timeout, or signal * @q: the futex_q to queue up on * @timeout: the prepared hrtimer_sleeper, or null for no timeout */ -void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q, - struct hrtimer_sleeper *timeout) +void futex_do_wait(struct futex_q *q, struct hrtimer_sleeper *timeout) { - /* - * The task state is guaranteed to be set before another task can - * wake it. set_current_state() is implemented using smp_store_mb() and - * futex_queue() calls spin_unlock() upon completion, both serializing - * access to the hash list and forcing another memory barrier. - */ - set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); - futex_queue(q, hb, current); - /* Arm the timer */ if (timeout) hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS); @@ -412,12 +401,17 @@ int futex_unqueue_multiple(struct futex_vector *v, int count) */ int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken) { - struct futex_hash_bucket *hb; bool retry = false; int ret, i; u32 uval; /* + * Make sure to have a reference on the private_hash such that we + * don't block on rehash after changing the task state below. + */ + guard(private_hash)(); + + /* * Enqueuing multiple futexes is tricky, because we need to enqueue * each futex on the list before dealing with the next one to avoid * deadlocking on the hash bucket. But, before enqueuing, we need to @@ -451,20 +445,24 @@ retry: struct futex_q *q = &vs[i].q; u32 val = vs[i].w.val; - hb = futex_q_lock(q); - ret = futex_get_value_locked(&uval, uaddr); + if (1) { + CLASS(hb, hb)(&q->key); - if (!ret && uval == val) { - /* - * The bucket lock can't be held while dealing with the - * next futex. Queue each futex at this moment so hb can - * be unlocked. - */ - futex_queue(q, hb, current); - continue; - } + futex_q_lock(q, hb); + ret = futex_get_value_locked(&uval, uaddr); + + if (!ret && uval == val) { + /* + * The bucket lock can't be held while dealing with the + * next futex. Queue each futex at this moment so hb can + * be unlocked. 
+ */ + futex_queue(q, hb, current); + continue; + } - futex_q_unlock(hb); + futex_q_unlock(hb); + } __set_current_state(TASK_RUNNING); /* @@ -578,7 +576,8 @@ int futex_wait_multiple(struct futex_vector *vs, unsigned int count, * @val: the expected value * @flags: futex flags (FLAGS_SHARED, etc.) * @q: the associated futex_q - * @hb: storage for hash_bucket pointer to be returned to caller + * @key2: the second futex_key if used for requeue PI + * @task: Task queueing this futex * * Setup the futex_q and locate the hash_bucket. Get the futex value and * compare it with the expected value. Handle atomic faults internally. @@ -586,10 +585,12 @@ int futex_wait_multiple(struct futex_vector *vs, unsigned int count, * * Return: * - 0 - uaddr contains val and hb has been locked; - * - <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked + * - <0 - On error and the hb is unlocked. A possible reason: the uaddr can not + * be read, does not contain the expected value or is not properly aligned. */ int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, - struct futex_q *q, struct futex_hash_bucket **hb) + struct futex_q *q, union futex_key *key2, + struct task_struct *task) { u32 uval; int ret; @@ -618,26 +619,45 @@ retry: return ret; retry_private: - *hb = futex_q_lock(q); + if (1) { + CLASS(hb, hb)(&q->key); - ret = futex_get_value_locked(&uval, uaddr); + futex_q_lock(q, hb); - if (ret) { - futex_q_unlock(*hb); + ret = futex_get_value_locked(&uval, uaddr); - ret = get_user(uval, uaddr); - if (ret) - return ret; + if (ret) { + futex_q_unlock(hb); - if (!(flags & FLAGS_SHARED)) - goto retry_private; + ret = get_user(uval, uaddr); + if (ret) + return ret; - goto retry; - } + if (!(flags & FLAGS_SHARED)) + goto retry_private; + + goto retry; + } - if (uval != val) { - futex_q_unlock(*hb); - ret = -EWOULDBLOCK; + if (uval != val) { + futex_q_unlock(hb); + return -EWOULDBLOCK; + } + + if (key2 && futex_match(&q->key, key2)) { + futex_q_unlock(hb); + return -EINVAL; + } + + /* + * The task state is guaranteed to be set before another task can + * wake it. set_current_state() is implemented using smp_store_mb() and + * futex_queue() calls spin_unlock() upon completion, both serializing + * access to the hash list and forcing another memory barrier. + */ + if (task == current) + set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); + futex_queue(q, hb, task); } return ret; @@ -647,7 +667,6 @@ int __futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, struct hrtimer_sleeper *to, u32 bitset) { struct futex_q q = futex_q_init; - struct futex_hash_bucket *hb; int ret; if (!bitset) @@ -660,12 +679,12 @@ retry: * Prepare to wait on uaddr. On success, it holds hb->lock and q * is initialized. */ - ret = futex_wait_setup(uaddr, val, flags, &q, &hb); + ret = futex_wait_setup(uaddr, val, flags, &q, NULL, current); if (ret) return ret; /* futex_queue and wait for wakeup, timeout, or a signal. */ - futex_wait_queue(hb, &q, to); + futex_do_wait(&q, to); /* If we were woken (and unqueued), we succeeded, whatever. */ if (!futex_unqueue(&q)) diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index ae60cae24e9a..d0af8a8b3ae6 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -43,18 +43,16 @@ unsigned long probe_irq_on(void) * flush such a longstanding irq before considering it as spurious. 
*/ for_each_irq_desc_reverse(i, desc) { - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (!desc->action && irq_settings_can_probe(desc)) { /* * Some chips need to know about probing in * progress: */ if (desc->irq_data.chip->irq_set_type) - desc->irq_data.chip->irq_set_type(&desc->irq_data, - IRQ_TYPE_PROBE); + desc->irq_data.chip->irq_set_type(&desc->irq_data, IRQ_TYPE_PROBE); irq_activate_and_startup(desc, IRQ_NORESEND); } - raw_spin_unlock_irq(&desc->lock); } /* Wait for longstanding interrupts to trigger. */ @@ -66,13 +64,12 @@ unsigned long probe_irq_on(void) * happened in the previous stage, it may have masked itself) */ for_each_irq_desc_reverse(i, desc) { - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (!desc->action && irq_settings_can_probe(desc)) { desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; if (irq_activate_and_startup(desc, IRQ_NORESEND)) desc->istate |= IRQS_PENDING; } - raw_spin_unlock_irq(&desc->lock); } /* @@ -84,18 +81,16 @@ unsigned long probe_irq_on(void) * Now filter out any obviously spurious interrupts */ for_each_irq_desc(i, desc) { - raw_spin_lock_irq(&desc->lock); - + guard(raw_spinlock_irq)(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { /* It triggered already - consider it spurious. */ if (!(desc->istate & IRQS_WAITING)) { desc->istate &= ~IRQS_AUTODETECT; irq_shutdown_and_deactivate(desc); - } else - if (i < 32) - mask |= 1 << i; + } else if (i < 32) { + mask |= 1 << i; + } } - raw_spin_unlock_irq(&desc->lock); } return mask; @@ -121,7 +116,7 @@ unsigned int probe_irq_mask(unsigned long val) int i; for_each_irq_desc(i, desc) { - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { if (i < 16 && !(desc->istate & IRQS_WAITING)) mask |= 1 << i; @@ -129,7 +124,6 @@ unsigned int probe_irq_mask(unsigned long val) desc->istate &= ~IRQS_AUTODETECT; irq_shutdown_and_deactivate(desc); } - raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); @@ -160,8 +154,7 @@ int probe_irq_off(unsigned long val) struct irq_desc *desc; for_each_irq_desc(i, desc) { - raw_spin_lock_irq(&desc->lock); - + guard(raw_spinlock_irq)(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { if (!(desc->istate & IRQS_WAITING)) { if (!nr_of_irqs) @@ -171,7 +164,6 @@ int probe_irq_off(unsigned long val) desc->istate &= ~IRQS_AUTODETECT; irq_shutdown_and_deactivate(desc); } - raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 36cf1b09cc84..b0e0a7332993 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -34,98 +34,80 @@ struct irqaction chained_action = { }; /** - * irq_set_chip - set the irq chip for an irq - * @irq: irq number - * @chip: pointer to irq chip description structure + * irq_set_chip - set the irq chip for an irq + * @irq: irq number + * @chip: pointer to irq chip description structure */ int irq_set_chip(unsigned int irq, const struct irq_chip *chip) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); + int ret = -EINVAL; - if (!desc) - return -EINVAL; - - desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); - irq_put_desc_unlock(desc, flags); - /* - * For !CONFIG_SPARSE_IRQ make the irq show up in - * allocated_irqs. 
- */ - irq_mark_irq(irq); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); + ret = 0; + } + /* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */ + if (!ret) + irq_mark_irq(irq); + return ret; } EXPORT_SYMBOL(irq_set_chip); /** - * irq_set_irq_type - set the irq trigger type for an irq - * @irq: irq number - * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h + * irq_set_irq_type - set the irq trigger type for an irq + * @irq: irq number + * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h */ int irq_set_irq_type(unsigned int irq, unsigned int type) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - int ret = 0; - - if (!desc) - return -EINVAL; - - ret = __irq_set_trigger(desc, type); - irq_put_desc_busunlock(desc, flags); - return ret; + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) + return __irq_set_trigger(scoped_irqdesc, type); + return -EINVAL; } EXPORT_SYMBOL(irq_set_irq_type); /** - * irq_set_handler_data - set irq handler data for an irq - * @irq: Interrupt number - * @data: Pointer to interrupt specific data + * irq_set_handler_data - set irq handler data for an irq + * @irq: Interrupt number + * @data: Pointer to interrupt specific data * - * Set the hardware irq controller data for an irq + * Set the hardware irq controller data for an irq */ int irq_set_handler_data(unsigned int irq, void *data) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return -EINVAL; - desc->irq_common_data.handler_data = data; - irq_put_desc_unlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->irq_common_data.handler_data = data; + return 0; + } + return -EINVAL; } EXPORT_SYMBOL(irq_set_handler_data); /** - * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset - * @irq_base: Interrupt number base - * @irq_offset: Interrupt number offset - * @entry: Pointer to MSI descriptor data + * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset + * @irq_base: Interrupt number base + * @irq_offset: Interrupt number offset + * @entry: Pointer to MSI descriptor data * - * Set the MSI descriptor entry for an irq at offset + * Set the MSI descriptor entry for an irq at offset */ -int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, - struct msi_desc *entry) -{ - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - - if (!desc) - return -EINVAL; - desc->irq_common_data.msi_desc = entry; - if (entry && !irq_offset) - entry->irq = irq_base; - irq_put_desc_unlock(desc, flags); - return 0; +int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry) +{ + scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) { + scoped_irqdesc->irq_common_data.msi_desc = entry; + if (entry && !irq_offset) + entry->irq = irq_base; + return 0; + } + return -EINVAL; } /** - * irq_set_msi_desc - set MSI descriptor data for an irq - * @irq: Interrupt number - * @entry: Pointer to MSI descriptor data + * irq_set_msi_desc - set MSI descriptor data for an irq + * @irq: Interrupt number + * @entry: Pointer to MSI descriptor data * - * Set the MSI descriptor entry for an irq + * Set the MSI descriptor entry for an irq */ int irq_set_msi_desc(unsigned int irq, struct msi_desc 
*entry) { @@ -133,22 +115,19 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) } /** - * irq_set_chip_data - set irq chip data for an irq - * @irq: Interrupt number - * @data: Pointer to chip specific data + * irq_set_chip_data - set irq chip data for an irq + * @irq: Interrupt number + * @data: Pointer to chip specific data * - * Set the hardware irq chip data for an irq + * Set the hardware irq chip data for an irq */ int irq_set_chip_data(unsigned int irq, void *data) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return -EINVAL; - desc->irq_data.chip_data = data; - irq_put_desc_unlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->irq_data.chip_data = data; + return 0; + } + return -EINVAL; } EXPORT_SYMBOL(irq_set_chip_data); @@ -223,6 +202,19 @@ __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, return IRQ_STARTUP_ABORT; return IRQ_STARTUP_MANAGED; } + +void irq_startup_managed(struct irq_desc *desc) +{ + /* + * Only start it up when the disable depth is 1, so that a disable, + * hotunplug, hotplug sequence does not end up enabling it during + * hotplug unconditionally. + */ + desc->depth--; + if (!desc->depth) + irq_startup(desc, IRQ_RESEND, IRQ_START_COND); +} + #else static __always_inline int __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, @@ -290,6 +282,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) ret = __irq_startup(desc); break; case IRQ_STARTUP_ABORT: + desc->depth = 1; irqd_set_managed_shutdown(d); return 0; } @@ -322,7 +315,13 @@ void irq_shutdown(struct irq_desc *desc) { if (irqd_is_started(&desc->irq_data)) { clear_irq_resend(desc); - desc->depth = 1; + /* + * Increment disable depth, so that a managed shutdown on + * CPU hotunplug preserves the actual disabled state when the + * CPU comes back online. See irq_startup_managed(). + */ + desc->depth++; + if (desc->irq_data.chip->irq_shutdown) { desc->irq_data.chip->irq_shutdown(&desc->irq_data); irq_state_set_disabled(desc); @@ -450,48 +449,6 @@ void unmask_threaded_irq(struct irq_desc *desc) unmask_irq(desc); } -/* - * handle_nested_irq - Handle a nested irq from a irq thread - * @irq: the interrupt number - * - * Handle interrupts which are nested into a threaded interrupt - * handler. The handler function is called inside the calling - * threads context. 
- */ -void handle_nested_irq(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irqaction *action; - irqreturn_t action_ret; - - might_sleep(); - - raw_spin_lock_irq(&desc->lock); - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - action = desc->action; - if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - raw_spin_unlock_irq(&desc->lock); - return; - } - - kstat_incr_irqs_this_cpu(desc); - atomic_inc(&desc->threads_active); - raw_spin_unlock_irq(&desc->lock); - - action_ret = IRQ_NONE; - for_each_action_of_desc(desc, action) - action_ret |= action->thread_fn(action->irq, action->dev_id); - - if (!irq_settings_no_debug(desc)) - note_interrupt(desc, action_ret); - - wake_threads_waitq(desc); -} -EXPORT_SYMBOL_GPL(handle_nested_irq); - static bool irq_check_poll(struct irq_desc *desc) { if (!(desc->istate & IRQS_POLL_INPROGRESS)) @@ -499,7 +456,7 @@ static bool irq_check_poll(struct irq_desc *desc) return irq_wait_for_poll(desc); } -static bool irq_may_run(struct irq_desc *desc) +static bool irq_can_handle_pm(struct irq_desc *desc) { unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; @@ -524,77 +481,111 @@ static bool irq_may_run(struct irq_desc *desc) return irq_check_poll(desc); } +static inline bool irq_can_handle_actions(struct irq_desc *desc) +{ + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { + desc->istate |= IRQS_PENDING; + return false; + } + return true; +} + +static inline bool irq_can_handle(struct irq_desc *desc) +{ + if (!irq_can_handle_pm(desc)) + return false; + + return irq_can_handle_actions(desc); +} + /** - * handle_simple_irq - Simple and software-decoded IRQs. - * @desc: the interrupt description structure for this irq - * - * Simple interrupts are either sent from a demultiplexing interrupt - * handler or come from hardware, where no interrupt hardware control - * is necessary. + * handle_nested_irq - Handle a nested irq from a irq thread + * @irq: the interrupt number * - * Note: The caller is expected to handle the ack, clear, mask and - * unmask issues if necessary. + * Handle interrupts which are nested into a threaded interrupt + * handler. The handler function is called inside the calling threads + * context. */ -void handle_simple_irq(struct irq_desc *desc) +void handle_nested_irq(unsigned int irq) { - raw_spin_lock(&desc->lock); + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + irqreturn_t action_ret; - if (!irq_may_run(desc)) - goto out_unlock; + might_sleep(); - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + scoped_guard(raw_spinlock_irq, &desc->lock) { + if (!irq_can_handle_actions(desc)) + return; - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - goto out_unlock; + action = desc->action; + kstat_incr_irqs_this_cpu(desc); + atomic_inc(&desc->threads_active); } + action_ret = IRQ_NONE; + for_each_action_of_desc(desc, action) + action_ret |= action->thread_fn(action->irq, action->dev_id); + + if (!irq_settings_no_debug(desc)) + note_interrupt(desc, action_ret); + + wake_threads_waitq(desc); +} +EXPORT_SYMBOL_GPL(handle_nested_irq); + +/** + * handle_simple_irq - Simple and software-decoded IRQs. + * @desc: the interrupt description structure for this irq + * + * Simple interrupts are either sent from a demultiplexing interrupt + * handler or come from hardware, where no interrupt hardware control is + * necessary. 
+ * + * Note: The caller is expected to handle the ack, clear, mask and unmask + * issues if necessary. + */ +void handle_simple_irq(struct irq_desc *desc) +{ + guard(raw_spinlock)(&desc->lock); + + if (!irq_can_handle(desc)) + return; + kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); - -out_unlock: - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_simple_irq); /** - * handle_untracked_irq - Simple and software-decoded IRQs. - * @desc: the interrupt description structure for this irq + * handle_untracked_irq - Simple and software-decoded IRQs. + * @desc: the interrupt description structure for this irq * - * Untracked interrupts are sent from a demultiplexing interrupt - * handler when the demultiplexer does not know which device it its - * multiplexed irq domain generated the interrupt. IRQ's handled - * through here are not subjected to stats tracking, randomness, or - * spurious interrupt detection. + * Untracked interrupts are sent from a demultiplexing interrupt handler + * when the demultiplexer does not know which device it its multiplexed irq + * domain generated the interrupt. IRQ's handled through here are not + * subjected to stats tracking, randomness, or spurious interrupt + * detection. * - * Note: Like handle_simple_irq, the caller is expected to handle - * the ack, clear, mask and unmask issues if necessary. + * Note: Like handle_simple_irq, the caller is expected to handle the ack, + * clear, mask and unmask issues if necessary. */ void handle_untracked_irq(struct irq_desc *desc) { - raw_spin_lock(&desc->lock); - - if (!irq_may_run(desc)) - goto out_unlock; - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + scoped_guard(raw_spinlock, &desc->lock) { + if (!irq_can_handle(desc)) + return; - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - goto out_unlock; + desc->istate &= ~IRQS_PENDING; + irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); } - desc->istate &= ~IRQS_PENDING; - irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); - raw_spin_unlock(&desc->lock); - __handle_irq_event_percpu(desc); - raw_spin_lock(&desc->lock); - irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); - -out_unlock: - raw_spin_unlock(&desc->lock); + scoped_guard(raw_spinlock, &desc->lock) + irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); } EXPORT_SYMBOL_GPL(handle_untracked_irq); @@ -617,40 +608,26 @@ static void cond_unmask_irq(struct irq_desc *desc) } /** - * handle_level_irq - Level type irq handler - * @desc: the interrupt description structure for this irq + * handle_level_irq - Level type irq handler + * @desc: the interrupt description structure for this irq * - * Level type interrupts are active as long as the hardware line has - * the active level. This may require to mask the interrupt and unmask - * it after the associated handler has acknowledged the device, so the - * interrupt line is back to inactive. + * Level type interrupts are active as long as the hardware line has the + * active level. This may require to mask the interrupt and unmask it after + * the associated handler has acknowledged the device, so the interrupt + * line is back to inactive. 
*/ void handle_level_irq(struct irq_desc *desc) { - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); mask_ack_irq(desc); - if (!irq_may_run(desc)) - goto out_unlock; - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - /* - * If its disabled or no action available - * keep it masked and get out of here - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - goto out_unlock; - } + if (!irq_can_handle(desc)) + return; kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); cond_unmask_irq(desc); - -out_unlock: - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_level_irq); @@ -675,42 +652,43 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) } } +static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data) +{ + if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) + chip->irq_eoi(data); +} + /** - * handle_fasteoi_irq - irq handler for transparent controllers - * @desc: the interrupt description structure for this irq + * handle_fasteoi_irq - irq handler for transparent controllers + * @desc: the interrupt description structure for this irq * - * Only a single callback will be issued to the chip: an ->eoi() - * call when the interrupt has been serviced. This enables support - * for modern forms of interrupt handlers, which handle the flow - * details in hardware, transparently. + * Only a single callback will be issued to the chip: an ->eoi() call when + * the interrupt has been serviced. This enables support for modern forms + * of interrupt handlers, which handle the flow details in hardware, + * transparently. */ void handle_fasteoi_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); /* * When an affinity change races with IRQ handling, the next interrupt * can arrive on the new CPU before the original CPU has completed * handling the previous one - it may need to be resent. */ - if (!irq_may_run(desc)) { + if (!irq_can_handle_pm(desc)) { if (irqd_needs_resend_when_in_progress(&desc->irq_data)) desc->istate |= IRQS_PENDING; - goto out; + cond_eoi_irq(chip, &desc->irq_data); + return; } - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - /* - * If its disabled or no action available - * then mask it and get out of here: - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; + if (!irq_can_handle_actions(desc)) { mask_irq(desc); - goto out; + cond_eoi_irq(chip, &desc->irq_data); + return; } kstat_incr_irqs_this_cpu(desc); @@ -726,13 +704,6 @@ void handle_fasteoi_irq(struct irq_desc *desc) */ if (unlikely(desc->istate & IRQS_PENDING)) check_irq_resend(desc, false); - - raw_spin_unlock(&desc->lock); - return; -out: - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) - chip->irq_eoi(&desc->irq_data); - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_irq); @@ -770,40 +741,27 @@ void handle_fasteoi_nmi(struct irq_desc *desc) EXPORT_SYMBOL_GPL(handle_fasteoi_nmi); /** - * handle_edge_irq - edge type IRQ handler - * @desc: the interrupt description structure for this irq + * handle_edge_irq - edge type IRQ handler + * @desc: the interrupt description structure for this irq * - * Interrupt occurs on the falling and/or rising edge of a hardware - * signal. The occurrence is latched into the irq controller hardware - * and must be acked in order to be reenabled. 
After the ack another - * interrupt can happen on the same source even before the first one - * is handled by the associated event handler. If this happens it - * might be necessary to disable (mask) the interrupt depending on the - * controller hardware. This requires to reenable the interrupt inside - * of the loop which handles the interrupts which have arrived while - * the handler was running. If all pending interrupts are handled, the - * loop is left. + * Interrupt occurs on the falling and/or rising edge of a hardware + * signal. The occurrence is latched into the irq controller hardware and + * must be acked in order to be reenabled. After the ack another interrupt + * can happen on the same source even before the first one is handled by + * the associated event handler. If this happens it might be necessary to + * disable (mask) the interrupt depending on the controller hardware. This + * requires to reenable the interrupt inside of the loop which handles the + * interrupts which have arrived while the handler was running. If all + * pending interrupts are handled, the loop is left. */ void handle_edge_irq(struct irq_desc *desc) { - raw_spin_lock(&desc->lock); - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - if (!irq_may_run(desc)) { - desc->istate |= IRQS_PENDING; - mask_ack_irq(desc); - goto out_unlock; - } + guard(raw_spinlock)(&desc->lock); - /* - * If its disabled or no action available then mask it and get - * out of here. - */ - if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { + if (!irq_can_handle(desc)) { desc->istate |= IRQS_PENDING; mask_ack_irq(desc); - goto out_unlock; + return; } kstat_incr_irqs_this_cpu(desc); @@ -814,7 +772,7 @@ void handle_edge_irq(struct irq_desc *desc) do { if (unlikely(!desc->action)) { mask_irq(desc); - goto out_unlock; + return; } /* @@ -830,11 +788,7 @@ void handle_edge_irq(struct irq_desc *desc) handle_irq_event(desc); - } while ((desc->istate & IRQS_PENDING) && - !irqd_irq_disabled(&desc->irq_data)); - -out_unlock: - raw_spin_unlock(&desc->lock); + } while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data)); } EXPORT_SYMBOL(handle_edge_irq); @@ -1007,35 +961,23 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, } } -void -__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, - const char *name) +void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + const char *name) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); - - if (!desc) - return; - - __irq_do_set_handler(desc, handle, is_chained, name); - irq_put_desc_busunlock(desc, flags); + scoped_irqdesc_get_and_lock(irq, 0) + __irq_do_set_handler(scoped_irqdesc, handle, is_chained, name); } EXPORT_SYMBOL_GPL(__irq_set_handler); -void -irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, - void *data) +void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, + void *data) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); - - if (!desc) - return; - - desc->irq_common_data.handler_data = data; - __irq_do_set_handler(desc, handle, 1, NULL); + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; - irq_put_desc_busunlock(desc, flags); + desc->irq_common_data.handler_data = data; + __irq_do_set_handler(desc, handle, 1, NULL); + } } EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); @@ -1050,38 +992,34 @@ 
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { - unsigned long flags, trigger, tmp; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return; - - /* - * Warn when a driver sets the no autoenable flag on an already - * active interrupt. - */ - WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); - - irq_settings_clr_and_set(desc, clr, set); + scoped_irqdesc_get_and_lock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; + unsigned long trigger, tmp; + /* + * Warn when a driver sets the no autoenable flag on an already + * active interrupt. + */ + WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); - trigger = irqd_get_trigger_type(&desc->irq_data); + irq_settings_clr_and_set(desc, clr, set); - irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | - IRQD_TRIGGER_MASK | IRQD_LEVEL); - if (irq_settings_has_no_balance_set(desc)) - irqd_set(&desc->irq_data, IRQD_NO_BALANCING); - if (irq_settings_is_per_cpu(desc)) - irqd_set(&desc->irq_data, IRQD_PER_CPU); - if (irq_settings_is_level(desc)) - irqd_set(&desc->irq_data, IRQD_LEVEL); + trigger = irqd_get_trigger_type(&desc->irq_data); - tmp = irq_settings_get_trigger_mask(desc); - if (tmp != IRQ_TYPE_NONE) - trigger = tmp; + irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | + IRQD_TRIGGER_MASK | IRQD_LEVEL); + if (irq_settings_has_no_balance_set(desc)) + irqd_set(&desc->irq_data, IRQD_NO_BALANCING); + if (irq_settings_is_per_cpu(desc)) + irqd_set(&desc->irq_data, IRQD_PER_CPU); + if (irq_settings_is_level(desc)) + irqd_set(&desc->irq_data, IRQD_LEVEL); - irqd_set(&desc->irq_data, trigger); + tmp = irq_settings_get_trigger_mask(desc); + if (tmp != IRQ_TYPE_NONE) + trigger = tmp; - irq_put_desc_unlock(desc, flags); + irqd_set(&desc->irq_data, trigger); + } } EXPORT_SYMBOL_GPL(irq_modify_status); @@ -1094,25 +1032,21 @@ EXPORT_SYMBOL_GPL(irq_modify_status); */ void irq_cpu_online(void) { - struct irq_desc *desc; - struct irq_chip *chip; - unsigned long flags; unsigned int irq; for_each_active_irq(irq) { - desc = irq_to_desc(irq); + struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip; + if (!desc) continue; - raw_spin_lock_irqsave(&desc->lock, flags); - + guard(raw_spinlock_irqsave)(&desc->lock); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_online && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_online(&desc->irq_data); - - raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -1124,25 +1058,21 @@ void irq_cpu_online(void) */ void irq_cpu_offline(void) { - struct irq_desc *desc; - struct irq_chip *chip; - unsigned long flags; unsigned int irq; for_each_active_irq(irq) { - desc = irq_to_desc(irq); + struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip; + if (!desc) continue; - raw_spin_lock_irqsave(&desc->lock, flags); - + guard(raw_spinlock_irqsave)(&desc->lock); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_offline && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_offline(&desc->irq_data); - - raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif @@ -1151,102 +1081,69 @@ void irq_cpu_offline(void) #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS /** - * handle_fasteoi_ack_irq - irq handler for edge hierarchy - * stacked on transparent controllers + * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on + * transparent 
controllers * - * @desc: the interrupt description structure for this irq + * @desc: the interrupt description structure for this irq * - * Like handle_fasteoi_irq(), but for use with hierarchy where - * the irq_chip also needs to have its ->irq_ack() function - * called. + * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip + * also needs to have its ->irq_ack() function called. */ void handle_fasteoi_ack_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; - raw_spin_lock(&desc->lock); - - if (!irq_may_run(desc)) - goto out; + guard(raw_spinlock)(&desc->lock); - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + if (!irq_can_handle_pm(desc)) { + cond_eoi_irq(chip, &desc->irq_data); + return; + } - /* - * If its disabled or no action available - * then mask it and get out of here: - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; + if (unlikely(!irq_can_handle_actions(desc))) { mask_irq(desc); - goto out; + cond_eoi_irq(chip, &desc->irq_data); + return; } kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT) mask_irq(desc); - /* Start handling the irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); - - raw_spin_unlock(&desc->lock); - return; -out: - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) - chip->irq_eoi(&desc->irq_data); - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); /** - * handle_fasteoi_mask_irq - irq handler for level hierarchy - * stacked on transparent controllers + * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on + * transparent controllers * - * @desc: the interrupt description structure for this irq + * @desc: the interrupt description structure for this irq * - * Like handle_fasteoi_irq(), but for use with hierarchy where - * the irq_chip also needs to have its ->irq_mask_ack() function - * called. + * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip + * also needs to have its ->irq_mask_ack() function called. 
*/ void handle_fasteoi_mask_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); mask_ack_irq(desc); - if (!irq_may_run(desc)) - goto out; - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - /* - * If its disabled or no action available - * then mask it and get out of here: - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - mask_irq(desc); - goto out; + if (!irq_can_handle(desc)) { + cond_eoi_irq(chip, &desc->irq_data); + return; } kstat_incr_irqs_this_cpu(desc); - if (desc->istate & IRQS_ONESHOT) - mask_irq(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); - - raw_spin_unlock(&desc->lock); - return; -out: - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) - chip->irq_eoi(&desc->irq_data); - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 15a7654eff68..f07529ae4895 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -177,9 +177,8 @@ void irq_migrate_all_off_this_cpu(void) bool affinity_broken; desc = irq_to_desc(irq); - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); + scoped_guard(raw_spinlock, &desc->lock) + affinity_broken = migrate_one_irq(desc); if (affinity_broken) { pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n", @@ -219,7 +218,7 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu) return; if (irqd_is_managed_and_shutdown(data)) - irq_startup(desc, IRQ_RESEND, IRQ_START_COND); + irq_startup_managed(desc); /* * If the interrupt can only be directed to a single target @@ -244,9 +243,8 @@ int irq_affinity_online_cpu(unsigned int cpu) irq_lock_sparse(); for_each_active_irq(irq) { desc = irq_to_desc(irq); - raw_spin_lock_irq(&desc->lock); - irq_restore_affinity_of_irq(desc, cpu); - raw_spin_unlock_irq(&desc->lock); + scoped_guard(raw_spinlock_irq, &desc->lock) + irq_restore_affinity_of_irq(desc, cpu); } irq_unlock_sparse(); diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index ca142b9a4db3..3527defd2890 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -160,7 +160,7 @@ static int irq_debug_show(struct seq_file *m, void *p) struct irq_desc *desc = m->private; struct irq_data *data; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); data = irq_desc_get_irq_data(desc); seq_printf(m, "handler: %ps\n", desc->handle_irq); seq_printf(m, "device: %s\n", desc->dev_name); @@ -178,7 +178,6 @@ static int irq_debug_show(struct seq_file *m, void *p) seq_printf(m, "node: %d\n", irq_data_get_node(data)); irq_debug_show_masks(m, desc); irq_debug_show_data(m, data, 0); - raw_spin_unlock_irq(&desc->lock); return 0; } @@ -226,12 +225,12 @@ void irq_debugfs_copy_devname(int irq, struct device *dev) void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc) { - char name [10]; + char name [12]; if (!irq_dir || !desc || desc->debugfs_file) return; - sprintf(name, "%d", irq); + sprintf(name, "%u", irq); desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc, &dfs_irq_ops); } diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index c4a8bca5f2b0..bf59e37d650a 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c @@ -40,10 +40,9 @@ void irq_gc_mask_disable_reg(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = 
d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.disable); *ct->mask_cache &= ~mask; - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg); @@ -60,10 +59,9 @@ void irq_gc_mask_set_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); *ct->mask_cache |= mask; irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit); @@ -80,10 +78,9 @@ void irq_gc_mask_clr_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); *ct->mask_cache &= ~mask; irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit); @@ -100,10 +97,9 @@ void irq_gc_unmask_enable_reg(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.enable); *ct->mask_cache |= mask; - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg); @@ -117,9 +113,8 @@ void irq_gc_ack_set_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit); @@ -133,9 +128,8 @@ void irq_gc_ack_clr_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = ~d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); } /** @@ -156,11 +150,10 @@ void irq_gc_mask_disable_and_ack_set(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.disable); *ct->mask_cache &= ~mask; irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_disable_and_ack_set); @@ -174,9 +167,8 @@ void irq_gc_eoi(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.eoi); - irq_gc_unlock(gc); } /** @@ -196,12 +188,11 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on) if (!(mask & gc->wake_enabled)) return -EINVAL; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); if (on) gc->wake_active |= mask; else gc->wake_active &= ~mask; - irq_gc_unlock(gc); return 0; } EXPORT_SYMBOL_GPL(irq_gc_set_wake); @@ -288,7 +279,6 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d, { struct irq_domain_chip_generic *dgc; struct irq_chip_generic *gc; - unsigned long flags; int numchips, i; size_t dgc_sz; size_t gc_sz; @@ -340,9 +330,8 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d, goto err; } - raw_spin_lock_irqsave(&gc_lock, flags); - list_add_tail(&gc->list, &gc_list); - raw_spin_unlock_irqrestore(&gc_lock, flags); + scoped_guard (raw_spinlock_irqsave, &gc_lock) + list_add_tail(&gc->list, &gc_list); /* Calc pointer to the next generic chip */ tmp += gc_sz; } @@ -459,7 +448,6 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, struct irq_chip_generic *gc; struct irq_chip_type *ct; struct irq_chip *chip; - unsigned long flags; int idx; gc = __irq_get_domain_generic_chip(d, hw_irq); @@ -479,9 +467,8 @@ int 
irq_map_generic_chip(struct irq_domain *d, unsigned int virq, /* We only init the cache for the first mapping of a generic chip */ if (!gc->installed) { - raw_spin_lock_irqsave(&gc->lock, flags); + guard(raw_spinlock_irqsave)(&gc->lock); irq_gc_init_mask_cache(gc, dgc->gc_flags); - raw_spin_unlock_irqrestore(&gc->lock, flags); } /* Mark the interrupt as installed */ @@ -548,9 +535,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, struct irq_chip *chip = &ct->chip; unsigned int i; - raw_spin_lock(&gc_lock); - list_add_tail(&gc->list, &gc_list); - raw_spin_unlock(&gc_lock); + scoped_guard (raw_spinlock, &gc_lock) + list_add_tail(&gc->list, &gc_list); irq_gc_init_mask_cache(gc, flags); @@ -616,9 +602,8 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, { unsigned int i, virq; - raw_spin_lock(&gc_lock); - list_del(&gc->list); - raw_spin_unlock(&gc_lock); + scoped_guard (raw_spinlock, &gc_lock) + list_del(&gc->list); for (i = 0; msk; msk >>= 1, i++) { if (!(msk & 0x01)) diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index b0290849c395..aebfe225c9a6 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -87,6 +87,7 @@ extern void __enable_irq(struct irq_desc *desc); extern int irq_activate(struct irq_desc *desc); extern int irq_activate_and_startup(struct irq_desc *desc, bool resend); extern int irq_startup(struct irq_desc *desc, bool resend, bool force); +extern void irq_startup_managed(struct irq_desc *desc); extern void irq_shutdown(struct irq_desc *desc); extern void irq_shutdown_and_deactivate(struct irq_desc *desc); @@ -141,6 +142,10 @@ extern int irq_setup_affinity(struct irq_desc *desc); static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; } #endif + +#define for_each_action_of_desc(desc, act) \ + for (act = desc->action; act; act = act->next) + /* Inline functions for support of irq chips on slow busses */ static inline void chip_bus_lock(struct irq_desc *desc) { @@ -160,38 +165,33 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc) #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) -#define for_each_action_of_desc(desc, act) \ - for (act = desc->action; act; act = act->next) - -struct irq_desc * -__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, - unsigned int check); +struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, + unsigned int check); void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); -static inline struct irq_desc * -irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check) -{ - return __irq_get_desc_lock(irq, flags, true, check); -} +__DEFINE_CLASS_IS_CONDITIONAL(irqdesc_lock, true); +__DEFINE_UNLOCK_GUARD(irqdesc_lock, struct irq_desc, + __irq_put_desc_unlock(_T->lock, _T->flags, _T->bus), + unsigned long flags; bool bus); -static inline void -irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags) +static inline class_irqdesc_lock_t class_irqdesc_lock_constructor(unsigned int irq, bool bus, + unsigned int check) { - __irq_put_desc_unlock(desc, flags, true); -} + class_irqdesc_lock_t _t = { .bus = bus, }; -static inline struct irq_desc * -irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check) -{ - return __irq_get_desc_lock(irq, flags, false, check); -} + _t.lock = __irq_get_desc_lock(irq, &_t.flags, bus, check); -static inline void -irq_put_desc_unlock(struct 
irq_desc *desc, unsigned long flags) -{ - __irq_put_desc_unlock(desc, flags, false); + return _t; } +#define scoped_irqdesc_get_and_lock(_irq, _check) \ + scoped_guard(irqdesc_lock, _irq, false, _check) + +#define scoped_irqdesc_get_and_buslock(_irq, _check) \ + scoped_guard(irqdesc_lock, _irq, true, _check) + +#define scoped_irqdesc ((struct irq_desc *)(__guard_ptr(irqdesc_lock)(&scope))) + #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) static inline unsigned int irqd_get(struct irq_data *d) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4258cd6bd3b4..b64c57b44c20 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -246,8 +246,7 @@ static struct kobject *irq_kobj_base; #define IRQ_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) -static ssize_t per_cpu_count_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; @@ -257,112 +256,83 @@ static ssize_t per_cpu_count_show(struct kobject *kobj, for_each_possible_cpu(cpu) { unsigned int c = irq_desc_kstat_cpu(desc, cpu); - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); + ret += sysfs_emit_at(buf, ret, "%s%u", p, c); p = ","; } - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + ret += sysfs_emit_at(buf, ret, "\n"); return ret; } IRQ_ATTR_RO(per_cpu_count); -static ssize_t chip_name_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - - raw_spin_lock_irq(&desc->lock); - if (desc->irq_data.chip && desc->irq_data.chip->name) { - ret = scnprintf(buf, PAGE_SIZE, "%s\n", - desc->irq_data.chip->name); - } - raw_spin_unlock_irq(&desc->lock); - return ret; + guard(raw_spinlock_irq)(&desc->lock); + if (desc->irq_data.chip && desc->irq_data.chip->name) + return sysfs_emit(buf, "%s\n", desc->irq_data.chip->name); + return 0; } IRQ_ATTR_RO(chip_name); -static ssize_t hwirq_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (desc->irq_data.domain) - ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq); - raw_spin_unlock_irq(&desc->lock); - - return ret; + return sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq); + return 0; } IRQ_ATTR_RO(hwirq); -static ssize_t type_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - raw_spin_lock_irq(&desc->lock); - ret = sprintf(buf, "%s\n", - irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); - raw_spin_unlock_irq(&desc->lock); - - return ret; + guard(raw_spinlock_irq)(&desc->lock); + return sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? 
"level" : "edge"); } IRQ_ATTR_RO(type); -static ssize_t wakeup_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - - raw_spin_lock_irq(&desc->lock); - ret = sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); - raw_spin_unlock_irq(&desc->lock); - - return ret; + guard(raw_spinlock_irq)(&desc->lock); + return sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); } IRQ_ATTR_RO(wakeup); -static ssize_t name_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (desc->name) - ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); - raw_spin_unlock_irq(&desc->lock); - - return ret; + return sysfs_emit(buf, "%s\n", desc->name); + return 0; } IRQ_ATTR_RO(name); -static ssize_t actions_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); struct irqaction *action; ssize_t ret = 0; char *p = ""; - raw_spin_lock_irq(&desc->lock); - for_each_action_of_desc(desc, action) { - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", - p, action->name); - p = ","; + scoped_guard(raw_spinlock_irq, &desc->lock) { + for_each_action_of_desc(desc, action) { + ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name); + p = ","; + } } - raw_spin_unlock_irq(&desc->lock); if (ret) - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); - + ret += sysfs_emit_at(buf, ret, "\n"); return ret; } IRQ_ATTR_RO(actions); @@ -418,19 +388,14 @@ static int __init irq_sysfs_init(void) int irq; /* Prevent concurrent irq alloc/free */ - irq_lock_sparse(); - + guard(mutex)(&sparse_irq_lock); irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); - if (!irq_kobj_base) { - irq_unlock_sparse(); + if (!irq_kobj_base) return -ENOMEM; - } /* Add the already allocated interrupts */ for_each_irq_desc(irq, desc) irq_sysfs_add(irq, desc); - irq_unlock_sparse(); - return 0; } postcore_initcall(irq_sysfs_init); @@ -573,12 +538,12 @@ err: return -ENOMEM; } -static int irq_expand_nr_irqs(unsigned int nr) +static bool irq_expand_nr_irqs(unsigned int nr) { if (nr > MAX_SPARSE_IRQS) - return -ENOMEM; + return false; nr_irqs = nr; - return 0; + return true; } int __init early_irq_init(void) @@ -656,11 +621,9 @@ EXPORT_SYMBOL(irq_to_desc); static void free_desc(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - raw_spin_lock_irqsave(&desc->lock, flags); - desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) + desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); delete_irq_desc(irq); } @@ -679,16 +642,15 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, return start; } -static int irq_expand_nr_irqs(unsigned int nr) +static inline bool irq_expand_nr_irqs(unsigned int nr) { - return -ENOMEM; + return false; } void irq_mark_irq(unsigned int irq) { - 
mutex_lock(&sparse_irq_lock); + guard(mutex)(&sparse_irq_lock); irq_insert_desc(irq, irq_desc + irq); - mutex_unlock(&sparse_irq_lock); } #ifdef CONFIG_GENERIC_IRQ_LEGACY @@ -827,11 +789,9 @@ void irq_free_descs(unsigned int from, unsigned int cnt) if (from >= nr_irqs || (from + cnt) > nr_irqs) return; - mutex_lock(&sparse_irq_lock); + guard(mutex)(&sparse_irq_lock); for (i = 0; i < cnt; i++) free_desc(from + i); - - mutex_unlock(&sparse_irq_lock); } EXPORT_SYMBOL_GPL(irq_free_descs); @@ -848,11 +808,10 @@ EXPORT_SYMBOL_GPL(irq_free_descs); * * Returns the first irq number or error code */ -int __ref -__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner, const struct irq_affinity_desc *affinity) +int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, + struct module *owner, const struct irq_affinity_desc *affinity) { - int start, ret; + int start; if (!cnt) return -EINVAL; @@ -870,22 +829,17 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, from = arch_dynirq_lower_bound(from); } - mutex_lock(&sparse_irq_lock); + guard(mutex)(&sparse_irq_lock); start = irq_find_free_area(from, cnt); - ret = -EEXIST; if (irq >=0 && start != irq) - goto unlock; + return -EEXIST; if (start + cnt > nr_irqs) { - ret = irq_expand_nr_irqs(start + cnt); - if (ret) - goto unlock; + if (!irq_expand_nr_irqs(start + cnt)) + return -ENOMEM; } - ret = alloc_descs(start, cnt, node, affinity, owner); -unlock: - mutex_unlock(&sparse_irq_lock); - return ret; + return alloc_descs(start, cnt, node, affinity, owner); } EXPORT_SYMBOL_GPL(__irq_alloc_descs); @@ -900,27 +854,27 @@ unsigned int irq_get_next_irq(unsigned int offset) return irq_find_at_or_after(offset); } -struct irq_desc * -__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, - unsigned int check) +struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, + unsigned int check) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc; - if (desc) { - if (check & _IRQ_DESC_CHECK) { - if ((check & _IRQ_DESC_PERCPU) && - !irq_settings_is_per_cpu_devid(desc)) - return NULL; - - if (!(check & _IRQ_DESC_PERCPU) && - irq_settings_is_per_cpu_devid(desc)) - return NULL; - } + desc = irq_to_desc(irq); + if (!desc) + return NULL; + + if (check & _IRQ_DESC_CHECK) { + if ((check & _IRQ_DESC_PERCPU) && !irq_settings_is_per_cpu_devid(desc)) + return NULL; - if (bus) - chip_bus_lock(desc); - raw_spin_lock_irqsave(&desc->lock, *flags); + if (!(check & _IRQ_DESC_PERCPU) && irq_settings_is_per_cpu_devid(desc)) + return NULL; } + + if (bus) + chip_bus_lock(desc); + raw_spin_lock_irqsave(&desc->lock, *flags); + return desc; } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 9d5c8651492d..c8b6de09047b 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -480,33 +480,6 @@ struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(irq_domain_create_simple); -/** - * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. - * @of_node: pointer to interrupt controller's device tree node. - * @size: total number of irqs in legacy mapping - * @first_irq: first number of irq block assigned to the domain - * @first_hwirq: first hwirq number to use for the translation. Should normally - * be '0', but a positive integer can be used if the effective - * hwirqs numbering does not begin at zero. 
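The hunks above all follow the same mechanical conversion: an explicit lock()/unlock() pair (or a lock_irqsave()/unlock_irqrestore() pair plus a local flags variable) is replaced by a scope-based guard from <linux/cleanup.h>, so the lock is dropped automatically when the scope ends and error paths can simply return. A minimal sketch of the pattern outside of any particular function in this patch (the foo_* names are hypothetical)::

	#include <linux/cleanup.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_RAW_SPINLOCK(foo_lock);
	static LIST_HEAD(foo_list);
	static u32 foo_mask_cache;

	/* Before: explicit lock/unlock bracketing the critical section. */
	static void foo_mask_set_bit_old(u32 mask)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&foo_lock, flags);
		foo_mask_cache |= mask;
		raw_spin_unlock_irqrestore(&foo_lock, flags);
	}

	/* After: guard() protects the remainder of the function... */
	static void foo_mask_set_bit_new(u32 mask)
	{
		guard(raw_spinlock_irqsave)(&foo_lock);
		foo_mask_cache |= mask;
	}

	/* ...and scoped_guard() protects only the block that needs it. */
	static void foo_register(struct list_head *entry)
	{
		scoped_guard(raw_spinlock_irqsave, &foo_lock)
			list_add_tail(entry, &foo_list);
	}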
- * @ops: map/unmap domain callbacks - * @host_data: Controller private data pointer - * - * Note: the map() callback will be called before this function returns - * for all legacy interrupts except 0 (which is always the invalid irq for - * a legacy controller). - */ -struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - irq_hw_number_t first_hwirq, - const struct irq_domain_ops *ops, - void *host_data) -{ - return irq_domain_create_legacy(of_node_to_fwnode(of_node), size, - first_irq, first_hwirq, ops, host_data); -} -EXPORT_SYMBOL_GPL(irq_domain_add_legacy); - struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, @@ -885,7 +858,7 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, { int i; - fwspec->fwnode = of_node_to_fwnode(np); + fwspec->fwnode = of_fwnode_handle(np); fwspec->param_count = count; for (i = 0; i < count; i++) @@ -1133,6 +1106,31 @@ int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); /** + * irq_domain_xlate_twothreecell() - Generic xlate for direct two or three cell bindings + * @d: Interrupt domain involved in the translation + * @ctrlr: The device tree node for the device whose interrupt is translated + * @intspec: The interrupt specifier data from the device tree + * @intsize: The number of entries in @intspec + * @out_hwirq: Pointer to storage for the hardware interrupt number + * @out_type: Pointer to storage for the interrupt type + * + * Device Tree interrupt specifier translation function for two or three + * cell bindings, where the cell values map directly to the hardware + * interrupt number and the type specifier. + */ +int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type) +{ + struct irq_fwspec fwspec; + + of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec); + + return irq_domain_translate_twothreecell(d, &fwspec, out_hwirq, out_type); +} +EXPORT_SYMBOL_GPL(irq_domain_xlate_twothreecell); + +/** * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated @@ -1216,6 +1214,37 @@ int irq_domain_translate_twocell(struct irq_domain *d, } EXPORT_SYMBOL_GPL(irq_domain_translate_twocell); +/** + * irq_domain_translate_twothreecell() - Generic translate for direct two or three cell + * bindings + * @d: Interrupt domain involved in the translation + * @fwspec: The firmware interrupt specifier to translate + * @out_hwirq: Pointer to storage for the hardware interrupt number + * @out_type: Pointer to storage for the interrupt type + * + * Firmware interrupt specifier translation function for two or three cell + * specifications, where the parameter values map directly to the hardware + * interrupt number and the type specifier. 
+ */ +int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type) +{ + if (fwspec->param_count == 2) { + *out_hwirq = fwspec->param[0]; + *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + return 0; + } + + if (fwspec->param_count == 3) { + *out_hwirq = fwspec->param[1]; + *out_type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(irq_domain_translate_twothreecell); + int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity) { @@ -1252,47 +1281,6 @@ void irq_domain_reset_irq_data(struct irq_data *irq_data) EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -/** - * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy - * @parent: Parent irq domain to associate with the new domain - * @flags: Irq domain flags associated to the domain - * @size: Size of the domain. See below - * @fwnode: Optional fwnode of the interrupt controller - * @ops: Pointer to the interrupt domain callbacks - * @host_data: Controller private data pointer - * - * If @size is 0 a tree domain is created, otherwise a linear domain. - * - * If successful the parent is associated to the new domain and the - * domain flags are set. - * Returns pointer to IRQ domain, or NULL on failure. - */ -struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, - unsigned int flags, - unsigned int size, - struct fwnode_handle *fwnode, - const struct irq_domain_ops *ops, - void *host_data) -{ - struct irq_domain_info info = { - .fwnode = fwnode, - .size = size, - .hwirq_max = size, - .ops = ops, - .host_data = host_data, - .domain_flags = flags, - .parent = parent, - }; - struct irq_domain *d; - - if (!info.size) - info.hwirq_max = ~0U; - - d = irq_domain_instantiate(&info); - return IS_ERR(d) ? NULL : d; -} -EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy); - static void irq_domain_insert_irq(int virq) { struct irq_data *data; @@ -2008,7 +1996,7 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain) domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; } #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -/** +/* * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain * @domain: domain to match * @virq: IRQ number to get irq_data @@ -2022,7 +2010,7 @@ struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, } EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); -/** +/* * irq_domain_set_info - Set the complete data for a @virq in @domain * @domain: Interrupt domain to match * @virq: IRQ number diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 753eef8e041c..c94837382037 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -43,8 +43,6 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) bool inprogress; do { - unsigned long flags; - /* * Wait until we're out of the critical section. This might * give the wrong answer due to the lack of memory barriers. @@ -53,7 +51,7 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) cpu_relax(); /* Ok, that indicated we're done: double-check carefully. 
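The new twothreecell helpers accept either a two-cell or a three-cell devicetree interrupt specifier: with two cells the first cell is the hardware interrupt number and the second the trigger type; with three cells the first cell is not interpreted by the helper, the second is the hardware interrupt number and the third the trigger type. Anything else is rejected with -EINVAL. A hypothetical irqchip driver could plug the helpers straight into its domain ops (bar_domain_ops is made up for illustration)::

	#include <linux/irqdomain.h>

	static const struct irq_domain_ops bar_domain_ops = {
		/*
		 * "interrupts = <5 IRQ_TYPE_LEVEL_HIGH>"   -> hwirq 5, level high
		 * "interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>" -> hwirq 5, level high
		 */
		.xlate		= irq_domain_xlate_twothreecell,
		/* fwspec based variant, used with hierarchical domains. */
		.translate	= irq_domain_translate_twothreecell,
	};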
*/ - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); inprogress = irqd_irq_inprogress(&desc->irq_data); /* @@ -69,33 +67,30 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE, &inprogress); } - raw_spin_unlock_irqrestore(&desc->lock, flags); - /* Oops, that failed? */ } while (inprogress); } /** - * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) - * @irq: interrupt number to wait for + * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) + * @irq: interrupt number to wait for * - * This function waits for any pending hard IRQ handlers for this - * interrupt to complete before returning. If you use this - * function while holding a resource the IRQ handler may need you - * will deadlock. It does not take associated threaded handlers - * into account. + * This function waits for any pending hard IRQ handlers for this interrupt + * to complete before returning. If you use this function while holding a + * resource the IRQ handler may need you will deadlock. It does not take + * associated threaded handlers into account. * - * Do not use this for shutdown scenarios where you must be sure - * that all parts (hardirq and threaded handler) have completed. + * Do not use this for shutdown scenarios where you must be sure that all + * parts (hardirq and threaded handler) have completed. * - * Returns: false if a threaded handler is active. + * Returns: false if a threaded handler is active. * - * This function may be called - with care - from IRQ context. + * This function may be called - with care - from IRQ context. * - * It does not check whether there is an interrupt in flight at the - * hardware level, but not serviced yet, as this might deadlock when - * called with interrupts disabled and the target CPU of the interrupt - * is the current CPU. + * It does not check whether there is an interrupt in flight at the + * hardware level, but not serviced yet, as this might deadlock when called + * with interrupts disabled and the target CPU of the interrupt is the + * current CPU. */ bool synchronize_hardirq(unsigned int irq) { @@ -121,19 +116,19 @@ static void __synchronize_irq(struct irq_desc *desc) } /** - * synchronize_irq - wait for pending IRQ handlers (on other CPUs) - * @irq: interrupt number to wait for + * synchronize_irq - wait for pending IRQ handlers (on other CPUs) + * @irq: interrupt number to wait for * - * This function waits for any pending IRQ handlers for this interrupt - * to complete before returning. If you use this function while - * holding a resource the IRQ handler may need you will deadlock. + * This function waits for any pending IRQ handlers for this interrupt to + * complete before returning. If you use this function while holding a + * resource the IRQ handler may need you will deadlock. * - * Can only be called from preemptible code as it might sleep when - * an interrupt thread is associated to @irq. + * Can only be called from preemptible code as it might sleep when + * an interrupt thread is associated to @irq. * - * It optionally makes sure (when the irq chip supports that method) - * that the interrupt is not pending in any CPU and waiting for - * service. + * It optionally makes sure (when the irq chip supports that method) + * that the interrupt is not pending in any CPU and waiting for + * service. 
*/ void synchronize_irq(unsigned int irq) { @@ -156,8 +151,8 @@ static bool __irq_can_set_affinity(struct irq_desc *desc) } /** - * irq_can_set_affinity - Check if the affinity of a given irq can be set - * @irq: Interrupt to check + * irq_can_set_affinity - Check if the affinity of a given irq can be set + * @irq: Interrupt to check * */ int irq_can_set_affinity(unsigned int irq) @@ -181,13 +176,13 @@ bool irq_can_set_affinity_usr(unsigned int irq) } /** - * irq_set_thread_affinity - Notify irq threads to adjust affinity - * @desc: irq descriptor which has affinity changed + * irq_set_thread_affinity - Notify irq threads to adjust affinity + * @desc: irq descriptor which has affinity changed * - * We just set IRQTF_AFFINITY and delegate the affinity setting - * to the interrupt thread itself. We can not call - * set_cpus_allowed_ptr() here as we hold desc->lock and this - * code can be called from hard interrupt context. + * Just set IRQTF_AFFINITY and delegate the affinity setting to the + * interrupt thread itself. We can not call set_cpus_allowed_ptr() here as + * we hold desc->lock and this code can be called from hard interrupt + * context. */ static void irq_set_thread_affinity(struct irq_desc *desc) { @@ -400,14 +395,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, * an interrupt which is already started or which has already been configured * as managed will also fail, as these mean invalid init state or double init. */ -int irq_update_affinity_desc(unsigned int irq, - struct irq_affinity_desc *affinity) +int irq_update_affinity_desc(unsigned int irq, struct irq_affinity_desc *affinity) { - struct irq_desc *desc; - unsigned long flags; - bool activated; - int ret = 0; - /* * Supporting this with the reservation scheme used by x86 needs * some more thought. Fail it for now. @@ -415,60 +404,50 @@ int irq_update_affinity_desc(unsigned int irq, if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) return -EOPNOTSUPP; - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return -EINVAL; - - /* Requires the interrupt to be shut down */ - if (irqd_is_started(&desc->irq_data)) { - ret = -EBUSY; - goto out_unlock; - } + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; + bool activated; - /* Interrupts which are already managed cannot be modified */ - if (irqd_affinity_is_managed(&desc->irq_data)) { - ret = -EBUSY; - goto out_unlock; - } - - /* - * Deactivate the interrupt. That's required to undo - * anything an earlier activation has established. - */ - activated = irqd_is_activated(&desc->irq_data); - if (activated) - irq_domain_deactivate_irq(&desc->irq_data); + /* Requires the interrupt to be shut down */ + if (irqd_is_started(&desc->irq_data)) + return -EBUSY; - if (affinity->is_managed) { - irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); - irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); - } + /* Interrupts which are already managed cannot be modified */ + if (irqd_affinity_is_managed(&desc->irq_data)) + return -EBUSY; + /* + * Deactivate the interrupt. That's required to undo + * anything an earlier activation has established. 
+ */ + activated = irqd_is_activated(&desc->irq_data); + if (activated) + irq_domain_deactivate_irq(&desc->irq_data); - cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); + if (affinity->is_managed) { + irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); + irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); + } - /* Restore the activation state */ - if (activated) - irq_domain_activate_irq(&desc->irq_data, false); + cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); -out_unlock: - irq_put_desc_busunlock(desc, flags); - return ret; + /* Restore the activation state */ + if (activated) + irq_domain_activate_irq(&desc->irq_data, false); + return 0; + } + return -EINVAL; } static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - int ret; if (!desc) return -EINVAL; - raw_spin_lock_irqsave(&desc->lock, flags); - ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); - raw_spin_unlock_irqrestore(&desc->lock, flags); - return ret; + guard(raw_spinlock_irqsave)(&desc->lock); + return irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); } /** @@ -501,39 +480,36 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) } EXPORT_SYMBOL_GPL(irq_force_affinity); -int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, - bool setaffinity) +int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, bool setaffinity) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); + int ret = -EINVAL; - if (!desc) - return -EINVAL; - desc->affinity_hint = m; - irq_put_desc_unlock(desc, flags); - if (m && setaffinity) + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + scoped_irqdesc->affinity_hint = m; + ret = 0; + } + + if (!ret && m && setaffinity) __irq_set_affinity(irq, m, false); - return 0; + return ret; } EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint); static void irq_affinity_notify(struct work_struct *work) { - struct irq_affinity_notify *notify = - container_of(work, struct irq_affinity_notify, work); + struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work); struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; - unsigned long flags; if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) goto out; - raw_spin_lock_irqsave(&desc->lock, flags); - if (irq_move_pending(&desc->irq_data)) - irq_get_pending(cpumask, desc); - else - cpumask_copy(cpumask, desc->irq_common_data.affinity); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + if (irq_move_pending(&desc->irq_data)) + irq_get_pending(cpumask, desc); + else + cpumask_copy(cpumask, desc->irq_common_data.affinity); + } notify->notify(notify, cpumask); @@ -543,22 +519,20 @@ out: } /** - * irq_set_affinity_notifier - control notification of IRQ affinity changes - * @irq: Interrupt for which to enable/disable notification - * @notify: Context for notification, or %NULL to disable - * notification. Function pointers must be initialised; - * the other fields will be initialised by this function. - * - * Must be called in process context. Notification may only be enabled - * after the IRQ is allocated and must be disabled before the IRQ is - * freed using free_irq(). 
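Several conversions above use the new scoped_irqdesc_get_and_lock()/scoped_irqdesc_get_and_buslock() helpers from kernel/irq/internals.h: the body of the scope only runs when the descriptor lookup succeeds, scoped_irqdesc names the locked descriptor inside the scope, and the descriptor lock (plus the chip bus lock for the buslock variant) is released on any exit from the scope. The shape of such a conversion, modelled on the irq_set_parent() change later in this patch (baz_set_parent() is hypothetical and only makes sense inside kernel/irq/ where these helpers live)::

	static int baz_set_parent(int irq, int parent_irq)
	{
		scoped_irqdesc_get_and_lock(irq, 0) {
			/* Descriptor found and locked; unlocked on return. */
			scoped_irqdesc->parent_irq = parent_irq;
			return 0;
		}
		/* Only reached when no descriptor exists for @irq. */
		return -EINVAL;
	}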
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes + * @irq: Interrupt for which to enable/disable notification + * @notify: Context for notification, or %NULL to disable + * notification. Function pointers must be initialised; + * the other fields will be initialised by this function. + * + * Must be called in process context. Notification may only be enabled + * after the IRQ is allocated and must be disabled before the IRQ is freed + * using free_irq(). */ -int -irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { struct irq_desc *desc = irq_to_desc(irq); struct irq_affinity_notify *old_notify; - unsigned long flags; /* The release function is promised process context */ might_sleep(); @@ -573,10 +547,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) INIT_WORK(¬ify->work, irq_affinity_notify); } - raw_spin_lock_irqsave(&desc->lock, flags); - old_notify = desc->affinity_notify; - desc->affinity_notify = notify; - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + old_notify = desc->affinity_notify; + desc->affinity_notify = notify; + } if (old_notify) { if (cancel_work_sync(&old_notify->work)) { @@ -597,7 +571,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); int irq_setup_affinity(struct irq_desc *desc) { struct cpumask *set = irq_default_affinity; - int ret, node = irq_desc_get_node(desc); + int node = irq_desc_get_node(desc); + static DEFINE_RAW_SPINLOCK(mask_lock); static struct cpumask mask; @@ -605,7 +580,7 @@ int irq_setup_affinity(struct irq_desc *desc) if (!__irq_can_set_affinity(desc)) return 0; - raw_spin_lock(&mask_lock); + guard(raw_spinlock)(&mask_lock); /* * Preserve the managed affinity setting and a userspace affinity * setup, but make sure that one of the targets is online. @@ -630,9 +605,7 @@ int irq_setup_affinity(struct irq_desc *desc) if (cpumask_intersects(&mask, nodemask)) cpumask_and(&mask, &mask, nodemask); } - ret = irq_do_set_affinity(&desc->irq_data, &mask, false); - raw_spin_unlock(&mask_lock); - return ret; + return irq_do_set_affinity(&desc->irq_data, &mask, false); } #else /* Wrapper for ALPHA specific affinity selector magic */ @@ -645,44 +618,36 @@ int irq_setup_affinity(struct irq_desc *desc) /** - * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt - * @irq: interrupt number to set affinity - * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU - * specific data for percpu_devid interrupts - * - * This function uses the vCPU specific data to set the vCPU - * affinity for an irq. The vCPU specific data is passed from - * outside, such as KVM. One example code path is as below: - * KVM -> IOMMU -> irq_set_vcpu_affinity(). + * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt + * @irq: interrupt number to set affinity + * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU + * specific data for percpu_devid interrupts + * + * This function uses the vCPU specific data to set the vCPU affinity for + * an irq. The vCPU specific data is passed from outside, such as KVM. One + * example code path is as below: KVM -> IOMMU -> irq_set_vcpu_affinity(). 
*/ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - struct irq_data *data; - struct irq_chip *chip; - int ret = -ENOSYS; + scoped_irqdesc_get_and_lock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; + struct irq_data *data; + struct irq_chip *chip; - if (!desc) - return -EINVAL; - - data = irq_desc_get_irq_data(desc); - do { - chip = irq_data_get_irq_chip(data); - if (chip && chip->irq_set_vcpu_affinity) - break; -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - data = data->parent_data; -#else - data = NULL; -#endif - } while (data); + data = irq_desc_get_irq_data(desc); + do { + chip = irq_data_get_irq_chip(data); + if (chip && chip->irq_set_vcpu_affinity) + break; - if (data) - ret = chip->irq_set_vcpu_affinity(data, vcpu_info); - irq_put_desc_unlock(desc, flags); + data = irqd_get_parent_data(data); + } while (data); - return ret; + if (!data) + return -ENOSYS; + return chip->irq_set_vcpu_affinity(data, vcpu_info); + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); @@ -694,26 +659,23 @@ void __disable_irq(struct irq_desc *desc) static int __disable_irq_nosync(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - - if (!desc) - return -EINVAL; - __disable_irq(desc); - irq_put_desc_busunlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + __disable_irq(scoped_irqdesc); + return 0; + } + return -EINVAL; } /** - * disable_irq_nosync - disable an irq without waiting - * @irq: Interrupt to disable + * disable_irq_nosync - disable an irq without waiting + * @irq: Interrupt to disable * - * Disable the selected interrupt line. Disables and Enables are - * nested. - * Unlike disable_irq(), this function does not ensure existing - * instances of the IRQ handler have completed before returning. + * Disable the selected interrupt line. Disables and Enables are + * nested. + * Unlike disable_irq(), this function does not ensure existing + * instances of the IRQ handler have completed before returning. * - * This function may be called from IRQ context. + * This function may be called from IRQ context. */ void disable_irq_nosync(unsigned int irq) { @@ -722,17 +684,17 @@ void disable_irq_nosync(unsigned int irq) EXPORT_SYMBOL(disable_irq_nosync); /** - * disable_irq - disable an irq and wait for completion - * @irq: Interrupt to disable + * disable_irq - disable an irq and wait for completion + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Enables and Disables are nested. * - * Disable the selected interrupt line. Enables and Disables are - * nested. - * This function waits for any pending IRQ handlers for this interrupt - * to complete before returning. If you use this function while - * holding a resource the IRQ handler may need you will deadlock. + * This function waits for any pending IRQ handlers for this interrupt to + * complete before returning. If you use this function while holding a + * resource the IRQ handler may need you will deadlock. * - * Can only be called from preemptible code as it might sleep when - * an interrupt thread is associated to @irq. + * Can only be called from preemptible code as it might sleep when an + * interrupt thread is associated to @irq. 
* */ void disable_irq(unsigned int irq) @@ -744,40 +706,39 @@ void disable_irq(unsigned int irq) EXPORT_SYMBOL(disable_irq); /** - * disable_hardirq - disables an irq and waits for hardirq completion - * @irq: Interrupt to disable + * disable_hardirq - disables an irq and waits for hardirq completion + * @irq: Interrupt to disable * - * Disable the selected interrupt line. Enables and Disables are - * nested. - * This function waits for any pending hard IRQ handlers for this - * interrupt to complete before returning. If you use this function while - * holding a resource the hard IRQ handler may need you will deadlock. + * Disable the selected interrupt line. Enables and Disables are nested. * - * When used to optimistically disable an interrupt from atomic context - * the return value must be checked. + * This function waits for any pending hard IRQ handlers for this interrupt + * to complete before returning. If you use this function while holding a + * resource the hard IRQ handler may need you will deadlock. * - * Returns: false if a threaded handler is active. + * When used to optimistically disable an interrupt from atomic context the + * return value must be checked. * - * This function may be called - with care - from IRQ context. + * Returns: false if a threaded handler is active. + * + * This function may be called - with care - from IRQ context. */ bool disable_hardirq(unsigned int irq) { if (!__disable_irq_nosync(irq)) return synchronize_hardirq(irq); - return false; } EXPORT_SYMBOL_GPL(disable_hardirq); /** - * disable_nmi_nosync - disable an nmi without waiting - * @irq: Interrupt to disable - * - * Disable the selected interrupt line. Disables and enables are - * nested. - * The interrupt to disable must have been requested through request_nmi. - * Unlike disable_nmi(), this function does not ensure existing - * instances of the IRQ handler have completed before returning. + * disable_nmi_nosync - disable an nmi without waiting + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Disables and enables are nested. + * + * The interrupt to disable must have been requested through request_nmi. + * Unlike disable_nmi(), this function does not ensure existing + * instances of the IRQ handler have completed before returning. */ void disable_nmi_nosync(unsigned int irq) { @@ -817,41 +778,34 @@ void __enable_irq(struct irq_desc *desc) } /** - * enable_irq - enable handling of an irq - * @irq: Interrupt to enable + * enable_irq - enable handling of an irq + * @irq: Interrupt to enable * - * Undoes the effect of one call to disable_irq(). If this - * matches the last disable, processing of interrupts on this - * IRQ line is re-enabled. + * Undoes the effect of one call to disable_irq(). If this matches the + * last disable, processing of interrupts on this IRQ line is re-enabled. * - * This function may be called from IRQ context only when - * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! + * This function may be called from IRQ context only when + * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 
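As the disable_irq()/disable_irq_nosync()/enable_irq() kernel-doc above states, disables and enables nest: the line is delivered again only once every disable has been matched by an enable. A short illustration (qux_reconfigure() is hypothetical)::

	static void qux_reconfigure(unsigned int irq)
	{
		disable_irq(irq);		/* waits for running handlers */
		disable_irq_nosync(irq);	/* nested disable, does not wait */

		/* ... reprogram the device while the line is masked ... */

		enable_irq(irq);		/* depth 2 -> 1, still disabled */
		enable_irq(irq);		/* depth 1 -> 0, delivery resumes */
	}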
*/ void enable_irq(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return; - if (WARN(!desc->irq_data.chip, - KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) - goto out; - - __enable_irq(desc); -out: - irq_put_desc_busunlock(desc, flags); + if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq)) + return; + __enable_irq(desc); + } } EXPORT_SYMBOL(enable_irq); /** - * enable_nmi - enable handling of an nmi - * @irq: Interrupt to enable + * enable_nmi - enable handling of an nmi + * @irq: Interrupt to enable * - * The interrupt to enable must have been requested through request_nmi. - * Undoes the effect of one call to disable_nmi(). If this - * matches the last disable, processing of interrupts on this - * IRQ line is re-enabled. + * The interrupt to enable must have been requested through request_nmi. + * Undoes the effect of one call to disable_nmi(). If this matches the last + * disable, processing of interrupts on this IRQ line is re-enabled. */ void enable_nmi(unsigned int irq) { @@ -873,65 +827,59 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) } /** - * irq_set_irq_wake - control irq power management wakeup - * @irq: interrupt to control - * @on: enable/disable power management wakeup - * - * Enable/disable power management wakeup mode, which is - * disabled by default. Enables and disables must match, - * just as they match for non-wakeup mode support. - * - * Wakeup mode lets this IRQ wake the system from sleep - * states like "suspend to RAM". - * - * Note: irq enable/disable state is completely orthogonal - * to the enable/disable state of irq wake. An irq can be - * disabled with disable_irq() and still wake the system as - * long as the irq has wake enabled. If this does not hold, - * then the underlying irq chip and the related driver need - * to be investigated. + * irq_set_irq_wake - control irq power management wakeup + * @irq: interrupt to control + * @on: enable/disable power management wakeup + * + * Enable/disable power management wakeup mode, which is disabled by + * default. Enables and disables must match, just as they match for + * non-wakeup mode support. + * + * Wakeup mode lets this IRQ wake the system from sleep states like + * "suspend to RAM". + * + * Note: irq enable/disable state is completely orthogonal to the + * enable/disable state of irq wake. An irq can be disabled with + * disable_irq() and still wake the system as long as the irq has wake + * enabled. If this does not hold, then the underlying irq chip and the + * related driver need to be investigated. */ int irq_set_irq_wake(unsigned int irq, unsigned int on) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - int ret = 0; + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; + int ret = 0; - if (!desc) - return -EINVAL; - - /* Don't use NMIs as wake up interrupts please */ - if (irq_is_nmi(desc)) { - ret = -EINVAL; - goto out_unlock; - } + /* Don't use NMIs as wake up interrupts please */ + if (irq_is_nmi(desc)) + return -EINVAL; - /* wakeup-capable irqs can be shared between drivers that - * don't need to have the same sleep mode behaviors. 
- */ - if (on) { - if (desc->wake_depth++ == 0) { - ret = set_irq_wake_real(irq, on); - if (ret) - desc->wake_depth = 0; - else - irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); - } - } else { - if (desc->wake_depth == 0) { - WARN(1, "Unbalanced IRQ %d wake disable\n", irq); - } else if (--desc->wake_depth == 0) { - ret = set_irq_wake_real(irq, on); - if (ret) - desc->wake_depth = 1; - else - irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); + /* + * wakeup-capable irqs can be shared between drivers that + * don't need to have the same sleep mode behaviors. + */ + if (on) { + if (desc->wake_depth++ == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 0; + else + irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); + } + } else { + if (desc->wake_depth == 0) { + WARN(1, "Unbalanced IRQ %d wake disable\n", irq); + } else if (--desc->wake_depth == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 1; + else + irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); + } } + return ret; } - -out_unlock: - irq_put_desc_busunlock(desc, flags); - return ret; + return -EINVAL; } EXPORT_SYMBOL(irq_set_irq_wake); @@ -940,22 +888,17 @@ EXPORT_SYMBOL(irq_set_irq_wake); * particular irq has been exclusively allocated or is available * for driver use. */ -int can_request_irq(unsigned int irq, unsigned long irqflags) +bool can_request_irq(unsigned int irq, unsigned long irqflags) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - int canrequest = 0; + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return 0; - - if (irq_settings_can_request(desc)) { - if (!desc->action || - irqflags & desc->action->flags & IRQF_SHARED) - canrequest = 1; + if (irq_settings_can_request(desc)) { + if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) + return true; + } } - irq_put_desc_unlock(desc, flags); - return canrequest; + return false; } int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) @@ -1016,16 +959,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) #ifdef CONFIG_HARDIRQS_SW_RESEND int irq_set_parent(int irq, int parent_irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return -EINVAL; - - desc->parent_irq = parent_irq; - - irq_put_desc_unlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->parent_irq = parent_irq; + return 0; + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_parent); #endif @@ -1079,19 +1017,19 @@ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *a return; } - raw_spin_lock_irq(&desc->lock); - /* - * This code is triggered unconditionally. Check the affinity - * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. - */ - if (cpumask_available(desc->irq_common_data.affinity)) { - const struct cpumask *m; + scoped_guard(raw_spinlock_irq, &desc->lock) { + /* + * This code is triggered unconditionally. Check the affinity + * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 
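irq_set_irq_wake() follows the same balanced-nesting rule, and as the comment above notes it is orthogonal to disable_irq()/enable_irq(): a disabled interrupt can still wake the system while wake is enabled. A typical suspend/resume pairing might look like this (the quux_* names are hypothetical)::

	#include <linux/device.h>
	#include <linux/interrupt.h>

	struct quux_chip {
		int irq;
	};

	static int quux_suspend(struct device *dev)
	{
		struct quux_chip *chip = dev_get_drvdata(dev);

		/* Arm the line as a wakeup source; enables nest like disables. */
		return irq_set_irq_wake(chip->irq, 1);
	}

	static int quux_resume(struct device *dev)
	{
		struct quux_chip *chip = dev_get_drvdata(dev);

		/* Must balance the enable done in quux_suspend(). */
		return irq_set_irq_wake(chip->irq, 0);
	}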
+ */ + if (cpumask_available(desc->irq_common_data.affinity)) { + const struct cpumask *m; - m = irq_data_get_effective_affinity_mask(&desc->irq_data); - cpumask_copy(mask, m); - valid = true; + m = irq_data_get_effective_affinity_mask(&desc->irq_data); + cpumask_copy(mask, m); + valid = true; + } } - raw_spin_unlock_irq(&desc->lock); if (valid) set_cpus_allowed_ptr(current, mask); @@ -1259,9 +1197,8 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) if (WARN_ON_ONCE(!secondary)) return; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); __irq_wake_thread(desc, secondary); - raw_spin_unlock_irq(&desc->lock); } /* @@ -1334,21 +1271,19 @@ static int irq_thread(void *data) } /** - * irq_wake_thread - wake the irq thread for the action identified by dev_id - * @irq: Interrupt line - * @dev_id: Device identity for which the thread should be woken - * + * irq_wake_thread - wake the irq thread for the action identified by dev_id + * @irq: Interrupt line + * @dev_id: Device identity for which the thread should be woken */ void irq_wake_thread(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; - unsigned long flags; if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return; - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); for_each_action_of_desc(desc, action) { if (action->dev_id == dev_id) { if (action->thread) @@ -1356,7 +1291,6 @@ void irq_wake_thread(unsigned int irq, void *dev_id) break; } } - raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL_GPL(irq_wake_thread); @@ -1987,9 +1921,8 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) * There is no interrupt on the fly anymore. Deactivate it * completely. */ - raw_spin_lock_irqsave(&desc->lock, flags); - irq_domain_deactivate_irq(&desc->irq_data); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) + irq_domain_deactivate_irq(&desc->irq_data); irq_release_resources(desc); chip_bus_sync_unlock(desc); @@ -2005,20 +1938,19 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) } /** - * free_irq - free an interrupt allocated with request_irq - * @irq: Interrupt line to free - * @dev_id: Device identity to free + * free_irq - free an interrupt allocated with request_irq + * @irq: Interrupt line to free + * @dev_id: Device identity to free * - * Remove an interrupt handler. The handler is removed and if the - * interrupt line is no longer in use by any driver it is disabled. - * On a shared IRQ the caller must ensure the interrupt is disabled - * on the card it drives before calling this function. The function - * does not return until any executing interrupts for this IRQ - * have completed. + * Remove an interrupt handler. The handler is removed and if the interrupt + * line is no longer in use by any driver it is disabled. On a shared IRQ + * the caller must ensure the interrupt is disabled on the card it drives + * before calling this function. The function does not return until any + * executing interrupts for this IRQ have completed. * - * This function must not be called from interrupt context. + * This function must not be called from interrupt context. * - * Returns the devname argument passed to request_irq. + * Returns the devname argument passed to request_irq. 
*/ const void *free_irq(unsigned int irq, void *dev_id) { @@ -2075,8 +2007,6 @@ static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) const void *free_nmi(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - const void *devname; if (!desc || WARN_ON(!irq_is_nmi(desc))) return NULL; @@ -2088,53 +2018,46 @@ const void *free_nmi(unsigned int irq, void *dev_id) if (WARN_ON(desc->depth == 0)) disable_nmi_nosync(irq); - raw_spin_lock_irqsave(&desc->lock, flags); - + guard(raw_spinlock_irqsave)(&desc->lock); irq_nmi_teardown(desc); - devname = __cleanup_nmi(irq, desc); - - raw_spin_unlock_irqrestore(&desc->lock, flags); - - return devname; + return __cleanup_nmi(irq, desc); } /** - * request_threaded_irq - allocate an interrupt line - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * Primary handler for threaded interrupts. - * If handler is NULL and thread_fn != NULL - * the default primary handler is installed. - * @thread_fn: Function called from the irq handler thread - * If NULL, no irq thread is created - * @irqflags: Interrupt type flags - * @devname: An ascii name for the claiming device - * @dev_id: A cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt line and IRQ handling. From the point this - * call is made your handler function may be invoked. Since - * your handler function must clear any interrupt the board - * raises, you must take care both to initialise your hardware - * and to set up the interrupt handler in the right order. - * - * If you want to set up a threaded irq handler for your device - * then you need to supply @handler and @thread_fn. @handler is - * still called in hard interrupt context and has to check - * whether the interrupt originates from the device. If yes it - * needs to disable the interrupt on the device and return - * IRQ_WAKE_THREAD which will wake up the handler thread and run - * @thread_fn. This split handler design is necessary to support - * shared interrupts. - * - * Dev_id must be globally unique. Normally the address of the - * device data structure is used as the cookie. Since the handler - * receives this value it makes sense to use it. - * - * If your interrupt is shared you must pass a non NULL dev_id - * as this is required when freeing the interrupt. - * - * Flags: + * request_threaded_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * Primary handler for threaded interrupts. + * If handler is NULL and thread_fn != NULL + * the default primary handler is installed. + * @thread_fn: Function called from the irq handler thread + * If NULL, no irq thread is created + * @irqflags: Interrupt type flags + * @devname: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt line + * and IRQ handling. From the point this call is made your handler function + * may be invoked. Since your handler function must clear any interrupt the + * board raises, you must take care both to initialise your hardware and to + * set up the interrupt handler in the right order. + * + * If you want to set up a threaded irq handler for your device then you + * need to supply @handler and @thread_fn. 
@handler is still called in hard + * interrupt context and has to check whether the interrupt originates from + * the device. If yes it needs to disable the interrupt on the device and + * return IRQ_WAKE_THREAD which will wake up the handler thread and run + * @thread_fn. This split handler design is necessary to support shared + * interrupts. + * + * @dev_id must be globally unique. Normally the address of the device data + * structure is used as the cookie. Since the handler receives this value + * it makes sense to use it. + * + * If your interrupt is shared you must pass a non NULL dev_id as this is + * required when freeing the interrupt. + * + * Flags: * * IRQF_SHARED Interrupt is shared * IRQF_TRIGGER_* Specify active edge(s) or level @@ -2232,21 +2155,20 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, EXPORT_SYMBOL(request_threaded_irq); /** - * request_any_context_irq - allocate an interrupt line - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * Threaded handler for threaded interrupts. - * @flags: Interrupt type flags - * @name: An ascii name for the claiming device - * @dev_id: A cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt line and IRQ handling. It selects either a - * hardirq or threaded handling method depending on the - * context. - * - * On failure, it returns a negative value. On success, - * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. + * request_any_context_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * Threaded handler for threaded interrupts. + * @flags: Interrupt type flags + * @name: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt line + * and IRQ handling. It selects either a hardirq or threaded handling + * method depending on the context. + * + * Returns: On failure, it returns a negative value. On success, it returns either + * IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */ int request_any_context_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev_id) @@ -2273,37 +2195,35 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler, EXPORT_SYMBOL_GPL(request_any_context_irq); /** - * request_nmi - allocate an interrupt line for NMI delivery - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * Threaded handler for threaded interrupts. - * @irqflags: Interrupt type flags - * @name: An ascii name for the claiming device - * @dev_id: A cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt line and IRQ handling. It sets up the IRQ line - * to be handled as an NMI. - * - * An interrupt line delivering NMIs cannot be shared and IRQ handling - * cannot be threaded. - * - * Interrupt lines requested for NMI delivering must produce per cpu - * interrupts and have auto enabling setting disabled. - * - * Dev_id must be globally unique. Normally the address of the - * device data structure is used as the cookie. Since the handler - * receives this value it makes sense to use it. - * - * If the interrupt line cannot be used to deliver NMIs, function - * will fail and return a negative value. 
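The request_threaded_irq() kernel-doc above describes the split handler model: the primary handler runs in hard interrupt context, checks whether the (possibly shared) line was raised by its device, quiesces the source and returns IRQ_WAKE_THREAD; the threaded handler then does the work that may sleep. A sketch of that shape for a made-up device (all corge_* names and register offsets are hypothetical)::

	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/mutex.h>

	#define CORGE_STATUS	0x04	/* hypothetical registers */
	#define CORGE_MASK	0x08

	struct corge_device {
		void __iomem *regs;
		struct mutex lock;
		int irq;
	};

	static irqreturn_t corge_hardirq(int irq, void *dev_id)
	{
		struct corge_device *corge = dev_id;

		/* Hard interrupt context: check the source, mask it, defer. */
		if (!(readl(corge->regs + CORGE_STATUS) & 0x1))
			return IRQ_NONE;	/* shared line, not our device */

		writel(0, corge->regs + CORGE_MASK);
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t corge_thread(int irq, void *dev_id)
	{
		struct corge_device *corge = dev_id;

		/* Thread context: sleeping and mutexes are fine here. */
		mutex_lock(&corge->lock);
		/* ... drain the device's event queue ... */
		mutex_unlock(&corge->lock);

		writel(1, corge->regs + CORGE_MASK);	/* unmask again */
		return IRQ_HANDLED;
	}

	static int corge_request(struct corge_device *corge)
	{
		return request_threaded_irq(corge->irq, corge_hardirq, corge_thread,
					    IRQF_SHARED, "corge", corge);
	}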
+ * request_nmi - allocate an interrupt line for NMI delivery + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * Threaded handler for threaded interrupts. + * @irqflags: Interrupt type flags + * @name: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt line + * and IRQ handling. It sets up the IRQ line to be handled as an NMI. + * + * An interrupt line delivering NMIs cannot be shared and IRQ handling + * cannot be threaded. + * + * Interrupt lines requested for NMI delivering must produce per cpu + * interrupts and have auto enabling setting disabled. + * + * @dev_id must be globally unique. Normally the address of the device data + * structure is used as the cookie. Since the handler receives this value + * it makes sense to use it. + * + * If the interrupt line cannot be used to deliver NMIs, function will fail + * and return a negative value. */ int request_nmi(unsigned int irq, irq_handler_t handler, unsigned long irqflags, const char *name, void *dev_id) { struct irqaction *action; struct irq_desc *desc; - unsigned long flags; int retval; if (irq == IRQ_NOTCONNECTED) @@ -2345,21 +2265,17 @@ int request_nmi(unsigned int irq, irq_handler_t handler, if (retval) goto err_irq_setup; - raw_spin_lock_irqsave(&desc->lock, flags); - - /* Setup NMI state */ - desc->istate |= IRQS_NMI; - retval = irq_nmi_setup(desc); - if (retval) { - __cleanup_nmi(irq, desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); - return -EINVAL; + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + /* Setup NMI state */ + desc->istate |= IRQS_NMI; + retval = irq_nmi_setup(desc); + if (retval) { + __cleanup_nmi(irq, desc); + return -EINVAL; + } + return 0; } - raw_spin_unlock_irqrestore(&desc->lock, flags); - - return 0; - err_irq_setup: irq_chip_pm_put(&desc->irq_data); err_out: @@ -2370,35 +2286,25 @@ err_out: void enable_percpu_irq(unsigned int irq, unsigned int type) { - unsigned int cpu = smp_processor_id(); - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return; - - /* - * If the trigger type is not specified by the caller, then - * use the default for this interrupt. - */ - type &= IRQ_TYPE_SENSE_MASK; - if (type == IRQ_TYPE_NONE) - type = irqd_get_trigger_type(&desc->irq_data); - - if (type != IRQ_TYPE_NONE) { - int ret; - - ret = __irq_set_trigger(desc, type); - - if (ret) { - WARN(1, "failed to set type for IRQ%d\n", irq); - goto out; + /* + * If the trigger type is not specified by the caller, then + * use the default for this interrupt. 
+ */ + type &= IRQ_TYPE_SENSE_MASK; + if (type == IRQ_TYPE_NONE) + type = irqd_get_trigger_type(&desc->irq_data); + + if (type != IRQ_TYPE_NONE) { + if (__irq_set_trigger(desc, type)) { + WARN(1, "failed to set type for IRQ%d\n", irq); + return; + } } + irq_percpu_enable(desc, smp_processor_id()); } - - irq_percpu_enable(desc, cpu); -out: - irq_put_desc_unlock(desc, flags); } EXPORT_SYMBOL_GPL(enable_percpu_irq); @@ -2416,33 +2322,16 @@ void enable_percpu_nmi(unsigned int irq, unsigned int type) */ bool irq_percpu_is_enabled(unsigned int irq) { - unsigned int cpu = smp_processor_id(); - struct irq_desc *desc; - unsigned long flags; - bool is_enabled; - - desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); - if (!desc) - return false; - - is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); - irq_put_desc_unlock(desc, flags); - - return is_enabled; + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) + return cpumask_test_cpu(smp_processor_id(), scoped_irqdesc->percpu_enabled); + return false; } EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); void disable_percpu_irq(unsigned int irq) { - unsigned int cpu = smp_processor_id(); - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); - - if (!desc) - return; - - irq_percpu_disable(desc, cpu); - irq_put_desc_unlock(desc, flags); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) + irq_percpu_disable(scoped_irqdesc, smp_processor_id()); } EXPORT_SYMBOL_GPL(disable_percpu_irq); @@ -2458,71 +2347,47 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_ { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; - unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); if (!desc) return NULL; - raw_spin_lock_irqsave(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + action = desc->action; + if (!action || action->percpu_dev_id != dev_id) { + WARN(1, "Trying to free already-free IRQ %d\n", irq); + return NULL; + } - action = desc->action; - if (!action || action->percpu_dev_id != dev_id) { - WARN(1, "Trying to free already-free IRQ %d\n", irq); - goto bad; - } + if (!cpumask_empty(desc->percpu_enabled)) { + WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", + irq, cpumask_first(desc->percpu_enabled)); + return NULL; + } - if (!cpumask_empty(desc->percpu_enabled)) { - WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", - irq, cpumask_first(desc->percpu_enabled)); - goto bad; + /* Found it - now remove it from the list of entries: */ + desc->action = NULL; + desc->istate &= ~IRQS_NMI; } - /* Found it - now remove it from the list of entries: */ - desc->action = NULL; - - desc->istate &= ~IRQS_NMI; - - raw_spin_unlock_irqrestore(&desc->lock, flags); - unregister_handler_proc(irq, action); - irq_chip_pm_put(&desc->irq_data); module_put(desc->owner); return action; - -bad: - raw_spin_unlock_irqrestore(&desc->lock, flags); - return NULL; } /** - * remove_percpu_irq - free a per-cpu interrupt - * @irq: Interrupt line to free - * @act: irqaction for the interrupt + * free_percpu_irq - free an interrupt allocated with request_percpu_irq + * @irq: Interrupt line to free + * @dev_id: Device identity to free * - * Used to remove interrupts statically setup by the early boot process. 
- */ -void remove_percpu_irq(unsigned int irq, struct irqaction *act) -{ - struct irq_desc *desc = irq_to_desc(irq); - - if (desc && irq_settings_is_per_cpu_devid(desc)) - __free_percpu_irq(irq, act->percpu_dev_id); -} - -/** - * free_percpu_irq - free an interrupt allocated with request_percpu_irq - * @irq: Interrupt line to free - * @dev_id: Device identity to free + * Remove a percpu interrupt handler. The handler is removed, but the + * interrupt line is not disabled. This must be done on each CPU before + * calling this function. The function does not return until any executing + * interrupts for this IRQ have completed. * - * Remove a percpu interrupt handler. The handler is removed, but - * the interrupt line is not disabled. This must be done on each - * CPU before calling this function. The function does not return - * until any executing interrupts for this IRQ have completed. - * - * This function must not be called from interrupt context. + * This function must not be called from interrupt context. */ void free_percpu_irq(unsigned int irq, void __percpu *dev_id) { @@ -2551,9 +2416,9 @@ void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) } /** - * setup_percpu_irq - setup a per-cpu interrupt - * @irq: Interrupt line to setup - * @act: irqaction for the interrupt + * setup_percpu_irq - setup a per-cpu interrupt + * @irq: Interrupt line to setup + * @act: irqaction for the interrupt * * Used to statically setup per-cpu interrupts in the early boot process. */ @@ -2578,21 +2443,20 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act) } /** - * __request_percpu_irq - allocate a percpu interrupt line - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * @flags: Interrupt type flags (IRQF_TIMER only) - * @devname: An ascii name for the claiming device - * @dev_id: A percpu cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt on the local CPU. If the interrupt is supposed to be - * enabled on other CPUs, it has to be done on each CPU using - * enable_percpu_irq(). - * - * Dev_id must be globally unique. It is a per-cpu variable, and - * the handler gets called with the interrupted CPU's instance of - * that variable. + * __request_percpu_irq - allocate a percpu interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * @flags: Interrupt type flags (IRQF_TIMER only) + * @devname: An ascii name for the claiming device + * @dev_id: A percpu cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt on the + * local CPU. If the interrupt is supposed to be enabled on other CPUs, it + * has to be done on each CPU using enable_percpu_irq(). + * + * @dev_id must be globally unique. It is a per-cpu variable, and + * the handler gets called with the interrupted CPU's instance of + * that variable. */ int __request_percpu_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *devname, @@ -2640,32 +2504,31 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler, EXPORT_SYMBOL_GPL(__request_percpu_irq); /** - * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. 
- * @name: An ascii name for the claiming device - * @dev_id: A percpu cookie passed back to the handler function + * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * @name: An ascii name for the claiming device + * @dev_id: A percpu cookie passed back to the handler function * - * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs - * have to be setup on each CPU by calling prepare_percpu_nmi() before - * being enabled on the same CPU by using enable_percpu_nmi(). + * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs + * have to be setup on each CPU by calling prepare_percpu_nmi() before + * being enabled on the same CPU by using enable_percpu_nmi(). * - * Dev_id must be globally unique. It is a per-cpu variable, and - * the handler gets called with the interrupted CPU's instance of - * that variable. + * @dev_id must be globally unique. It is a per-cpu variable, and the + * handler gets called with the interrupted CPU's instance of that + * variable. * - * Interrupt lines requested for NMI delivering should have auto enabling - * setting disabled. + * Interrupt lines requested for NMI delivering should have auto enabling + * setting disabled. * - * If the interrupt line cannot be used to deliver NMIs, function - * will fail returning a negative value. + * If the interrupt line cannot be used to deliver NMIs, function + * will fail returning a negative value. */ int request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name, void __percpu *dev_id) { struct irqaction *action; struct irq_desc *desc; - unsigned long flags; int retval; if (!handler) @@ -2701,10 +2564,8 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler, if (retval) goto err_irq_setup; - raw_spin_lock_irqsave(&desc->lock, flags); - desc->istate |= IRQS_NMI; - raw_spin_unlock_irqrestore(&desc->lock, flags); - + scoped_guard(raw_spinlock_irqsave, &desc->lock) + desc->istate |= IRQS_NMI; return 0; err_irq_setup: @@ -2716,79 +2577,55 @@ err_out: } /** - * prepare_percpu_nmi - performs CPU local setup for NMI delivery - * @irq: Interrupt line to prepare for NMI delivery + * prepare_percpu_nmi - performs CPU local setup for NMI delivery + * @irq: Interrupt line to prepare for NMI delivery * - * This call prepares an interrupt line to deliver NMI on the current CPU, - * before that interrupt line gets enabled with enable_percpu_nmi(). + * This call prepares an interrupt line to deliver NMI on the current CPU, + * before that interrupt line gets enabled with enable_percpu_nmi(). * - * As a CPU local operation, this should be called from non-preemptible - * context. + * As a CPU local operation, this should be called from non-preemptible + * context. * - * If the interrupt line cannot be used to deliver NMIs, function - * will fail returning a negative value. + * If the interrupt line cannot be used to deliver NMIs, function will fail + * returning a negative value. 
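A per-CPU NMI is requested once for the interrupt line and then prepared and enabled on each CPU individually, as the comments above spell out. A hedged sketch of that call sequence (the structure, handler and function names are made up for illustration):

    struct example_pmu {
            unsigned long overflows;
    };

    static DEFINE_PER_CPU(struct example_pmu, example_pmu_cpu);

    static irqreturn_t example_nmi_handler(int irq, void *dev_id)
    {
            struct example_pmu *pmu = dev_id;       /* this CPU's instance */

            pmu->overflows++;
            return IRQ_HANDLED;
    }

    static int example_pmu_request(unsigned int irq)
    {
            return request_percpu_nmi(irq, example_nmi_handler, "example_pmu",
                                      &example_pmu_cpu);
    }

    /* Run on each CPU with preemption disabled (e.g. from a CPU hotplug
     * "starting" callback) after the line has been requested. */
    static int example_pmu_starting_cpu(unsigned int irq)
    {
            int ret = prepare_percpu_nmi(irq);

            if (!ret)
                    enable_percpu_nmi(irq, IRQ_TYPE_NONE);
            return ret;
    }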
*/ int prepare_percpu_nmi(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc; - int ret = 0; + int ret = -EINVAL; WARN_ON(preemptible()); - desc = irq_get_desc_lock(irq, &flags, - IRQ_GET_DESC_CHECK_PERCPU); - if (!desc) - return -EINVAL; - - if (WARN(!irq_is_nmi(desc), - KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", - irq)) { - ret = -EINVAL; - goto out; - } + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { + if (WARN(!irq_is_nmi(scoped_irqdesc), + "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", irq)) + return -EINVAL; - ret = irq_nmi_setup(desc); - if (ret) { - pr_err("Failed to setup NMI delivery: irq %u\n", irq); - goto out; + ret = irq_nmi_setup(scoped_irqdesc); + if (ret) + pr_err("Failed to setup NMI delivery: irq %u\n", irq); } - -out: - irq_put_desc_unlock(desc, flags); return ret; } /** - * teardown_percpu_nmi - undoes NMI setup of IRQ line - * @irq: Interrupt line from which CPU local NMI configuration should be - * removed - * - * This call undoes the setup done by prepare_percpu_nmi(). + * teardown_percpu_nmi - undoes NMI setup of IRQ line + * @irq: Interrupt line from which CPU local NMI configuration should be removed * - * IRQ line should not be enabled for the current CPU. + * This call undoes the setup done by prepare_percpu_nmi(). * - * As a CPU local operation, this should be called from non-preemptible - * context. + * IRQ line should not be enabled for the current CPU. + * As a CPU local operation, this should be called from non-preemptible + * context. */ void teardown_percpu_nmi(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc; - WARN_ON(preemptible()); - desc = irq_get_desc_lock(irq, &flags, - IRQ_GET_DESC_CHECK_PERCPU); - if (!desc) - return; - - if (WARN_ON(!irq_is_nmi(desc))) - goto out; - - irq_nmi_teardown(desc); -out: - irq_put_desc_unlock(desc, flags); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { + if (WARN_ON(!irq_is_nmi(scoped_irqdesc))) + return; + irq_nmi_teardown(scoped_irqdesc); + } } static int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) @@ -2815,87 +2652,62 @@ static int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state } /** - * irq_get_irqchip_state - returns the irqchip state of a interrupt. - * @irq: Interrupt line that is forwarded to a VM - * @which: One of IRQCHIP_STATE_* the caller wants to know about - * @state: a pointer to a boolean where the state is to be stored + * irq_get_irqchip_state - returns the irqchip state of a interrupt. + * @irq: Interrupt line that is forwarded to a VM + * @which: One of IRQCHIP_STATE_* the caller wants to know about + * @state: a pointer to a boolean where the state is to be stored * - * This call snapshots the internal irqchip state of an - * interrupt, returning into @state the bit corresponding to - * stage @which + * This call snapshots the internal irqchip state of an interrupt, + * returning into @state the bit corresponding to stage @which * - * This function should be called with preemption disabled if the - * interrupt controller has per-cpu registers. + * This function should be called with preemption disabled if the interrupt + * controller has per-cpu registers. 
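A minimal, hypothetical use of this accessor, checking whether a line that is forwarded to a guest is still pending:

    static bool example_irq_pending(unsigned int irq)
    {
            bool pending = false;

            /* Errors (unknown irq, no irqchip support) are treated as "not pending". */
            if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
                    return false;

            return pending;
    }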
*/ -int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, - bool *state) +int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool *state) { - struct irq_desc *desc; - struct irq_data *data; - unsigned long flags; - int err = -EINVAL; - - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return err; + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); - data = irq_desc_get_irq_data(desc); - - err = __irq_get_irqchip_state(data, which, state); - - irq_put_desc_busunlock(desc, flags); - return err; + return __irq_get_irqchip_state(data, which, state); + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_get_irqchip_state); /** - * irq_set_irqchip_state - set the state of a forwarded interrupt. - * @irq: Interrupt line that is forwarded to a VM - * @which: State to be restored (one of IRQCHIP_STATE_*) - * @val: Value corresponding to @which + * irq_set_irqchip_state - set the state of a forwarded interrupt. + * @irq: Interrupt line that is forwarded to a VM + * @which: State to be restored (one of IRQCHIP_STATE_*) + * @val: Value corresponding to @which * - * This call sets the internal irqchip state of an interrupt, - * depending on the value of @which. + * This call sets the internal irqchip state of an interrupt, depending on + * the value of @which. * - * This function should be called with migration disabled if the - * interrupt controller has per-cpu registers. + * This function should be called with migration disabled if the interrupt + * controller has per-cpu registers. */ -int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, - bool val) +int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool val) { - struct irq_desc *desc; - struct irq_data *data; - struct irq_chip *chip; - unsigned long flags; - int err = -EINVAL; + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); + struct irq_chip *chip; - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return err; + do { + chip = irq_data_get_irq_chip(data); - data = irq_desc_get_irq_data(desc); + if (WARN_ON_ONCE(!chip)) + return -ENODEV; - do { - chip = irq_data_get_irq_chip(data); - if (WARN_ON_ONCE(!chip)) { - err = -ENODEV; - goto out_unlock; - } - if (chip->irq_set_irqchip_state) - break; -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - data = data->parent_data; -#else - data = NULL; -#endif - } while (data); + if (chip->irq_set_irqchip_state) + break; - if (data) - err = chip->irq_set_irqchip_state(data, which, val); + data = irqd_get_parent_data(data); + } while (data); -out_unlock: - irq_put_desc_busunlock(desc, flags); - return err; + if (data) + return chip->irq_set_irqchip_state(data, which, val); + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_irqchip_state); diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index c05ba7ca00fa..9febe797a5f6 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -59,7 +59,8 @@ struct msi_ctrl { static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl); static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid); static inline int msi_sysfs_create_group(struct device *dev); - +static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg); /** * msi_alloc_desc - Allocate an initialized msi_desc @@ -343,26 +344,30 @@ int msi_setup_device_data(struct device *dev) } /** - * msi_lock_descs - Lock the MSI 
descriptor storage of a device + * __msi_lock_descs - Lock the MSI descriptor storage of a device * @dev: Device to operate on + * + * Internal function for guard(msi_descs_lock). Don't use in code. */ -void msi_lock_descs(struct device *dev) +void __msi_lock_descs(struct device *dev) { mutex_lock(&dev->msi.data->mutex); } -EXPORT_SYMBOL_GPL(msi_lock_descs); +EXPORT_SYMBOL_GPL(__msi_lock_descs); /** - * msi_unlock_descs - Unlock the MSI descriptor storage of a device + * __msi_unlock_descs - Unlock the MSI descriptor storage of a device * @dev: Device to operate on + * + * Internal function for guard(msi_descs_lock). Don't use in code. */ -void msi_unlock_descs(struct device *dev) +void __msi_unlock_descs(struct device *dev) { /* Invalidate the index which was cached by the iterator */ dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX; mutex_unlock(&dev->msi.data->mutex); } -EXPORT_SYMBOL_GPL(msi_unlock_descs); +EXPORT_SYMBOL_GPL(__msi_unlock_descs); static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid, enum msi_desc_filter filter) @@ -448,7 +453,6 @@ EXPORT_SYMBOL_GPL(msi_next_desc); unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index) { struct msi_desc *desc; - unsigned int ret = 0; bool pcimsi = false; struct xarray *xa; @@ -462,7 +466,7 @@ unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigne if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN) pcimsi = to_pci_dev(dev)->msi_enabled; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); xa = &dev->msi.data->__domains[domid].store; desc = xa_load(xa, pcimsi ? 0 : index); if (desc && desc->irq) { @@ -471,16 +475,12 @@ unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigne * PCI-MSIX and platform MSI use a descriptor per * interrupt. 
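The msi_descs_lock guard class used throughout these hunks is presumably declared with DEFINE_GUARD() in the MSI header on top of the two helpers renamed above. The sketch below shows the assumed declaration together with a hypothetical locked walk over a device's descriptors:

    /* Assumed shape of the guard class; the real declaration lives in <linux/msi.h>. */
    DEFINE_GUARD(msi_descs_lock, struct device *,
                 __msi_lock_descs(_T), __msi_unlock_descs(_T));

    static void example_dump_msi(struct device *dev)
    {
            struct msi_desc *desc;

            guard(msi_descs_lock)(dev);     /* mutex is dropped on return */

            msi_for_each_desc(desc, dev, MSI_DESC_ALL)
                    dev_info(dev, "MSI index %u -> virq %u\n",
                             desc->msi_index, desc->irq);
    }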
*/ - if (pcimsi) { - if (index < desc->nvec_used) - ret = desc->irq + index; - } else { - ret = desc->irq; - } + if (!pcimsi) + return desc->irq; + if (index < desc->nvec_used) + return desc->irq + index; } - - msi_unlock_descs(dev); - return ret; + return 0; } EXPORT_SYMBOL_GPL(msi_domain_get_virq); @@ -796,6 +796,10 @@ static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, return 0; } +static void msi_domain_ops_teardown(struct irq_domain *domain, msi_alloc_info_t *arg) +{ +} + static void msi_domain_ops_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { @@ -821,6 +825,7 @@ static struct msi_domain_ops msi_domain_ops_default = { .get_hwirq = msi_domain_ops_get_hwirq, .msi_init = msi_domain_ops_init, .msi_prepare = msi_domain_ops_prepare, + .msi_teardown = msi_domain_ops_teardown, .set_desc = msi_domain_ops_set_desc, }; @@ -842,6 +847,8 @@ static void msi_domain_update_dom_ops(struct msi_domain_info *info) ops->msi_init = msi_domain_ops_default.msi_init; if (ops->msi_prepare == NULL) ops->msi_prepare = msi_domain_ops_default.msi_prepare; + if (ops->msi_teardown == NULL) + ops->msi_teardown = msi_domain_ops_default.msi_teardown; if (ops->set_desc == NULL) ops->set_desc = msi_domain_ops_default.set_desc; } @@ -905,6 +912,32 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, } /** + * msi_create_parent_irq_domain - Create an MSI-parent interrupt domain + * @info: MSI irqdomain creation info + * @msi_parent_ops: MSI parent callbacks and configuration + * + * Return: pointer to the created &struct irq_domain or %NULL on failure + */ +struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info, + const struct msi_parent_ops *msi_parent_ops) +{ + struct irq_domain *d; + + info->hwirq_max = max(info->hwirq_max, info->size); + info->size = info->hwirq_max; + info->domain_flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; + info->bus_token = msi_parent_ops->bus_select_token; + + d = irq_domain_instantiate(info); + if (IS_ERR(d)) + return NULL; + + d->msi_parent_ops = msi_parent_ops; + return d; +} +EXPORT_SYMBOL_GPL(msi_create_parent_irq_domain); + +/** * msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down * in the domain hierarchy * @dev: The device for which the domain should be created @@ -998,9 +1031,8 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, void *chip_data) { struct irq_domain *domain, *parent = dev->msi.domain; - struct fwnode_handle *fwnode, *fwnalloced = NULL; - struct msi_domain_template *bundle; const struct msi_parent_ops *pops; + struct fwnode_handle *fwnode; if (!irq_domain_is_msi_parent(parent)) return false; @@ -1008,7 +1040,8 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, if (domid >= MSI_MAX_DEVICE_IRQDOMAINS) return false; - bundle = kmemdup(template, sizeof(*bundle), GFP_KERNEL); + struct msi_domain_template *bundle __free(kfree) = + kmemdup(template, sizeof(*bundle), GFP_KERNEL); if (!bundle) return false; @@ -1017,6 +1050,7 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, bundle->info.ops = &bundle->ops; bundle->info.data = domain_data; bundle->info.chip_data = chip_data; + bundle->info.alloc_data = &bundle->alloc_info; pops = parent->msi_parent_ops; snprintf(bundle->name, sizeof(bundle->name), "%s%s-%s", @@ -1031,41 +1065,43 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, * node as they are not guaranteed to have a fwnode. 
They are never * looked up and always handled in the context of the device. */ - if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE) - fwnode = dev->fwnode; + struct fwnode_handle *fwnode_alloced __free(irq_domain_free_fwnode) = NULL; + + if (!(bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE)) + fwnode = fwnode_alloced = irq_domain_alloc_named_fwnode(bundle->name); else - fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name); + fwnode = dev->fwnode; if (!fwnode) - goto free_bundle; + return false; if (msi_setup_device_data(dev)) - goto free_fwnode; - - msi_lock_descs(dev); + return false; + guard(msi_descs_lock)(dev); if (WARN_ON_ONCE(msi_get_device_domain(dev, domid))) - goto fail; + return false; if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info)) - goto fail; + return false; domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent); if (!domain) - goto fail; + return false; domain->dev = dev; dev->msi.data->__domains[domid].domain = domain; - msi_unlock_descs(dev); - return true; -fail: - msi_unlock_descs(dev); -free_fwnode: - irq_domain_free_fwnode(fwnalloced); -free_bundle: - kfree(bundle); - return false; + if (msi_domain_prepare_irqs(domain, dev, hwsize, &bundle->alloc_info)) { + dev->msi.data->__domains[domid].domain = NULL; + irq_domain_remove(domain); + return false; + } + + /* @bundle and @fwnode_alloced are now in use. Prevent cleanup */ + retain_and_null_ptr(bundle); + retain_and_null_ptr(fwnode_alloced); + return true; } /** @@ -1079,23 +1115,21 @@ void msi_remove_device_irq_domain(struct device *dev, unsigned int domid) struct msi_domain_info *info; struct irq_domain *domain; - msi_lock_descs(dev); - + guard(msi_descs_lock)(dev); domain = msi_get_device_domain(dev, domid); - if (!domain || !irq_domain_is_msi_device(domain)) - goto unlock; + return; dev->msi.data->__domains[domid].domain = NULL; info = domain->host_data; + + info->ops->msi_teardown(domain, info->alloc_data); + if (irq_domain_is_msi_device(domain)) fwnode = domain->fwnode; irq_domain_remove(domain); irq_domain_free_fwnode(fwnode); kfree(container_of(info, struct msi_domain_template, info)); - -unlock: - msi_unlock_descs(dev); } /** @@ -1111,16 +1145,14 @@ bool msi_match_device_irq_domain(struct device *dev, unsigned int domid, { struct msi_domain_info *info; struct irq_domain *domain; - bool ret = false; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); domain = msi_get_device_domain(dev, domid); if (domain && irq_domain_is_msi_device(domain)) { info = domain->host_data; - ret = info->bus_token == bus_token; + return info->bus_token == bus_token; } - msi_unlock_descs(dev); - return ret; + return false; } static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, @@ -1238,6 +1270,24 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag return 0; } +static int populate_alloc_info(struct irq_domain *domain, struct device *dev, + unsigned int nirqs, msi_alloc_info_t *arg) +{ + struct msi_domain_info *info = domain->host_data; + + /* + * If the caller has provided a template alloc info, use that. Once + * all users of msi_create_irq_domain() have been eliminated, this + * should be the only source of allocation information, and the + * prepare call below should be finally removed. 
+ */ + if (!info->alloc_data) + return msi_domain_prepare_irqs(domain, dev, nirqs, arg); + + *arg = *info->alloc_data; + return 0; +} + static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain, struct msi_ctrl *ctrl) { @@ -1250,7 +1300,7 @@ static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain unsigned long idx; int i, ret, virq; - ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg); + ret = populate_alloc_info(domain, dev, ctrl->nirqs, &arg); if (ret) return ret; @@ -1391,12 +1441,9 @@ int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid, int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid, unsigned int first, unsigned int last) { - int ret; - msi_lock_descs(dev); - ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last); - msi_unlock_descs(dev); - return ret; + guard(msi_descs_lock)(dev); + return msi_domain_alloc_irqs_range_locked(dev, domid, first, last); } EXPORT_SYMBOL_GPL(msi_domain_alloc_irqs_range); @@ -1500,12 +1547,8 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u const struct irq_affinity_desc *affdesc, union msi_instance_cookie *icookie) { - struct msi_map map; - - msi_lock_descs(dev); - map = __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie); - msi_unlock_descs(dev); - return map; + guard(msi_descs_lock)(dev); + return __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie); } /** @@ -1542,13 +1585,11 @@ int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, icookie.value = ((u64)type << 32) | hwirq; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain)) map.index = -EINVAL; else map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie); - msi_unlock_descs(dev); - return map.index >= 0 ? 
map.virq : map.index; } @@ -1641,9 +1682,8 @@ void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid, void msi_domain_free_irqs_range(struct device *dev, unsigned int domid, unsigned int first, unsigned int last) { - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); msi_domain_free_irqs_range_locked(dev, domid, first, last); - msi_unlock_descs(dev); } EXPORT_SYMBOL_GPL(msi_domain_free_irqs_all); @@ -1673,9 +1713,8 @@ void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid) */ void msi_domain_free_irqs_all(struct device *dev, unsigned int domid) { - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); msi_domain_free_irqs_all_locked(dev, domid); - msi_unlock_descs(dev); } /** @@ -1694,12 +1733,11 @@ void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq) if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI)) return; - msi_lock_descs(dev); - if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) { - msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index, - desc->msi_index); - } - msi_unlock_descs(dev); + guard(msi_descs_lock)(dev); + if (WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) + return; + msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index, + desc->msi_index); } /** diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index c556bc49d213..445912d51033 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -46,8 +46,7 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) desc->cond_suspend_depth++; WARN_ON_ONCE(desc->no_suspend_depth && - (desc->no_suspend_depth + - desc->cond_suspend_depth) != desc->nr_actions); + (desc->no_suspend_depth + desc->cond_suspend_depth) != desc->nr_actions); } /* @@ -134,14 +133,12 @@ void suspend_device_irqs(void) int irq; for_each_irq_desc(irq, desc) { - unsigned long flags; bool sync; if (irq_settings_is_nested_thread(desc)) continue; - raw_spin_lock_irqsave(&desc->lock, flags); - sync = suspend_device_irq(desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) + sync = suspend_device_irq(desc); if (sync) synchronize_irq(irq); @@ -186,18 +183,15 @@ static void resume_irqs(bool want_early) int irq; for_each_irq_desc(irq, desc) { - unsigned long flags; - bool is_early = desc->action && - desc->action->flags & IRQF_EARLY_RESUME; + bool is_early = desc->action && desc->action->flags & IRQF_EARLY_RESUME; if (!is_early && want_early) continue; if (irq_settings_is_nested_thread(desc)) continue; - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); resume_irq(desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -207,22 +201,16 @@ static void resume_irqs(bool want_early) */ void rearm_wake_irq(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return; - - if (!(desc->istate & IRQS_SUSPENDED) || - !irqd_is_wakeup_set(&desc->irq_data)) - goto unlock; - - desc->istate &= ~IRQS_SUSPENDED; - irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); - __enable_irq(desc); + if (!(desc->istate & IRQS_SUSPENDED) || !irqd_is_wakeup_set(&desc->irq_data)) + return; -unlock: - irq_put_desc_busunlock(desc, flags); + desc->istate &= ~IRQS_SUSPENDED; + irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); + 
__enable_irq(desc); + } } /** diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 8e29809de38d..29c2404e743b 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -81,20 +81,18 @@ static int show_irq_affinity(int type, struct seq_file *m) static int irq_affinity_hint_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); - unsigned long flags; cpumask_var_t mask; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; - raw_spin_lock_irqsave(&desc->lock, flags); - if (desc->affinity_hint) - cpumask_copy(mask, desc->affinity_hint); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irq, &desc->lock) { + if (desc->affinity_hint) + cpumask_copy(mask, desc->affinity_hint); + } seq_printf(m, "%*pb\n", cpumask_pr_args(mask)); free_cpumask_var(mask); - return 0; } @@ -295,32 +293,26 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v) #define MAX_NAMELEN 128 -static int name_unique(unsigned int irq, struct irqaction *new_action) +static bool name_unique(unsigned int irq, struct irqaction *new_action) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; - unsigned long flags; - int ret = 1; - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irq)(&desc->lock); for_each_action_of_desc(desc, action) { if ((action != new_action) && action->name && - !strcmp(new_action->name, action->name)) { - ret = 0; - break; - } + !strcmp(new_action->name, action->name)) + return false; } - raw_spin_unlock_irqrestore(&desc->lock, flags); - return ret; + return true; } void register_handler_proc(unsigned int irq, struct irqaction *action) { - char name [MAX_NAMELEN]; + char name[MAX_NAMELEN]; struct irq_desc *desc = irq_to_desc(irq); - if (!desc->dir || action->dir || !action->name || - !name_unique(irq, action)) + if (!desc->dir || action->dir || !action->name || !name_unique(irq, action)) return; snprintf(name, MAX_NAMELEN, "%s", action->name); @@ -347,17 +339,16 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) * added, not when the descriptor is created, so multiple * tasks might try to register at the same time. 
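The register_lock conversion just below is the whole-function variant of the same cleanup.h mechanism: guard(mutex)() keeps the mutex held until the function returns, so every early return unlocks and the goto-unlock label goes away. A tiny sketch with hypothetical names:

    static DEFINE_MUTEX(example_lock);
    static struct dentry *example_dir;

    static void example_register_once(void)
    {
            guard(mutex)(&example_lock);    /* held until return */

            if (example_dir)                /* early return still unlocks */
                    return;

            example_dir = debugfs_create_dir("example", NULL);
    }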
*/ - mutex_lock(®ister_lock); + guard(mutex)(®ister_lock); if (desc->dir) - goto out_unlock; - - sprintf(name, "%d", irq); + return; /* create /proc/irq/1234 */ + sprintf(name, "%u", irq); desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) - goto out_unlock; + return; #ifdef CONFIG_SMP umode_t umode = S_IRUGO; @@ -366,31 +357,27 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) umode |= S_IWUSR; /* create /proc/irq/<irq>/smp_affinity */ - proc_create_data("smp_affinity", umode, desc->dir, - &irq_affinity_proc_ops, irqp); + proc_create_data("smp_affinity", umode, desc->dir, &irq_affinity_proc_ops, irqp); /* create /proc/irq/<irq>/affinity_hint */ proc_create_single_data("affinity_hint", 0444, desc->dir, - irq_affinity_hint_proc_show, irqp); + irq_affinity_hint_proc_show, irqp); /* create /proc/irq/<irq>/smp_affinity_list */ proc_create_data("smp_affinity_list", umode, desc->dir, &irq_affinity_list_proc_ops, irqp); - proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, - irqp); + proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, irqp); # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK proc_create_single_data("effective_affinity", 0444, desc->dir, - irq_effective_aff_proc_show, irqp); + irq_effective_aff_proc_show, irqp); proc_create_single_data("effective_affinity_list", 0444, desc->dir, - irq_effective_aff_list_proc_show, irqp); + irq_effective_aff_list_proc_show, irqp); # endif #endif proc_create_single_data("spurious", 0444, desc->dir, - irq_spurious_proc_show, (void *)(long)irq); + irq_spurious_proc_show, (void *)(long)irq); -out_unlock: - mutex_unlock(®ister_lock); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) @@ -468,7 +455,6 @@ int show_interrupts(struct seq_file *p, void *v) int i = *(loff_t *) v, j; struct irqaction *action; struct irq_desc *desc; - unsigned long flags; if (i > ACTUAL_NR_IRQS) return 0; @@ -487,13 +473,13 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - rcu_read_lock(); + guard(rcu)(); desc = irq_to_desc(i); if (!desc || irq_settings_is_hidden(desc)) - goto outsparse; + return 0; if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs) - goto outsparse; + return 0; seq_printf(p, "%*d:", prec, i); for_each_online_cpu(j) { @@ -503,7 +489,7 @@ int show_interrupts(struct seq_file *p, void *v) } seq_putc(p, ' '); - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irq)(&desc->lock); if (desc->irq_data.chip) { if (desc->irq_data.chip->irq_print_chip) desc->irq_data.chip->irq_print_chip(&desc->irq_data, p); @@ -532,9 +518,6 @@ int show_interrupts(struct seq_file *p, void *v) } seq_putc(p, '\n'); - raw_spin_unlock_irqrestore(&desc->lock, flags); -outsparse: - rcu_read_unlock(); return 0; } #endif diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 1b7fa72968bd..ca9cc1b806a9 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -30,18 +30,17 @@ static DEFINE_RAW_SPINLOCK(irq_resend_lock); */ static void resend_irqs(struct tasklet_struct *unused) { - struct irq_desc *desc; - - raw_spin_lock_irq(&irq_resend_lock); + guard(raw_spinlock_irq)(&irq_resend_lock); while (!hlist_empty(&irq_resend_list)) { - desc = hlist_entry(irq_resend_list.first, struct irq_desc, - resend_node); + struct irq_desc *desc; + + desc = hlist_entry(irq_resend_list.first, struct irq_desc, resend_node); hlist_del_init(&desc->resend_node); + raw_spin_unlock(&irq_resend_lock); desc->handle_irq(desc); raw_spin_lock(&irq_resend_lock); } - 
raw_spin_unlock_irq(&irq_resend_lock); } /* Tasklet to handle resend: */ @@ -75,19 +74,18 @@ static int irq_sw_resend(struct irq_desc *desc) } /* Add to resend_list and activate the softirq: */ - raw_spin_lock(&irq_resend_lock); - if (hlist_unhashed(&desc->resend_node)) - hlist_add_head(&desc->resend_node, &irq_resend_list); - raw_spin_unlock(&irq_resend_lock); + scoped_guard(raw_spinlock, &irq_resend_lock) { + if (hlist_unhashed(&desc->resend_node)) + hlist_add_head(&desc->resend_node, &irq_resend_list); + } tasklet_schedule(&resend_tasklet); return 0; } void clear_irq_resend(struct irq_desc *desc) { - raw_spin_lock(&irq_resend_lock); + guard(raw_spinlock)(&irq_resend_lock); hlist_del_init(&desc->resend_node); - raw_spin_unlock(&irq_resend_lock); } void irq_resend_init(struct irq_desc *desc) @@ -172,30 +170,24 @@ int check_irq_resend(struct irq_desc *desc, bool inject) */ int irq_inject_interrupt(unsigned int irq) { - struct irq_desc *desc; - unsigned long flags; - int err; + int err = -EINVAL; /* Try the state injection hardware interface first */ if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true)) return 0; /* That failed, try via the resend mechanism */ - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return -EINVAL; + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; - /* - * Only try to inject when the interrupt is: - * - not NMI type - * - activated - */ - if (irq_is_nmi(desc) || !irqd_is_activated(&desc->irq_data)) - err = -EINVAL; - else - err = check_irq_resend(desc, true); - - irq_put_desc_busunlock(desc, flags); + /* + * Only try to inject when the interrupt is: + * - not NMI type + * - activated + */ + if (!irq_is_nmi(desc) && irqd_is_activated(&desc->irq_data)) + err = check_irq_resend(desc, true); + } return err; } EXPORT_SYMBOL_GPL(irq_inject_interrupt); diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 02b2daf07441..8f26982e7300 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -34,8 +34,9 @@ static atomic_t irq_poll_active; * true and let the handler run. */ bool irq_wait_for_poll(struct irq_desc *desc) - __must_hold(&desc->lock) { + lockdep_assert_held(&desc->lock); + if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), "irq poll in progress on cpu %d for irq %d\n", smp_processor_id(), desc->irq_data.irq)) @@ -59,37 +60,35 @@ bool irq_wait_for_poll(struct irq_desc *desc) /* * Recovery handler for misrouted interrupts. */ -static int try_one_irq(struct irq_desc *desc, bool force) +static bool try_one_irq(struct irq_desc *desc, bool force) { - irqreturn_t ret = IRQ_NONE; struct irqaction *action; + bool ret = false; - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); /* * PER_CPU, nested thread interrupts and interrupts explicitly * marked polled are excluded from polling. */ - if (irq_settings_is_per_cpu(desc) || - irq_settings_is_nested_thread(desc) || + if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc) || irq_settings_is_polled(desc)) - goto out; + return false; /* * Do not poll disabled interrupts unless the spurious * disabled poller asks explicitly. */ if (irqd_irq_disabled(&desc->irq_data) && !force) - goto out; + return false; /* * All handlers must agree on IRQF_SHARED, so we test just the * first. 
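The irq_wait_for_poll() change above swaps the sparse-only __must_hold() annotation for a lockdep assertion, so the locking contract is checked at runtime on CONFIG_LOCKDEP kernels instead of only by static checkers. Shape of the pattern (hypothetical helper):

    static void example_poll_step(struct irq_desc *desc)
    {
            /* Caller must hold desc->lock; verified at runtime under lockdep. */
            lockdep_assert_held(&desc->lock);

            if (desc->istate & IRQS_PENDING)
                    desc->istate &= ~IRQS_PENDING;
    }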
*/ action = desc->action; - if (!action || !(action->flags & IRQF_SHARED) || - (action->flags & __IRQF_TIMER)) - goto out; + if (!action || !(action->flags & IRQF_SHARED) || (action->flags & __IRQF_TIMER)) + return false; /* Already running on another processor */ if (irqd_irq_inprogress(&desc->irq_data)) { @@ -98,21 +97,19 @@ static int try_one_irq(struct irq_desc *desc, bool force) * CPU to go looking for our mystery interrupt too */ desc->istate |= IRQS_PENDING; - goto out; + return false; } /* Mark it poll in progress */ desc->istate |= IRQS_POLL_INPROGRESS; do { if (handle_irq_event(desc) == IRQ_HANDLED) - ret = IRQ_HANDLED; + ret = true; /* Make sure that there is still a valid action */ action = desc->action; } while ((desc->istate & IRQS_PENDING) && action); desc->istate &= ~IRQS_POLL_INPROGRESS; -out: - raw_spin_unlock(&desc->lock); - return ret == IRQ_HANDLED; + return ret; } static int misrouted_irq(int irq) @@ -157,8 +154,7 @@ static void poll_spurious_irqs(struct timer_list *unused) continue; /* Racy but it doesn't matter */ - state = desc->istate; - barrier(); + state = READ_ONCE(desc->istate); if (!(state & IRQS_SPURIOUS_DISABLED)) continue; @@ -168,8 +164,7 @@ static void poll_spurious_irqs(struct timer_list *unused) } out: atomic_dec(&irq_poll_active); - mod_timer(&poll_spurious_irq_timer, - jiffies + POLL_SPURIOUS_IRQ_INTERVAL); + mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); } static inline int bad_action_ret(irqreturn_t action_ret) @@ -193,17 +188,13 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) { unsigned int irq = irq_desc_get_irq(desc); struct irqaction *action; - unsigned long flags; - if (bad_action_ret(action_ret)) { - printk(KERN_ERR "irq event %d: bogus return value %x\n", - irq, action_ret); - } else { - printk(KERN_ERR "irq %d: nobody cared (try booting with " - "the \"irqpoll\" option)\n", irq); - } + if (bad_action_ret(action_ret)) + pr_err("irq event %d: bogus return value %x\n", irq, action_ret); + else + pr_err("irq %d: nobody cared (try booting with the \"irqpoll\" option)\n", irq); dump_stack(); - printk(KERN_ERR "handlers:\n"); + pr_err("handlers:\n"); /* * We need to take desc->lock here. note_interrupt() is called @@ -211,15 +202,13 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) * with something else removing an action. It's ok to take * desc->lock here. See synchronize_irq(). */ - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); for_each_action_of_desc(desc, action) { - printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler); + pr_err("[<%p>] %ps", action->handler, action->handler); if (action->thread_fn) - printk(KERN_CONT " threaded [<%p>] %ps", - action->thread_fn, action->thread_fn); - printk(KERN_CONT "\n"); + pr_cont(" threaded [<%p>] %ps", action->thread_fn, action->thread_fn); + pr_cont("\n"); } - raw_spin_unlock_irqrestore(&desc->lock, flags); } static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) @@ -232,18 +221,17 @@ static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) } } -static inline int -try_misrouted_irq(unsigned int irq, struct irq_desc *desc, - irqreturn_t action_ret) +static inline bool try_misrouted_irq(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret) { struct irqaction *action; if (!irqfixup) - return 0; + return false; /* We didn't actually handle the IRQ - see if it was misrouted? 
*/ if (action_ret == IRQ_NONE) - return 1; + return true; /* * But for 'irqfixup == 2' we also do it for handled interrupts if @@ -251,19 +239,16 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc, * traditional PC timer interrupt.. Legacy) */ if (irqfixup < 2) - return 0; + return false; if (!irq) - return 1; + return true; /* * Since we don't get the descriptor lock, "action" can - * change under us. We don't really care, but we don't - * want to follow a NULL pointer. So tell the compiler to - * just load it once by using a barrier. + * change under us. */ - action = desc->action; - barrier(); + action = READ_ONCE(desc->action); return action && (action->flags & IRQF_IRQPOLL); } @@ -273,8 +258,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret) { unsigned int irq; - if (desc->istate & IRQS_POLL_INPROGRESS || - irq_settings_is_polled(desc)) + if (desc->istate & IRQS_POLL_INPROGRESS || irq_settings_is_polled(desc)) return; if (bad_action_ret(action_ret)) { @@ -420,13 +404,12 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret) /* * Now kill the IRQ */ - printk(KERN_EMERG "Disabling IRQ #%d\n", irq); + pr_emerg("Disabling IRQ #%d\n", irq); desc->istate |= IRQS_SPURIOUS_DISABLED; desc->depth++; irq_disable(desc); - mod_timer(&poll_spurious_irq_timer, - jiffies + POLL_SPURIOUS_IRQ_INTERVAL); + mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); } desc->irqs_unhandled = 0; } @@ -436,11 +419,9 @@ bool noirqdebug __read_mostly; int noirqdebug_setup(char *str) { noirqdebug = 1; - printk(KERN_INFO "IRQ lockup detection disabled\n"); - + pr_info("IRQ lockup detection disabled\n"); return 1; } - __setup("noirqdebug", noirqdebug_setup); module_param(noirqdebug, bool, 0644); MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); @@ -452,12 +433,10 @@ static int __init irqfixup_setup(char *str) return 1; } irqfixup = 1; - printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); - printk(KERN_WARNING "This may impact system performance.\n"); - + pr_warn("Misrouted IRQ fixup support enabled.\n"); + pr_warn("This may impact system performance.\n"); return 1; } - __setup("irqfixup", irqfixup_setup); module_param(irqfixup, int, 0644); @@ -468,11 +447,8 @@ static int __init irqpoll_setup(char *str) return 1; } irqfixup = 2; - printk(KERN_WARNING "Misrouted IRQ fixup and polling support " - "enabled\n"); - printk(KERN_WARNING "This may significantly impact system " - "performance\n"); + pr_warn("Misrouted IRQ fixup and polling support enabled\n"); + pr_warn("This may significantly impact system performance\n"); return 1; } - __setup("irqpoll", irqpoll_setup); diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index ba069459c101..2351a19ac2a9 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -29,22 +29,13 @@ static unsigned int klp_signals_cnt; /* * When a livepatch is in progress, enable klp stack checking in - * cond_resched(). This helps CPU-bound kthreads get patched. + * schedule(). This helps CPU-bound kthreads get patched. 
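With the preempt-dynamic variant removed below, the static key is what gates the livepatch check on the scheduler path. The caller side presumably reduces to something like this sketch; the exact hook point in __schedule() is outside this hunk and may differ:

    /* Hypothetical shape of the scheduler-side hook. */
    static __always_inline void example_klp_sched_hook(void)
    {
            if (static_branch_unlikely(&klp_sched_try_switch_key))
                    __klp_sched_try_switch();
    }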
*/ -#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) - -#define klp_cond_resched_enable() sched_dynamic_klp_enable() -#define klp_cond_resched_disable() sched_dynamic_klp_disable() - -#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key); -EXPORT_SYMBOL(klp_sched_try_switch_key); -#define klp_cond_resched_enable() static_branch_enable(&klp_sched_try_switch_key) -#define klp_cond_resched_disable() static_branch_disable(&klp_sched_try_switch_key) - -#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ +#define klp_resched_enable() static_branch_enable(&klp_sched_try_switch_key) +#define klp_resched_disable() static_branch_disable(&klp_sched_try_switch_key) /* * This work can be performed periodically to finish patching or unpatching any @@ -365,26 +356,18 @@ static bool klp_try_switch_task(struct task_struct *task) void __klp_sched_try_switch(void) { - if (likely(!klp_patch_pending(current))) - return; - /* - * This function is called from cond_resched() which is called in many - * places throughout the kernel. Using the klp_mutex here might - * deadlock. - * - * Instead, disable preemption to prevent racing with other callers of - * klp_try_switch_task(). Thanks to task_call_func() they won't be - * able to switch this task while it's running. + * This function is called from __schedule() while a context switch is + * about to happen. Preemption is already disabled and klp_mutex + * can't be acquired. + * Disabled preemption is used to prevent racing with other callers of + * klp_try_switch_task(). Thanks to task_call_func() they won't be + * able to switch to this task while it's running. */ - preempt_disable(); + lockdep_assert_preemption_disabled(); - /* - * Make sure current didn't get patched between the above check and - * preempt_disable(). - */ - if (unlikely(!klp_patch_pending(current))) - goto out; + if (likely(!klp_patch_pending(current))) + return; /* * Enforce the order of the TIF_PATCH_PENDING read above and the @@ -395,11 +378,7 @@ void __klp_sched_try_switch(void) smp_rmb(); klp_try_switch_task(current); - -out: - preempt_enable(); } -EXPORT_SYMBOL(__klp_sched_try_switch); /* * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set. @@ -508,7 +487,7 @@ void klp_try_complete_transition(void) } /* Done! Now cleanup the data structures. 
*/ - klp_cond_resched_disable(); + klp_resched_disable(); patch = klp_transition_patch; klp_complete_transition(); @@ -560,7 +539,7 @@ void klp_start_transition(void) set_tsk_thread_flag(task, TIF_PATCH_PENDING); } - klp_cond_resched_enable(); + klp_resched_enable(); klp_signals_cnt = 0; } diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 58d78a33ac65..dd2bbf73718b 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -219,6 +219,7 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES); static struct hlist_head lock_keys_hash[KEYHASH_SIZE]; unsigned long nr_lock_classes; unsigned long nr_zapped_classes; +unsigned long nr_dynamic_keys; unsigned long max_lock_class_idx; struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS); @@ -1238,6 +1239,7 @@ void lockdep_register_key(struct lock_class_key *key) goto out_unlock; } hlist_add_head_rcu(&key->hash_entry, hash_head); + nr_dynamic_keys++; out_unlock: graph_unlock(); restore_irqs: @@ -1977,41 +1979,6 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth, } /* - * We are about to add A -> B into the dependency graph, and in __bfs() a - * strong dependency path A -> .. -> B is found: hlock_class equals - * entry->class. - * - * If A -> .. -> B can replace A -> B in any __bfs() search (means the former - * is _stronger_ than or equal to the latter), we consider A -> B as redundant. - * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A - * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the - * dependency graph, as any strong path ..-> A -> B ->.. we can get with - * having dependency A -> B, we could already get a equivalent path ..-> A -> - * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant. - * - * We need to make sure both the start and the end of A -> .. -> B is not - * weaker than A -> B. For the start part, please see the comment in - * check_redundant(). For the end part, we need: - * - * Either - * - * a) A -> B is -(*R)-> (everything is not weaker than that) - * - * or - * - * b) A -> .. -> B is -(*N)-> (nothing is stronger than this) - * - */ -static inline bool hlock_equal(struct lock_list *entry, void *data) -{ - struct held_lock *hlock = (struct held_lock *)data; - - return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ - (hlock->read == 2 || /* A -> B is -(*R)-> */ - !entry->only_xr); /* A -> .. -> B is -(*N)-> */ -} - -/* * We are about to add B -> A into the dependency graph, and in __bfs() a * strong dependency path A -> .. -> B is found: hlock_class equals * entry->class. @@ -2916,6 +2883,41 @@ static inline bool usage_skip(struct lock_list *entry, void *mask) #ifdef CONFIG_LOCKDEP_SMALL /* + * We are about to add A -> B into the dependency graph, and in __bfs() a + * strong dependency path A -> .. -> B is found: hlock_class equals + * entry->class. + * + * If A -> .. -> B can replace A -> B in any __bfs() search (means the former + * is _stronger_ than or equal to the latter), we consider A -> B as redundant. + * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A + * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the + * dependency graph, as any strong path ..-> A -> B ->.. we can get with + * having dependency A -> B, we could already get a equivalent path ..-> A -> + * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant. 
+ * + * We need to make sure both the start and the end of A -> .. -> B is not + * weaker than A -> B. For the start part, please see the comment in + * check_redundant(). For the end part, we need: + * + * Either + * + * a) A -> B is -(*R)-> (everything is not weaker than that) + * + * or + * + * b) A -> .. -> B is -(*N)-> (nothing is stronger than this) + * + */ +static inline bool hlock_equal(struct lock_list *entry, void *data) +{ + struct held_lock *hlock = (struct held_lock *)data; + + return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */ + (hlock->read == 2 || /* A -> B is -(*R)-> */ + !entry->only_xr); /* A -> .. -> B is -(*N)-> */ +} + +/* * Check that the dependency graph starting at <src> can lead to * <target> or not. If it can, <src> -> <target> dependency is already * in the graph. @@ -5101,6 +5103,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, lockevent_inc(lockdep_nocheck); } + if (DEBUG_LOCKS_WARN_ON(subclass >= MAX_LOCKDEP_SUBCLASSES)) + return 0; + if (subclass < NR_LOCKDEP_CACHING_CLASSES) class = lock->class_cache[subclass]; /* @@ -6606,6 +6611,7 @@ void lockdep_unregister_key(struct lock_class_key *key) pf = get_pending_free(); __lockdep_free_key_range(pf, key, 1); need_callback = prepare_call_rcu_zapped(pf); + nr_dynamic_keys--; } lockdep_unlock(); raw_local_irq_restore(flags); diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index 20f9ef58d3d0..82156caf77d1 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -138,6 +138,7 @@ extern unsigned long nr_lock_classes; extern unsigned long nr_zapped_classes; extern unsigned long nr_zapped_lock_chains; extern unsigned long nr_list_entries; +extern unsigned long nr_dynamic_keys; long lockdep_next_lockchain(long i); unsigned long lock_chain_count(void); extern unsigned long nr_stack_trace_entries; diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 6db0f43fc4df..b52c07c4707c 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -286,6 +286,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v) #endif seq_printf(m, " lock-classes: %11lu [max: %lu]\n", nr_lock_classes, MAX_LOCKDEP_KEYS); + seq_printf(m, " dynamic-keys: %11lu\n", + nr_dynamic_keys); seq_printf(m, " direct dependencies: %11lu [max: %lu]\n", nr_list_entries, MAX_LOCKDEP_ENTRIES); seq_printf(m, " indirect dependencies: %11lu\n", diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c index d6964fc29f51..ef234469baac 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -138,7 +138,8 @@ static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry, return !reader; /* wake (readers until) 1 writer */ } -static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader) +static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader, + bool freeze) { DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function); bool wait; @@ -156,7 +157,8 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader) spin_unlock_irq(&sem->waiters.lock); while (wait) { - set_current_state(TASK_UNINTERRUPTIBLE); + set_current_state(TASK_UNINTERRUPTIBLE | + (freeze ? 
TASK_FREEZABLE : 0)); if (!smp_load_acquire(&wq_entry.private)) break; schedule(); @@ -164,7 +166,8 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader) __set_current_state(TASK_RUNNING); } -bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try) +bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try, + bool freeze) { if (__percpu_down_read_trylock(sem)) return true; @@ -174,7 +177,7 @@ bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try) trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_READ); preempt_enable(); - percpu_rwsem_wait(sem, /* .reader = */ true); + percpu_rwsem_wait(sem, /* .reader = */ true, freeze); preempt_disable(); trace_contention_end(sem, 0); @@ -237,7 +240,7 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem) */ if (!__percpu_down_write_trylock(sem)) { trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE); - percpu_rwsem_wait(sem, /* .reader = */ false); + percpu_rwsem_wait(sem, /* .reader = */ false, false); contended = true; } diff --git a/kernel/module/main.c b/kernel/module/main.c index a2859dc3eea6..5c6ab20240a6 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2829,6 +2829,7 @@ static void module_deallocate(struct module *mod, struct load_info *info) { percpu_modfree(mod); module_arch_freeing_init(mod); + codetag_free_module_sections(mod); free_mod_mem(mod); } diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index c9d97ed20122..5f31fdff8a38 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -128,17 +128,13 @@ out_time: out_net: put_cgroup_ns(new_nsp->cgroup_ns); out_cgroup: - if (new_nsp->pid_ns_for_children) - put_pid_ns(new_nsp->pid_ns_for_children); + put_pid_ns(new_nsp->pid_ns_for_children); out_pid: - if (new_nsp->ipc_ns) - put_ipc_ns(new_nsp->ipc_ns); + put_ipc_ns(new_nsp->ipc_ns); out_ipc: - if (new_nsp->uts_ns) - put_uts_ns(new_nsp->uts_ns); + put_uts_ns(new_nsp->uts_ns); out_uts: - if (new_nsp->mnt_ns) - put_mnt_ns(new_nsp->mnt_ns); + put_mnt_ns(new_nsp->mnt_ns); out_ns: kmem_cache_free(nsproxy_cachep, new_nsp); return ERR_PTR(err); @@ -189,18 +185,12 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) void free_nsproxy(struct nsproxy *ns) { - if (ns->mnt_ns) - put_mnt_ns(ns->mnt_ns); - if (ns->uts_ns) - put_uts_ns(ns->uts_ns); - if (ns->ipc_ns) - put_ipc_ns(ns->ipc_ns); - if (ns->pid_ns_for_children) - put_pid_ns(ns->pid_ns_for_children); - if (ns->time_ns) - put_time_ns(ns->time_ns); - if (ns->time_ns_for_children) - put_time_ns(ns->time_ns_for_children); + put_mnt_ns(ns->mnt_ns); + put_uts_ns(ns->uts_ns); + put_ipc_ns(ns->ipc_ns); + put_pid_ns(ns->pid_ns_for_children); + put_time_ns(ns->time_ns); + put_time_ns(ns->time_ns_for_children); put_cgroup_ns(ns->cgroup_ns); put_net(ns->net_ns); kmem_cache_free(nsproxy_cachep, ns); diff --git a/kernel/padata.c b/kernel/padata.c index b3d4eacc4f5d..7eee94166357 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -358,7 +358,8 @@ static void padata_reorder(struct parallel_data *pd) * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish. 
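queue_work() returns false when the work item is already pending; in that case no additional execution of the work function will run to consume the reference taken just above, so the padata fix below drops it immediately. The same take-then-undo pattern in generic form (all names hypothetical):

    struct example_ctx {
            struct kref ref;
            struct work_struct work;
    };

    static void example_release(struct kref *ref)
    {
            kfree(container_of(ref, struct example_ctx, ref));
    }

    static void example_kick(struct example_ctx *ctx)
    {
            /* Reference for the work item. */
            kref_get(&ctx->ref);
            if (!queue_work(system_wq, &ctx->work))
                    kref_put(&ctx->ref, example_release);   /* already queued: undo */
    }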
*/ padata_get_pd(pd); - queue_work(pinst->serial_wq, &pd->reorder_work); + if (!queue_work(pinst->serial_wq, &pd->reorder_work)) + padata_put_pd(pd); } } diff --git a/kernel/pid.c b/kernel/pid.c index 4ac2ce46817f..8317bcbc7cf7 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -100,6 +100,7 @@ void put_pid(struct pid *pid) ns = pid->numbers[pid->level].ns; if (refcount_dec_and_test(&pid->count)) { + WARN_ON_ONCE(pid->stashed); kmem_cache_free(ns->pid_cachep, pid); put_pid_ns(ns); } @@ -359,11 +360,6 @@ static void __change_pid(struct pid **pids, struct task_struct *task, hlist_del_rcu(&task->pid_links[type]); *pid_ptr = new; - if (type == PIDTYPE_PID) { - WARN_ON_ONCE(pid_has_task(pid, PIDTYPE_PID)); - wake_up_all(&pid->wait_pidfd); - } - for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (pid_has_task(pid, tmp)) return; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 23c0f4e6cb2f..338c9917d4ee 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -778,6 +778,8 @@ int hibernate(void) goto Restore; ksys_sync_helper(); + if (filesystem_freeze_enabled) + filesystems_freeze(); error = freeze_processes(); if (error) @@ -846,6 +848,7 @@ int hibernate(void) /* Don't bother checking whether freezer_test_done is true */ freezer_test_done = false; Exit: + filesystems_thaw(); pm_notifier_call_chain(PM_POST_HIBERNATION); Restore: pm_restore_console(); @@ -882,6 +885,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data) if (error) goto restore; + if (filesystem_freeze_enabled) + filesystems_freeze(); + error = freeze_processes(); if (error) goto exit; @@ -941,6 +947,7 @@ thaw: thaw_processes(); exit: + filesystems_thaw(); pm_notifier_call_chain(PM_POST_HIBERNATION); restore: @@ -1029,19 +1036,26 @@ static int software_resume(void) if (error) goto Restore; + if (filesystem_freeze_enabled) + filesystems_freeze(); + pm_pr_dbg("Preparing processes for hibernation restore.\n"); error = freeze_processes(); - if (error) + if (error) { + filesystems_thaw(); goto Close_Finish; + } error = freeze_kernel_threads(); if (error) { thaw_processes(); + filesystems_thaw(); goto Close_Finish; } error = load_image_and_restore(); thaw_processes(); + filesystems_thaw(); Finish: pm_notifier_call_chain(PM_POST_RESTORE); Restore: diff --git a/kernel/power/main.c b/kernel/power/main.c index 6254814d4817..0b0e76324c43 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -962,6 +962,34 @@ power_attr(pm_freeze_timeout); #endif /* CONFIG_FREEZER*/ +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) +bool filesystem_freeze_enabled = false; + +static ssize_t freeze_filesystems_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", filesystem_freeze_enabled); +} + +static ssize_t freeze_filesystems_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + unsigned long val; + + if (kstrtoul(buf, 10, &val)) + return -EINVAL; + + if (val > 1) + return -EINVAL; + + filesystem_freeze_enabled = !!val; + return n; +} + +power_attr(freeze_filesystems); +#endif /* CONFIG_SUSPEND || CONFIG_HIBERNATION */ + static struct attribute * g[] = { &state_attr.attr, #ifdef CONFIG_PM_TRACE @@ -992,6 +1020,9 @@ static struct attribute * g[] = { #ifdef CONFIG_FREEZER &pm_freeze_timeout_attr.attr, #endif +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) + &freeze_filesystems_attr.attr, +#endif NULL, }; diff --git a/kernel/power/power.h b/kernel/power/power.h index c352dea2f67b..2eb81662b8fa 100644 --- 
a/kernel/power/power.h +++ b/kernel/power/power.h @@ -18,6 +18,10 @@ struct swsusp_info { unsigned long size; } __aligned(PAGE_SIZE); +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) +extern bool filesystem_freeze_enabled; +#endif + #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ extern void __init hibernate_reserved_size_init(void); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8eaec4ab121d..76b141b9aac0 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -30,6 +30,7 @@ #include <trace/events/power.h> #include <linux/compiler.h> #include <linux/moduleparam.h> +#include <linux/fs.h> #include "power.h" @@ -374,6 +375,8 @@ static int suspend_prepare(suspend_state_t state) if (error) goto Restore; + if (filesystem_freeze_enabled) + filesystems_freeze(); trace_suspend_resume(TPS("freeze_processes"), 0, true); error = suspend_freeze_processes(); trace_suspend_resume(TPS("freeze_processes"), 0, false); @@ -550,6 +553,7 @@ int suspend_devices_and_enter(suspend_state_t state) static void suspend_finish(void) { suspend_thaw_processes(); + filesystems_thaw(); pm_notifier_call_chain(PM_POST_SUSPEND); pm_restore_console(); } @@ -588,6 +592,8 @@ static int enter_state(suspend_state_t state) ksys_sync_helper(); trace_suspend_resume(TPS("sync_filesystems"), 0, false); } + if (filesystem_freeze_enabled) + filesystems_freeze(); pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); pm_suspend_clear_flags(); @@ -609,6 +615,7 @@ static int enter_state(suspend_state_t state) pm_pr_dbg("Finishing wakeup.\n"); suspend_finish(); Unlock: + filesystems_thaw(); mutex_unlock(&system_transition_mutex); return error; } diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 80ff5f933a62..ad13c461b657 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -268,35 +268,26 @@ static void hib_end_io(struct bio *bio) bio_put(bio); } -static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr, +static int hib_submit_io_sync(blk_opf_t opf, pgoff_t page_off, void *addr) +{ + return bdev_rw_virt(file_bdev(hib_resume_bdev_file), + page_off * (PAGE_SIZE >> 9), addr, PAGE_SIZE, opf); +} + +static int hib_submit_io_async(blk_opf_t opf, pgoff_t page_off, void *addr, struct hib_bio_batch *hb) { - struct page *page = virt_to_page(addr); struct bio *bio; - int error = 0; bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf, GFP_NOIO | __GFP_HIGH); bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); - - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { - pr_err("Adding page to bio failed at %llu\n", - (unsigned long long)bio->bi_iter.bi_sector); - bio_put(bio); - return -EFAULT; - } - - if (hb) { - bio->bi_end_io = hib_end_io; - bio->bi_private = hb; - atomic_inc(&hb->count); - submit_bio(bio); - } else { - error = submit_bio_wait(bio); - bio_put(bio); - } - - return error; + bio_add_virt_nofail(bio, addr, PAGE_SIZE); + bio->bi_end_io = hib_end_io; + bio->bi_private = hb; + atomic_inc(&hb->count); + submit_bio(bio); + return 0; } static int hib_wait_io(struct hib_bio_batch *hb) @@ -316,7 +307,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) { int error; - hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL); + hib_submit_io_sync(REQ_OP_READ, swsusp_resume_block, swsusp_header); if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); @@ -329,8 +320,8 @@ static int 
mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) swsusp_header->flags = flags; if (flags & SF_CRC32_MODE) swsusp_header->crc32 = handle->crc32; - error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC, - swsusp_resume_block, swsusp_header, NULL); + error = hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC, + swsusp_resume_block, swsusp_header); } else { pr_err("Swap header not found!\n"); error = -ENODEV; @@ -380,36 +371,30 @@ static int swsusp_swap_check(void) static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) { + gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; void *src; int ret; if (!offset) return -ENOSPC; - if (hb) { - src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN | - __GFP_NORETRY); - if (src) { - copy_page(src, buf); - } else { - ret = hib_wait_io(hb); /* Free pages */ - if (ret) - return ret; - src = (void *)__get_free_page(GFP_NOIO | - __GFP_NOWARN | - __GFP_NORETRY); - if (src) { - copy_page(src, buf); - } else { - WARN_ON_ONCE(1); - hb = NULL; /* Go synchronous */ - src = buf; - } - } - } else { - src = buf; + if (!hb) + goto sync_io; + + src = (void *)__get_free_page(gfp); + if (!src) { + ret = hib_wait_io(hb); /* Free pages */ + if (ret) + return ret; + src = (void *)__get_free_page(gfp); + if (WARN_ON_ONCE(!src)) + goto sync_io; } - return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb); + + copy_page(src, buf); + return hib_submit_io_async(REQ_OP_WRITE | REQ_SYNC, offset, src, hb); +sync_io: + return hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC, offset, buf); } static void release_swap_writer(struct swap_map_handle *handle) @@ -1041,7 +1026,7 @@ static int get_swap_reader(struct swap_map_handle *handle, return -ENOMEM; } - error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL); + error = hib_submit_io_sync(REQ_OP_READ, offset, tmp->map); if (error) { release_swap_reader(handle); return error; @@ -1065,7 +1050,10 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf, offset = handle->cur->entries[handle->k]; if (!offset) return -EFAULT; - error = hib_submit_io(REQ_OP_READ, offset, buf, hb); + if (hb) + error = hib_submit_io_async(REQ_OP_READ, offset, buf, hb); + else + error = hib_submit_io_sync(REQ_OP_READ, offset, buf); if (error) return error; if (++handle->k >= MAP_PAGE_ENTRIES) { @@ -1590,8 +1578,8 @@ int swsusp_check(bool exclusive) BLK_OPEN_READ, holder, NULL); if (!IS_ERR(hib_resume_bdev_file)) { clear_page(swsusp_header); - error = hib_submit_io(REQ_OP_READ, swsusp_resume_block, - swsusp_header, NULL); + error = hib_submit_io_sync(REQ_OP_READ, swsusp_resume_block, + swsusp_header); if (error) goto put; @@ -1599,9 +1587,9 @@ int swsusp_check(bool exclusive) memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); swsusp_header_flags = swsusp_header->flags; /* Reset swap signature now */ - error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC, + error = hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC, swsusp_resume_block, - swsusp_header, NULL); + swsusp_header); } else { error = -EINVAL; } @@ -1650,13 +1638,12 @@ int swsusp_unmark(void) { int error; - hib_submit_io(REQ_OP_READ, swsusp_resume_block, - swsusp_header, NULL); + hib_submit_io_sync(REQ_OP_READ, swsusp_resume_block, swsusp_header); if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); - error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC, + error = hib_submit_io_sync(REQ_OP_WRITE | REQ_SYNC, swsusp_resume_block, - swsusp_header, NULL); + swsusp_header); } else { pr_err("Cannot find swsusp signature!\n"); 
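The reworked write_page() earlier in this kernel/power/swap.c diff splits hib_submit_io() into explicit sync and async variants and straightens out the bounce-buffer fallback: allocate a page to copy into, retry once after waiting for in-flight writes to release pages, and only then degrade to a synchronous write of the caller's buffer. A runnable user-space sketch of just that control flow; every helper here is a toy stand-in that only logs what the kernel function would do:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define PAGE_SZ 4096

  static int fail_allocs;                      /* simulate memory pressure */

  static void *get_bounce_page(void)           /* stands in for __get_free_page() */
  {
          return fail_allocs-- > 0 ? NULL : malloc(PAGE_SZ);
  }

  static int wait_for_batch(void)              /* stands in for hib_wait_io() */
  {
          puts("waiting for in-flight writes to release bounce pages");
          return 0;
  }

  static int submit_sync(const void *buf)      /* stands in for hib_submit_io_sync() */
  {
          puts("synchronous write straight from the caller's buffer");
          return 0;
  }

  static int submit_async(void *page)          /* stands in for hib_submit_io_async() */
  {
          puts("asynchronous write of the bounce copy");
          free(page);
          return 0;
  }

  /* Same decision ladder as the reshaped write_page(): prefer an async write
   * of a bounce copy, retry the allocation once after draining in-flight I/O,
   * and only then fall back to a synchronous write of the caller's buffer. */
  static int write_page_model(void *buf, int have_batch)
  {
          void *src;

          if (!have_batch)
                  return submit_sync(buf);

          src = get_bounce_page();
          if (!src) {
                  if (wait_for_batch())
                          return -1;
                  src = get_bounce_page();
                  if (!src)
                          return submit_sync(buf);
          }
          memcpy(src, buf, PAGE_SZ);
          return submit_async(src);
  }

  int main(void)
  {
          static char page[PAGE_SZ];

          fail_allocs = 2;                     /* both attempts fail: sync fallback */
          write_page_model(page, 1);
          fail_allocs = 0;                     /* memory available: bounce + async */
          write_page_model(page, 1);
          return 0;
  }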
error = -ENODEV; diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index eed2951a4962..9cf01832a6c3 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -57,6 +57,9 @@ /* Low-order bit definition for polled grace-period APIs. */ #define RCU_GET_STATE_COMPLETED 0x1 +/* A complete grace period count */ +#define RCU_SEQ_GP (RCU_SEQ_STATE_MASK + 1) + extern int sysctl_sched_rt_runtime; /* @@ -157,12 +160,21 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s) * Given a snapshot from rcu_seq_snap(), determine whether or not a * full update-side operation has occurred, but do not allow the * (ULONG_MAX / 2) safety-factor/guard-band. + * + * The token returned by get_state_synchronize_rcu_full() is based on + * rcu_state.gp_seq but it is tested in poll_state_synchronize_rcu_full() + * against the root rnp->gp_seq. Since rcu_seq_start() is first called + * on rcu_state.gp_seq and only later reflected on the root rnp->gp_seq, + * it is possible that rcu_seq_snap(rcu_state.gp_seq) returns 2 full grace + * periods ahead of the root rnp->gp_seq. To prevent false-positives with the + * full polling API that a wrap around instantly completed the GP, when nothing + * like that happened, adjust for the 2 GPs in the ULONG_CMP_LT(). */ static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s) { unsigned long cur_s = READ_ONCE(*sp); - return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (3 * RCU_SEQ_STATE_MASK + 1)); + return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_GP)); } /* @@ -572,6 +584,8 @@ void do_trace_rcu_torture_read(const char *rcutorturename, unsigned long c_old, unsigned long c); void rcu_gp_set_torture_wait(int duration); +void rcu_set_gpwrap_lag(unsigned long lag); +int rcu_get_gpwrap_count(int cpu); #else static inline void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq) { @@ -589,6 +603,8 @@ void do_trace_rcu_torture_read(const char *rcutorturename, do { } while (0) #endif static inline void rcu_gp_set_torture_wait(int duration) { } +static inline void rcu_set_gpwrap_lag(unsigned long lag) { } +static inline int rcu_get_gpwrap_count(int cpu) { return 0; } #endif unsigned long long rcutorture_gather_gp_seqs(void); void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len); diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 0f3059b1b80d..b521d0455992 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -762,7 +762,7 @@ kfree_scale_thread(void *arg) } for (i = 0; i < kfree_alloc_num; i++) { - alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL); + alloc_ptr = kcalloc(kfree_mult, sizeof(struct kfree_obj), GFP_KERNEL); if (!alloc_ptr) return -ENOMEM; diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 4fa7772be183..b5db1ac32a3d 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -115,6 +115,10 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads"); torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing"); torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable"); +torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing"); +torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period."); +torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)"); +torture_param(int, 
gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)"); torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable"); torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)"); torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable"); @@ -413,6 +417,8 @@ struct rcu_torture_ops { bool (*reader_blocked)(void); unsigned long long (*gather_gp_seqs)(void); void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len); + void (*set_gpwrap_lag)(unsigned long lag); + int (*get_gpwrap_count)(int cpu); long cbflood_max; int irq_capable; int can_boost; @@ -619,6 +625,8 @@ static struct rcu_torture_ops rcu_ops = { : NULL, .gather_gp_seqs = rcutorture_gather_gp_seqs, .format_gp_seqs = rcutorture_format_gp_seqs, + .set_gpwrap_lag = rcu_set_gpwrap_lag, + .get_gpwrap_count = rcu_get_gpwrap_count, .irq_capable = 1, .can_boost = IS_ENABLED(CONFIG_RCU_BOOST), .extendables = RCUTORTURE_MAX_EXTEND, @@ -2164,53 +2172,70 @@ rcutorture_loop_extend(int *readstate, bool insoftirq, struct torture_random_sta return &rtrsp[j]; } -/* - * Do one read-side critical section, returning false if there was - * no data to read. Can be invoked both from process context and - * from a timer handler. - */ -static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) -{ - bool checkpolling = !(torture_random(trsp) & 0xfff); +struct rcu_torture_one_read_state { + bool checkpolling; unsigned long cookie; struct rcu_gp_oldstate cookie_full; - int i; unsigned long started; - unsigned long completed; - int newstate; struct rcu_torture *p; - int pipe_count; - bool preempted = false; - int readstate = 0; - struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; - struct rt_read_seg *rtrsp = &rtseg[0]; - struct rt_read_seg *rtrsp1; + int readstate; + struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS]; + struct rt_read_seg *rtrsp; unsigned long long ts; +}; - WARN_ON_ONCE(!rcu_is_watching()); - newstate = rcutorture_extend_mask(readstate, trsp); - rcutorture_one_extend(&readstate, newstate, myid < 0, trsp, rtrsp++); - if (checkpolling) { +static void init_rcu_torture_one_read_state(struct rcu_torture_one_read_state *rtorsp, + struct torture_random_state *trsp) +{ + memset(rtorsp, 0, sizeof(*rtorsp)); + rtorsp->checkpolling = !(torture_random(trsp) & 0xfff); + rtorsp->rtrsp = &rtorsp->rtseg[0]; +} + +/* + * Set up the first segment of a series of overlapping read-side + * critical sections. The caller must have actually initiated the + * outermost read-side critical section. 
+ */ +static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp, + struct torture_random_state *trsp, long myid) +{ + if (rtorsp->checkpolling) { if (cur_ops->get_gp_state && cur_ops->poll_gp_state) - cookie = cur_ops->get_gp_state(); + rtorsp->cookie = cur_ops->get_gp_state(); if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) - cur_ops->get_gp_state_full(&cookie_full); + cur_ops->get_gp_state_full(&rtorsp->cookie_full); } - started = cur_ops->get_gp_seq(); - ts = rcu_trace_clock_local(); - p = rcu_dereference_check(rcu_torture_current, + rtorsp->started = cur_ops->get_gp_seq(); + rtorsp->ts = rcu_trace_clock_local(); + rtorsp->p = rcu_dereference_check(rcu_torture_current, !cur_ops->readlock_held || cur_ops->readlock_held()); - if (p == NULL) { + if (rtorsp->p == NULL) { /* Wait for rcu_torture_writer to get underway */ - rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp); + rcutorture_one_extend(&rtorsp->readstate, 0, myid < 0, trsp, rtorsp->rtrsp); return false; } - if (p->rtort_mbtest == 0) + if (rtorsp->p->rtort_mbtest == 0) atomic_inc(&n_rcu_torture_mberror); - rcu_torture_reader_do_mbchk(myid, p, trsp); - rtrsp = rcutorture_loop_extend(&readstate, myid < 0, trsp, rtrsp); + rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp); + return true; +} + +/* + * Complete the last segment of a series of overlapping read-side + * critical sections and check for errors. + */ +static void rcu_torture_one_read_end(struct rcu_torture_one_read_state *rtorsp, + struct torture_random_state *trsp, long myid) +{ + int i; + unsigned long completed; + int pipe_count; + bool preempted = false; + struct rt_read_seg *rtrsp1; + preempt_disable(); - pipe_count = READ_ONCE(p->rtort_pipe_count); + pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count); if (pipe_count > RCU_TORTURE_PIPE_LEN) { // Should not happen in a correct RCU implementation, // happens quite often for torture_type=busted. @@ -2218,28 +2243,28 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) } completed = cur_ops->get_gp_seq(); if (pipe_count > 1) { - do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, - ts, started, completed); + do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu, + rtorsp->ts, rtorsp->started, completed); rcu_ftrace_dump(DUMP_ALL); } __this_cpu_inc(rcu_torture_count[pipe_count]); - completed = rcutorture_seq_diff(completed, started); + completed = rcutorture_seq_diff(completed, rtorsp->started); if (completed > RCU_TORTURE_PIPE_LEN) { /* Should not happen, but... 
*/ completed = RCU_TORTURE_PIPE_LEN; } __this_cpu_inc(rcu_torture_batch[completed]); preempt_enable(); - if (checkpolling) { + if (rtorsp->checkpolling) { if (cur_ops->get_gp_state && cur_ops->poll_gp_state) - WARN_ONCE(cur_ops->poll_gp_state(cookie), + WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie), "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", __func__, rcu_torture_writer_state_getname(), rcu_torture_writer_state, - cookie, cur_ops->get_gp_state()); + rtorsp->cookie, cur_ops->get_gp_state()); if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) - WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), + WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full), "%s: Cookie check 6 failed %s(%d) online %*pbl\n", __func__, rcu_torture_writer_state_getname(), @@ -2248,21 +2273,42 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) } if (cur_ops->reader_blocked) preempted = cur_ops->reader_blocked(); - rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp); - WARN_ON_ONCE(readstate); + rcutorture_one_extend(&rtorsp->readstate, 0, myid < 0, trsp, rtorsp->rtrsp); + WARN_ON_ONCE(rtorsp->readstate); // This next splat is expected behavior if leakpointer, especially // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. - WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); + WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1); /* If error or close call, record the sequence of reader protections. */ if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { i = 0; - for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) + for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++) err_segs[i++] = *rtrsp1; rt_read_nsegs = i; rt_read_preempted = preempted; } +} +/* + * Do one read-side critical section, returning false if there was + * no data to read. Can be invoked both from process context and + * from a timer handler. + */ +static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) +{ + int newstate; + struct rcu_torture_one_read_state rtors; + + WARN_ON_ONCE(!rcu_is_watching()); + init_rcu_torture_one_read_state(&rtors, trsp); + newstate = rcutorture_extend_mask(rtors.readstate, trsp); + rcutorture_one_extend(&rtors.readstate, newstate, myid < 0, trsp, rtors.rtrsp++); + if (!rcu_torture_one_read_start(&rtors, trsp, myid)) { + rcutorture_one_extend(&rtors.readstate, 0, myid < 0, trsp, rtors.rtrsp); + return false; + } + rtors.rtrsp = rcutorture_loop_extend(&rtors.readstate, myid < 0, trsp, rtors.rtrsp); + rcu_torture_one_read_end(&rtors, trsp, myid); return true; } @@ -2307,7 +2353,7 @@ rcu_torture_reader(void *arg) set_user_nice(current, MAX_NICE); if (irqreader && cur_ops->irq_capable) timer_setup_on_stack(&t, rcu_torture_timer, 0); - tick_dep_set_task(current, TICK_DEP_BIT_RCU); + tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. 
do { if (irqreader && cur_ops->irq_capable) { if (!timer_pending(&t)) @@ -2394,6 +2440,7 @@ rcu_torture_stats_print(void) int i; long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; + long n_gpwraps = 0; struct rcu_torture *rtcp; static unsigned long rtcv_snap = ULONG_MAX; static bool splatted; @@ -2404,6 +2451,8 @@ rcu_torture_stats_print(void) pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); } + if (cur_ops->get_gpwrap_count) + n_gpwraps += cur_ops->get_gpwrap_count(cpu); } for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { if (pipesummary[i] != 0) @@ -2435,8 +2484,9 @@ rcu_torture_stats_print(void) data_race(n_barrier_attempts), data_race(n_rcu_torture_barrier_error)); pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. - pr_cont("nocb-toggles: %ld:%ld\n", + pr_cont("nocb-toggles: %ld:%ld ", atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); + pr_cont("gpwraps: %ld\n", n_gpwraps); pr_alert("%s%s ", torture_type, TORTURE_FLAG); if (atomic_read(&n_rcu_torture_mberror) || @@ -3036,7 +3086,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) cver = READ_ONCE(rcu_torture_current_version); gps = cur_ops->get_gp_seq(); rfp->rcu_launder_gp_seq_start = gps; - tick_dep_set_task(current, TICK_DEP_BIT_RCU); + tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick. while (time_before(jiffies, stopat) && !shutdown_time_arrived() && !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { @@ -3607,6 +3657,57 @@ static int rcu_torture_preempt(void *unused) static enum cpuhp_state rcutor_hp; +static struct hrtimer gpwrap_lag_timer; +static bool gpwrap_lag_active; + +/* Timer handler for toggling RCU grace-period sequence overflow test lag value */ +static enum hrtimer_restart rcu_gpwrap_lag_timer(struct hrtimer *timer) +{ + ktime_t next_delay; + + if (gpwrap_lag_active) { + pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n"); + cur_ops->set_gpwrap_lag(0); + gpwrap_lag_active = false; + next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0); + } else { + pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n", gpwrap_lag_gps); + cur_ops->set_gpwrap_lag(gpwrap_lag_gps); + gpwrap_lag_active = true; + next_delay = ktime_set(gpwrap_lag_active_mins * 60, 0); + } + + if (torture_must_stop_irq()) + return HRTIMER_NORESTART; + + hrtimer_forward_now(timer, next_delay); + return HRTIMER_RESTART; +} + +static int rcu_gpwrap_lag_init(void) +{ + if (!gpwrap_lag) + return 0; + + if (gpwrap_lag_cycle_mins <= 0 || gpwrap_lag_active_mins <= 0) { + pr_alert("rcu-torture: lag timing parameters must be positive\n"); + return -EINVAL; + } + + hrtimer_setup(&gpwrap_lag_timer, rcu_gpwrap_lag_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + gpwrap_lag_active = false; + hrtimer_start(&gpwrap_lag_timer, + ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL); + + return 0; +} + +static void rcu_gpwrap_lag_cleanup(void) +{ + hrtimer_cancel(&gpwrap_lag_timer); + cur_ops->set_gpwrap_lag(0); + gpwrap_lag_active = false; +} static void rcu_torture_cleanup(void) { @@ -3776,6 +3877,9 @@ rcu_torture_cleanup(void) torture_cleanup_end(); if (cur_ops->gp_slow_unregister) cur_ops->gp_slow_unregister(NULL); + + if (gpwrap_lag && cur_ops->set_gpwrap_lag) + rcu_gpwrap_lag_cleanup(); } static void rcu_torture_leak_cb(struct rcu_head *rhp) @@ -4272,9 +4376,17 @@ rcu_torture_init(void) } if 
(object_debug) rcu_test_debug_objects(); - torture_init_end(); + if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister)) cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay); + + if (gpwrap_lag && cur_ops->set_gpwrap_lag) { + firsterr = rcu_gpwrap_lag_init(); + if (torture_init_error(firsterr)) + goto unwind; + } + + torture_init_end(); return 0; unwind: diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 9a59b071501b..48047260697e 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1589,7 +1589,7 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) { if (cookie != SRCU_GET_STATE_COMPLETED && - !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie)) + !rcu_seq_done_exact(&ssp->srcu_sup->srcu_gp_seq, cookie)) return false; // Ensure that the end of the SRCU grace period happens before // any subsequent code that the caller might execute. diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 659f83e71048..e8a4b720d7d2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -80,6 +80,15 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *); static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { .gpwrap = true, }; + +int rcu_get_gpwrap_count(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + + return READ_ONCE(rdp->gpwrap_count); +} +EXPORT_SYMBOL_GPL(rcu_get_gpwrap_count); + static struct rcu_state rcu_state = { .level = { &rcu_state.node[0] }, .gp_state = RCU_GP_IDLE, @@ -757,6 +766,25 @@ void rcu_request_urgent_qs_task(struct task_struct *t) smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); } +static unsigned long seq_gpwrap_lag = ULONG_MAX / 4; + +/** + * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value. + * @lag_gps: Set overflow lag to this many grace period worth of counters + * which is used by rcutorture to quickly force a gpwrap situation. + * @lag_gps = 0 means we reset it back to the boot-time value. + */ +void rcu_set_gpwrap_lag(unsigned long lag_gps) +{ + unsigned long lag_seq_count; + + lag_seq_count = (lag_gps == 0) + ? 
ULONG_MAX / 4 + : lag_gps << RCU_SEQ_CTR_SHIFT; + WRITE_ONCE(seq_gpwrap_lag, lag_seq_count); +} +EXPORT_SYMBOL_GPL(rcu_set_gpwrap_lag); + /* * When trying to report a quiescent state on behalf of some other CPU, * it is our responsibility to check for and handle potential overflow @@ -767,9 +795,11 @@ void rcu_request_urgent_qs_task(struct task_struct *t) static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) { raw_lockdep_assert_held_rcu_node(rnp); - if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, - rnp->gp_seq)) + if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag, + rnp->gp_seq)) { WRITE_ONCE(rdp->gpwrap, true); + WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1); + } if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; } @@ -801,6 +831,10 @@ static int rcu_watching_snap_save(struct rcu_data *rdp) return 0; } +#ifndef arch_irq_stat_cpu +#define arch_irq_stat_cpu(cpu) 0 +#endif + /* * Returns positive if the specified CPU has passed through a quiescent state * by virtue of being in or having passed through an dynticks idle state since @@ -936,9 +970,9 @@ static int rcu_watching_snap_recheck(struct rcu_data *rdp) rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu); rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu); rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); - rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu); - rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu); - rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu); + rsrp->nr_hardirqs = kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu); + rsrp->nr_softirqs = kstat_cpu_softirqs_sum(cpu); + rsrp->nr_csw = nr_context_switches_cpu(cpu); rsrp->jiffies = jiffies; rsrp->gp_seq = rdp->gp_seq; } @@ -1060,38 +1094,6 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp) return needmore; } -static void swake_up_one_online_ipi(void *arg) -{ - struct swait_queue_head *wqh = arg; - - swake_up_one(wqh); -} - -static void swake_up_one_online(struct swait_queue_head *wqh) -{ - int cpu = get_cpu(); - - /* - * If called from rcutree_report_cpu_starting(), wake up - * is dangerous that late in the CPU-down hotplug process. The - * scheduler might queue an ignored hrtimer. Defer the wake up - * to an online CPU instead. - */ - if (unlikely(cpu_is_offline(cpu))) { - int target; - - target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU), - cpu_online_mask); - - smp_call_function_single(target, swake_up_one_online_ipi, - wqh, 0); - put_cpu(); - } else { - put_cpu(); - swake_up_one(wqh); - } -} - /* * Awaken the grace-period kthread. Don't do a self-awaken (unless in an * interrupt or softirq handler, in which case we just might immediately @@ -1116,7 +1118,7 @@ static void rcu_gp_kthread_wake(void) return; WRITE_ONCE(rcu_state.gp_wake_time, jiffies); WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); - swake_up_one_online(&rcu_state.gp_wq); + swake_up_one(&rcu_state.gp_wq); } /* @@ -1798,6 +1800,7 @@ static noinline_for_stack bool rcu_gp_init(void) struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(); bool start_new_poll; + unsigned long old_gp_seq; WRITE_ONCE(rcu_state.gp_activity, jiffies); raw_spin_lock_irq_rcu_node(rnp); @@ -1825,7 +1828,12 @@ static noinline_for_stack bool rcu_gp_init(void) */ start_new_poll = rcu_sr_normal_gp_init(); /* Record GP times before starting GP, hence rcu_seq_start(). 
*/ + old_gp_seq = rcu_state.gp_seq; rcu_seq_start(&rcu_state.gp_seq); + /* Ensure that rcu_seq_done_exact() guardband doesn't give false positives. */ + WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && + rcu_seq_done_exact(&old_gp_seq, rcu_seq_snap(&rcu_state.gp_seq))); + ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index a9a811d9d7a3..3830c19cf2f6 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -168,7 +168,7 @@ struct rcu_snap_record { u64 cputime_irq; /* Accumulated cputime of hard irqs */ u64 cputime_softirq;/* Accumulated cputime of soft irqs */ u64 cputime_system; /* Accumulated cputime of kernel tasks */ - unsigned long nr_hardirqs; /* Accumulated number of hard irqs */ + u64 nr_hardirqs; /* Accumulated number of hard irqs */ unsigned int nr_softirqs; /* Accumulated number of soft irqs */ unsigned long long nr_csw; /* Accumulated number of task switches */ unsigned long jiffies; /* Track jiffies value */ @@ -183,6 +183,7 @@ struct rcu_data { bool core_needs_qs; /* Core waits for quiescent state. */ bool beenonline; /* CPU online at least once. */ bool gpwrap; /* Possible ->gp_seq wrap. */ + unsigned int gpwrap_count; /* Count of GP sequence wrap. */ bool cpu_started; /* RCU watching this onlining CPU. */ struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ unsigned long grpmask; /* Mask to apply to leaf qsmask. */ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 8d4895c854c5..c36c7d5575ca 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -200,7 +200,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp, if (rnp->parent == NULL) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); if (wake) - swake_up_one_online(&rcu_state.expedited_wq); + swake_up_one(&rcu_state.expedited_wq); break; } diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index fa269d34167a..1596812f7f12 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -216,7 +216,7 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp, raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); if (needwake) { trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); - swake_up_one_online(&rdp_gp->nocb_gp_wq); + swake_up_one(&rdp_gp->nocb_gp_wq); } return needwake; @@ -554,19 +554,13 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY, TPS("WakeLazy")); - } else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) { + } else if (!irqs_disabled_flags(flags)) { /* ... if queue was empty ... */ rcu_nocb_unlock(rdp); wake_nocb_gp(rdp, false); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeEmpty")); } else { - /* - * Don't do the wake-up upfront on fragile paths. - * Also offline CPUs can't call swake_up_one_online() from - * (soft-)IRQs. 
Rely on the final deferred wake-up from - * rcutree_report_cpu_dead() - */ rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, TPS("WakeEmptyIsDeferred")); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 3c0bbbbb686f..0b0f56f6abc8 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -29,7 +29,7 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp) (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) || lockdep_is_held(&rdp->nocb_lock) || lockdep_is_held(&rcu_state.nocb_mutex) || - (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) && + ((!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) || softirq_count()) && rdp == this_cpu_ptr(&rcu_data)) || rcu_current_is_nocb_kthread(rdp)), "Unsafe read of RCU_NOCB offloaded state" diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 925fcdad5dea..56b21219442b 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -435,8 +435,8 @@ static void print_cpu_stat_info(int cpu) rsr.cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu); pr_err("\t hardirqs softirqs csw/system\n"); - pr_err("\t number: %8ld %10d %12lld\n", - kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs, + pr_err("\t number: %8lld %10d %12lld\n", + kstat_cpu_irqs_sum(cpu) + arch_irq_stat_cpu(cpu) - rsrp->nr_hardirqs, kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs, nr_context_switches_cpu(cpu) - rsrp->nr_csw); pr_err("\tcputime: %8lld %10lld %12lld ==> %d(ms)\n", diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c81cf642dba0..62b3416f5e43 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -66,6 +66,7 @@ #include <linux/vtime.h> #include <linux/wait_api.h> #include <linux/workqueue_api.h> +#include <linux/livepatch_sched.h> #ifdef CONFIG_PREEMPT_DYNAMIC # ifdef CONFIG_GENERIC_ENTRY @@ -1752,7 +1753,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, } } -static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) +static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { enum uclamp_id clamp_id; @@ -1768,7 +1769,8 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) if (unlikely(!p->sched_class->uclamp_enabled)) return; - if (p->se.sched_delayed) + /* Only inc the delayed task which being woken up. */ + if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED)) return; for_each_clamp_id(clamp_id) @@ -2036,7 +2038,7 @@ static void __init init_uclamp(void) } #else /* !CONFIG_UCLAMP_TASK */ -static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } +static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { } static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } static inline void uclamp_fork(struct task_struct *p) { } static inline void uclamp_post_fork(struct task_struct *p) { } @@ -2072,12 +2074,14 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & ENQUEUE_NOCLOCK)) update_rq_clock(rq); - p->sched_class->enqueue_task(rq, p, flags); /* - * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear - * ->sched_delayed. + * Can be before ->enqueue_task() because uclamp considers the + * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared + * in ->enqueue_task(). 
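The comment being added here rests on one detail: uclamp_rq_inc() can now run before ->enqueue_task() because it also honours the ENQUEUE_DELAYED flag instead of relying solely on ->sched_delayed, which that call is about to clear on a delayed wakeup. A tiny model of that ordering argument; the flag value and the field are illustrative, not the kernel definitions:

  #include <stdio.h>

  #define ENQUEUE_DELAYED 0x01        /* value is illustrative only */

  struct task { int sched_delayed; };

  /* Old rule: look at ->sched_delayed alone, so the answer is only correct
   * after the enqueue path has cleared it for an ENQUEUE_DELAYED wakeup. */
  static int account_old(const struct task *p)
  {
          return !p->sched_delayed;
  }

  /* New rule: also accept the wakeup flag, so the decision no longer depends
   * on whether ->sched_delayed has been cleared yet. */
  static int account_new(const struct task *p, int flags)
  {
          return !p->sched_delayed || (flags & ENQUEUE_DELAYED);
  }

  int main(void)
  {
          struct task p = { .sched_delayed = 1 };
          int flags = ENQUEUE_DELAYED;          /* waking a delayed task */

          /* Evaluated before ->enqueue_task() clears ->sched_delayed: */
          printf("old=%d new=%d\n", account_old(&p), account_new(&p, flags));
          return 0;
  }

The old predicate says "skip" when called early, the new one already says "account", which is what lets the call site move ahead of ->enqueue_task().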
*/ - uclamp_rq_inc(rq, p); + uclamp_rq_inc(rq, p, flags); + + p->sched_class->enqueue_task(rq, p, flags); psi_enqueue(p, flags); @@ -2283,6 +2287,12 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state * just go back and repeat. */ rq = task_rq_lock(p, &rf); + /* + * If task is sched_delayed, force dequeue it, to avoid always + * hitting the tick timeout in the queued case + */ + if (p->se.sched_delayed) + dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); trace_sched_wait_task(p); running = task_on_cpu(rq, p); queued = task_on_rq_queued(p); @@ -6571,12 +6581,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * Otherwise marks the task's __state as RUNNING */ static bool try_to_block_task(struct rq *rq, struct task_struct *p, - unsigned long task_state) + unsigned long *task_state_p) { + unsigned long task_state = *task_state_p; int flags = DEQUEUE_NOCLOCK; if (signal_pending_state(task_state, p)) { WRITE_ONCE(p->__state, TASK_RUNNING); + *task_state_p = TASK_RUNNING; return false; } @@ -6668,6 +6680,8 @@ static void __sched notrace __schedule(int sched_mode) if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) hrtick_clear(rq); + klp_sched_try_switch(prev); + local_irq_disable(); rcu_note_context_switch(preempt); @@ -6713,7 +6727,7 @@ static void __sched notrace __schedule(int sched_mode) goto picked; } } else if (!preempt && prev_state) { - try_to_block_task(rq, prev, prev_state); + try_to_block_task(rq, prev, &prev_state); switch_count = &prev->nvcsw; } @@ -7328,7 +7342,6 @@ EXPORT_STATIC_CALL_TRAMP(might_resched); static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); int __sched dynamic_cond_resched(void) { - klp_sched_try_switch(); if (!static_branch_unlikely(&sk_dynamic_cond_resched)) return 0; return __cond_resched(); @@ -7500,7 +7513,6 @@ int sched_dynamic_mode(const char *str) #endif static DEFINE_MUTEX(sched_dynamic_mutex); -static bool klp_override; static void __sched_dynamic_update(int mode) { @@ -7508,8 +7520,7 @@ static void __sched_dynamic_update(int mode) * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in * the ZERO state, which is invalid. 
*/ - if (!klp_override) - preempt_dynamic_enable(cond_resched); + preempt_dynamic_enable(cond_resched); preempt_dynamic_enable(might_resched); preempt_dynamic_enable(preempt_schedule); preempt_dynamic_enable(preempt_schedule_notrace); @@ -7518,8 +7529,7 @@ static void __sched_dynamic_update(int mode) switch (mode) { case preempt_dynamic_none: - if (!klp_override) - preempt_dynamic_enable(cond_resched); + preempt_dynamic_enable(cond_resched); preempt_dynamic_disable(might_resched); preempt_dynamic_disable(preempt_schedule); preempt_dynamic_disable(preempt_schedule_notrace); @@ -7530,8 +7540,7 @@ static void __sched_dynamic_update(int mode) break; case preempt_dynamic_voluntary: - if (!klp_override) - preempt_dynamic_enable(cond_resched); + preempt_dynamic_enable(cond_resched); preempt_dynamic_enable(might_resched); preempt_dynamic_disable(preempt_schedule); preempt_dynamic_disable(preempt_schedule_notrace); @@ -7542,8 +7551,7 @@ static void __sched_dynamic_update(int mode) break; case preempt_dynamic_full: - if (!klp_override) - preempt_dynamic_disable(cond_resched); + preempt_dynamic_disable(cond_resched); preempt_dynamic_disable(might_resched); preempt_dynamic_enable(preempt_schedule); preempt_dynamic_enable(preempt_schedule_notrace); @@ -7554,8 +7562,7 @@ static void __sched_dynamic_update(int mode) break; case preempt_dynamic_lazy: - if (!klp_override) - preempt_dynamic_disable(cond_resched); + preempt_dynamic_disable(cond_resched); preempt_dynamic_disable(might_resched); preempt_dynamic_enable(preempt_schedule); preempt_dynamic_enable(preempt_schedule_notrace); @@ -7576,36 +7583,6 @@ void sched_dynamic_update(int mode) mutex_unlock(&sched_dynamic_mutex); } -#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL - -static int klp_cond_resched(void) -{ - __klp_sched_try_switch(); - return __cond_resched(); -} - -void sched_dynamic_klp_enable(void) -{ - mutex_lock(&sched_dynamic_mutex); - - klp_override = true; - static_call_update(cond_resched, klp_cond_resched); - - mutex_unlock(&sched_dynamic_mutex); -} - -void sched_dynamic_klp_disable(void) -{ - mutex_lock(&sched_dynamic_mutex); - - klp_override = false; - __sched_dynamic_update(preempt_dynamic_mode); - - mutex_unlock(&sched_dynamic_mutex); -} - -#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ - static int __init setup_preempt_mode(char *str) { int mode = sched_dynamic_mode(str); @@ -9018,7 +8995,7 @@ void sched_online_group(struct task_group *tg, struct task_group *parent) unsigned long flags; spin_lock_irqsave(&task_group_lock, flags); - list_add_rcu(&tg->list, &task_groups); + list_add_tail_rcu(&tg->list, &task_groups); /* Root should already exist: */ WARN_ON(!parent); @@ -9204,11 +9181,15 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) struct task_struct *task; struct cgroup_subsys_state *css; + if (!rt_group_sched_enabled()) + goto scx_check; + cgroup_taskset_for_each(task, css, tset) { if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; } -#endif +scx_check: +#endif /* CONFIG_RT_GROUP_SCHED */ return scx_cgroup_can_attach(tset); } @@ -9861,18 +9842,6 @@ static struct cftype cpu_legacy_files[] = { .seq_show = cpu_cfs_local_stat_show, }, #endif -#ifdef CONFIG_RT_GROUP_SCHED - { - .name = "rt_runtime_us", - .read_s64 = cpu_rt_runtime_read, - .write_s64 = cpu_rt_runtime_write, - }, - { - .name = "rt_period_us", - .read_u64 = cpu_rt_period_read_uint, - .write_u64 = cpu_rt_period_write_uint, - }, -#endif #ifdef CONFIG_UCLAMP_TASK_GROUP { .name = "uclamp.min", @@ -9890,6 +9859,55 @@ static struct cftype cpu_legacy_files[] = 
{ { } /* Terminate */ }; +#ifdef CONFIG_RT_GROUP_SCHED +static struct cftype rt_group_files[] = { + { + .name = "rt_runtime_us", + .read_s64 = cpu_rt_runtime_read, + .write_s64 = cpu_rt_runtime_write, + }, + { + .name = "rt_period_us", + .read_u64 = cpu_rt_period_read_uint, + .write_u64 = cpu_rt_period_write_uint, + }, + { } /* Terminate */ +}; + +# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED +DEFINE_STATIC_KEY_FALSE(rt_group_sched); +# else +DEFINE_STATIC_KEY_TRUE(rt_group_sched); +# endif + +static int __init setup_rt_group_sched(char *str) +{ + long val; + + if (kstrtol(str, 0, &val) || val < 0 || val > 1) { + pr_warn("Unable to set rt_group_sched\n"); + return 1; + } + if (val) + static_branch_enable(&rt_group_sched); + else + static_branch_disable(&rt_group_sched); + + return 1; +} +__setup("rt_group_sched=", setup_rt_group_sched); + +static int __init cpu_rt_group_init(void) +{ + if (!rt_group_sched_enabled()) + return 0; + + WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files)); + return 0; +} +subsys_initcall(cpu_rt_group_init); +#endif /* CONFIG_RT_GROUP_SCHED */ + static int cpu_extra_stat_show(struct seq_file *sf, struct cgroup_subsys_state *css) { diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 56ae54e0ce6a..557246880a7e 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -588,6 +588,10 @@ static void register_sd(struct sched_domain *sd, struct dentry *parent) debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops); debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops); debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level); + + if (sd->flags & SD_ASYM_PACKING) + debugfs_create_u32("group_asym_prefer_cpu", 0444, parent, + (u32 *)&sd->groups->asym_prefer_cpu); } void update_sched_domain_debugfs(void) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0fb9bf995a47..125912c0e9dd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3795,6 +3795,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, update_entity_lag(cfs_rq, se); se->deadline -= se->vruntime; se->rel_deadline = 1; + cfs_rq->nr_queued--; if (!curr) __dequeue_entity(cfs_rq, se); update_load_sub(&cfs_rq->load, se->load.weight); @@ -3821,10 +3822,11 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, enqueue_load_avg(cfs_rq, se); if (se->on_rq) { - update_load_add(&cfs_rq->load, se->load.weight); place_entity(cfs_rq, se, 0); + update_load_add(&cfs_rq->load, se->load.weight); if (!curr) __enqueue_entity(cfs_rq, se); + cfs_rq->nr_queued++; /* * The entity's vruntime has been adjusted, so let's check @@ -4933,13 +4935,6 @@ static inline void util_est_update(struct cfs_rq *cfs_rq, goto done; /* - * To avoid overestimation of actual task utilization, skip updates if - * we cannot grant there is idle time in this CPU. - */ - if (dequeued > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)))) - return; - - /* * To avoid underestimate of task utilization, skip updates of EWMA if * we cannot grant that thread got all CPU time it wanted. */ @@ -6941,7 +6936,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) * Let's add the task's estimated utilization to the cfs_rq's * estimated utilization, before we update schedutil. 
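Earlier in this stretch of kernel/sched/core.c, the new rt_group_sched= boot parameter validates that the value is 0 or 1 and then flips a static key that gates all RT group-scheduling paths. A user-space sketch of that parse-and-gate shape, with an ordinary global standing in for the static branch; the names and the default shown are illustrative:

  #include <stdio.h>
  #include <stdlib.h>

  static int rt_group_sched_on = 1;    /* stand-in for the static key; default illustrative */

  /* Same shape as setup_rt_group_sched(): accept only 0 or 1,
   * warn and keep the current setting on anything else. */
  static int setup_rt_group_sched(const char *str)
  {
          char *end;
          long val = strtol(str, &end, 0);

          if (end == str || *end != '\0' || val < 0 || val > 1) {
                  fprintf(stderr, "Unable to set rt_group_sched\n");
                  return 1;
          }
          rt_group_sched_on = (int)val;
          return 1;
  }

  int main(int argc, char **argv)
  {
          if (argc > 1)
                  setup_rt_group_sched(argv[1]);  /* "./a.out 0" ~ booting with rt_group_sched=0 */
          printf("rt group scheduling %s\n", rt_group_sched_on ? "enabled" : "disabled");
          return 0;
  }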
*/ - if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE)))) + if (!p->se.sched_delayed || (flags & ENQUEUE_DELAYED)) util_est_enqueue(&rq->cfs, p); if (flags & ENQUEUE_DELAYED) { @@ -7181,7 +7176,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) */ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) { - if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE)))) + if (!p->se.sched_delayed) util_est_dequeue(&rq->cfs, p); util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP); @@ -7196,6 +7191,11 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) return true; } +static inline unsigned int cfs_h_nr_delayed(struct rq *rq) +{ + return (rq->cfs.h_nr_queued - rq->cfs.h_nr_runnable); +} + #ifdef CONFIG_SMP /* Working cpumask for: sched_balance_rq(), sched_balance_newidle(). */ @@ -7357,8 +7357,12 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync) if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; - if (sync && cpu_rq(this_cpu)->nr_running == 1) - return this_cpu; + if (sync) { + struct rq *rq = cpu_rq(this_cpu); + + if ((rq->nr_running - cfs_h_nr_delayed(rq)) == 1) + return this_cpu; + } if (available_idle_cpu(prev_cpu)) return prev_cpu; @@ -10256,7 +10260,7 @@ sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group (sgs->group_weight - sgs->idle_cpus != 1)) return false; - return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu); + return sched_asym(env->sd, env->dst_cpu, READ_ONCE(group->asym_prefer_cpu)); } /* One group has more than one SMT CPU while the other group does not */ @@ -10493,7 +10497,8 @@ static bool update_sd_pick_busiest(struct lb_env *env, case group_asym_packing: /* Prefer to move from lowest priority CPU's work */ - return sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu); + return sched_asym_prefer(READ_ONCE(sds->busiest->asym_prefer_cpu), + READ_ONCE(sg->asym_prefer_cpu)); case group_misfit_task: /* diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 81bc8b329ef1..93b038d48900 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -40,7 +40,7 @@ int housekeeping_any_cpu(enum hk_type type) if (cpu < nr_cpu_ids) return cpu; - cpu = cpumask_any_and(housekeeping.cpumasks[type], cpu_online_mask); + cpu = cpumask_any_and_distribute(housekeeping.cpumasks[type], cpu_online_mask); if (likely(cpu < nr_cpu_ids)) return cpu; /* diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index fa03ec3ed56a..e40422c37033 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -89,6 +89,7 @@ void init_rt_rq(struct rt_rq *rt_rq) rt_rq->rt_throttled = 0; rt_rq->rt_runtime = 0; raw_spin_lock_init(&rt_rq->rt_runtime_lock); + rt_rq->tg = &root_task_group; #endif } @@ -175,11 +176,14 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { + /* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */ + WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group); return rt_rq->rq; } static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) { + WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group); return rt_se->rt_rq; } @@ -187,11 +191,15 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) { struct rt_rq *rt_rq = rt_se->rt_rq; + 
WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group); return rt_rq->rq; } void unregister_rt_sched_group(struct task_group *tg) { + if (!rt_group_sched_enabled()) + return; + if (tg->rt_se) destroy_rt_bandwidth(&tg->rt_bandwidth); } @@ -200,6 +208,9 @@ void free_rt_sched_group(struct task_group *tg) { int i; + if (!rt_group_sched_enabled()) + return; + for_each_possible_cpu(i) { if (tg->rt_rq) kfree(tg->rt_rq[i]); @@ -244,6 +255,9 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) struct sched_rt_entity *rt_se; int i; + if (!rt_group_sched_enabled()) + return 1; + tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); if (!tg->rt_rq) goto err; @@ -482,9 +496,6 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) { - if (!rt_rq->tg) - return RUNTIME_INF; - return rt_rq->rt_runtime; } @@ -497,6 +508,11 @@ typedef struct task_group *rt_rq_iter_t; static inline struct task_group *next_task_group(struct task_group *tg) { + if (!rt_group_sched_enabled()) { + WARN_ON(tg != &root_task_group); + return NULL; + } + do { tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list); @@ -509,9 +525,9 @@ static inline struct task_group *next_task_group(struct task_group *tg) } #define for_each_rt_rq(rt_rq, iter, rq) \ - for (iter = container_of(&task_groups, typeof(*iter), list); \ - (iter = next_task_group(iter)) && \ - (rt_rq = iter->rt_rq[cpu_of(rq)]);) + for (iter = &root_task_group; \ + iter && (rt_rq = iter->rt_rq[cpu_of(rq)]); \ + iter = next_task_group(iter)) #define for_each_sched_rt_entity(rt_se) \ for (; rt_se; rt_se = rt_se->parent) @@ -1066,13 +1082,12 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); -#ifdef CONFIG_RT_GROUP_SCHED /* * Change rq's cpupri only if rt_rq is the top queue. */ - if (&rq->rt != rt_rq) + if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq) return; -#endif + if (rq->online && prio < prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, prio); } @@ -1082,13 +1097,12 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); -#ifdef CONFIG_RT_GROUP_SCHED /* * Change rq's cpupri only if rt_rq is the top queue. */ - if (&rq->rt != rt_rq) + if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq) return; -#endif + if (rq->online && rt_rq->highest_prio.curr != prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); } @@ -1156,8 +1170,7 @@ inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) if (rt_se_boosted(rt_se)) rt_rq->rt_nr_boosted++; - if (rt_rq->tg) - start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); + start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); } static void @@ -1257,11 +1270,9 @@ static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_arr static inline struct sched_statistics * __schedstats_from_rt_se(struct sched_rt_entity *rt_se) { -#ifdef CONFIG_RT_GROUP_SCHED /* schedstats is not supported for rt group. 
*/ if (!rt_entity_is_task(rt_se)) return NULL; -#endif return &rt_task_of(rt_se)->stats; } @@ -1883,6 +1894,27 @@ static int find_lowest_rq(struct task_struct *task) return -1; } +static struct task_struct *pick_next_pushable_task(struct rq *rq) +{ + struct task_struct *p; + + if (!has_pushable_tasks(rq)) + return NULL; + + p = plist_first_entry(&rq->rt.pushable_tasks, + struct task_struct, pushable_tasks); + + BUG_ON(rq->cpu != task_cpu(p)); + BUG_ON(task_current(rq, p)); + BUG_ON(task_current_donor(rq, p)); + BUG_ON(p->nr_cpus_allowed <= 1); + + BUG_ON(!task_on_rq_queued(p)); + BUG_ON(!rt_task(p)); + + return p; +} + /* Will lock the rq it finds */ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) { @@ -1913,18 +1945,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) /* * We had to unlock the run queue. In * the mean time, task could have - * migrated already or had its affinity changed. - * Also make sure that it wasn't scheduled on its rq. + * migrated already or had its affinity changed, + * therefore check if the task is still at the + * head of the pushable tasks list. * It is possible the task was scheduled, set * "migrate_disabled" and then got preempted, so we must * check the task migration disable flag here too. */ - if (unlikely(task_rq(task) != rq || + if (unlikely(is_migration_disabled(task) || !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) || - task_on_cpu(rq, task) || - !rt_task(task) || - is_migration_disabled(task) || - !task_on_rq_queued(task))) { + task != pick_next_pushable_task(rq))) { double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; @@ -1944,27 +1974,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) return lowest_rq; } -static struct task_struct *pick_next_pushable_task(struct rq *rq) -{ - struct task_struct *p; - - if (!has_pushable_tasks(rq)) - return NULL; - - p = plist_first_entry(&rq->rt.pushable_tasks, - struct task_struct, pushable_tasks); - - BUG_ON(rq->cpu != task_cpu(p)); - BUG_ON(task_current(rq, p)); - BUG_ON(task_current_donor(rq, p)); - BUG_ON(p->nr_cpus_allowed <= 1); - - BUG_ON(!task_on_rq_queued(p)); - BUG_ON(!rt_task(p)); - - return p; -} - /* * If the current CPU has more than one RT task, see if the non * running task can migrate over to a CPU that is running a task @@ -2602,8 +2611,9 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu) { struct rt_rq *rt_rq; -#ifdef CONFIG_RT_GROUP_SCHED +#ifdef CONFIG_RT_GROUP_SCHED // XXX maybe add task_rt_rq(), see also sched_rt_period_rt_rq rt_rq = task_group(p)->rt_rq[cpu]; + WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group); #else rt_rq = &cpu_rq(cpu)->rt; #endif @@ -2713,6 +2723,9 @@ static int tg_rt_schedulable(struct task_group *tg, void *data) tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) return -EBUSY; + if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group)) + return -EBUSY; + total = to_ratio(period, runtime); /* @@ -2868,7 +2881,7 @@ static int sched_rt_global_constraints(void) int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) { /* Don't accept real-time tasks when there is no way for them to run */ - if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) + if (rt_group_sched_enabled() && rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) return 0; return 1; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 47972f34ea70..c5a6a503eb6d 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -813,15 +813,17 
@@ struct rt_rq { #ifdef CONFIG_RT_GROUP_SCHED int rt_throttled; - u64 rt_time; - u64 rt_runtime; + u64 rt_time; /* consumed RT time, goes up in update_curr_rt */ + u64 rt_runtime; /* allotted RT time, "slice" from rt_bandwidth, RT sharing/balancing */ /* Nests inside the rq lock: */ raw_spinlock_t rt_runtime_lock; unsigned int rt_nr_boosted; - struct rq *rq; - struct task_group *tg; + struct rq *rq; /* this is always top-level rq, cache? */ +#endif +#ifdef CONFIG_CGROUP_SCHED + struct task_group *tg; /* this tg has "this" rt_rq on given CPU for runnable entities */ #endif }; @@ -1498,6 +1500,23 @@ static inline bool sched_group_cookie_match(struct rq *rq, } #endif /* !CONFIG_SCHED_CORE */ +#ifdef CONFIG_RT_GROUP_SCHED +# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED +DECLARE_STATIC_KEY_FALSE(rt_group_sched); +static inline bool rt_group_sched_enabled(void) +{ + return static_branch_unlikely(&rt_group_sched); +} +# else +DECLARE_STATIC_KEY_TRUE(rt_group_sched); +static inline bool rt_group_sched_enabled(void) +{ + return static_branch_likely(&rt_group_sched); +} +# endif /* CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED */ +#else +# define rt_group_sched_enabled() false +#endif /* CONFIG_RT_GROUP_SCHED */ static inline void lockdep_assert_rq_held(struct rq *rq) { @@ -2146,6 +2165,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu) #endif #ifdef CONFIG_RT_GROUP_SCHED + /* + * p->rt.rt_rq is NULL initially and it is easier to assign + * root_task_group's rt_rq than switching in rt_rq_of_se() + * Clobbers tg(!) + */ + if (!rt_group_sched_enabled()) + tg = &root_task_group; p->rt.rt_rq = tg->rt_rq[cpu]; p->rt.parent = tg->rt_se[cpu]; #endif diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index c326de1344fb..547c1f05b667 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -634,13 +634,14 @@ change: * Do not allow real-time tasks into groups that have no runtime * assigned. */ - if (rt_bandwidth_enabled() && rt_policy(policy) && + if (rt_group_sched_enabled() && + rt_bandwidth_enabled() && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0 && !task_group_is_autogroup(task_group(p))) { retval = -EPERM; goto unlock; } -#endif +#endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_SMP if (dl_bandwidth_enabled() && dl_policy(policy) && !(attr->sched_flags & SCHED_FLAG_SUGOV)) { diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index f1ebc60d967f..a2a38e1b6f18 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1333,6 +1333,64 @@ next: update_group_capacity(sd, cpu); } +#ifdef CONFIG_SMP + +/* Update the "asym_prefer_cpu" when arch_asym_cpu_priority() changes. */ +void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) +{ + int asym_prefer_cpu = cpu; + struct sched_domain *sd; + + guard(rcu)(); + + for_each_domain(cpu, sd) { + struct sched_group *sg; + int group_cpu; + + if (!(sd->flags & SD_ASYM_PACKING)) + continue; + + /* + * Groups of overlapping domain are replicated per NUMA + * node and will require updating "asym_prefer_cpu" on + * each local copy. + * + * If you are hitting this warning, consider moving + * "sg->asym_prefer_cpu" to "sg->sgc->asym_prefer_cpu" + * which is shared by all the overlapping groups. 
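When the priority of the current asym_prefer_cpu drops, the loop later in this function rescans the group span and keeps the highest-priority CPU. A user-space rendition of that rescan, with a plain array standing in for arch_asym_cpu_priority() and a bitmask for the group span; all values here are illustrative:

  #include <stdio.h>

  /* Illustrative priority table standing in for arch_asym_cpu_priority(). */
  static int cpu_prio[8] = { 10, 30, 20, 40, 5, 5, 5, 5 };

  static int asym_prefer(int a, int b)      /* like sched_asym_prefer() */
  {
          return cpu_prio[a] > cpu_prio[b];
  }

  /* Recompute a group's preferred CPU over a span given as a bitmask,
   * mirroring the fallback loop in sched_update_asym_prefer_cpu(). */
  static int recompute_asym_prefer_cpu(unsigned span, int start)
  {
          int cpu, best = start;

          for (cpu = 0; cpu < 8; cpu++) {
                  if (!(span & (1u << cpu)))
                          continue;
                  if (asym_prefer(cpu, best))
                          best = cpu;
          }
          return best;
  }

  int main(void)
  {
          /* CPU 3 was preferred; its priority just dropped from 40 to 15. */
          cpu_prio[3] = 15;
          printf("new asym_prefer_cpu = %d\n", recompute_asym_prefer_cpu(0x0f, 3));
          return 0;
  }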
+ */ + WARN_ON_ONCE(sd->flags & SD_OVERLAP); + + sg = sd->groups; + if (cpu != sg->asym_prefer_cpu) { + /* + * Since the parent is a superset of the current group, + * if the cpu is not the "asym_prefer_cpu" at the + * current level, it cannot be the preferred CPU at a + * higher levels either. + */ + if (!sched_asym_prefer(cpu, sg->asym_prefer_cpu)) + return; + + WRITE_ONCE(sg->asym_prefer_cpu, cpu); + continue; + } + + /* Ranking has improved; CPU is still the preferred one. */ + if (new_prio >= old_prio) + continue; + + for_each_cpu(group_cpu, sched_group_span(sg)) { + if (sched_asym_prefer(group_cpu, asym_prefer_cpu)) + asym_prefer_cpu = group_cpu; + } + + WRITE_ONCE(sg->asym_prefer_cpu, asym_prefer_cpu); + } +} + +#endif /* CONFIG_SMP */ + /* * Set of available CPUs grouped by their corresponding capacities * Each list entry contains a CPU mask reflecting CPUs that share the same @@ -2098,7 +2156,7 @@ int sched_numa_find_closest(const struct cpumask *cpus, int cpu) for (i = 0; i < sched_domains_numa_levels; i++) { if (!masks[i][j]) break; - cpu = cpumask_any_and(cpus, masks[i][j]); + cpu = cpumask_any_and_distribute(cpus, masks[i][j]); if (cpu < nr_cpu_ids) { found = cpu; break; @@ -2347,35 +2405,54 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve /* * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for - * any two given CPUs at this (non-NUMA) topology level. + * any two given CPUs on non-NUMA topology levels. */ -static bool topology_span_sane(struct sched_domain_topology_level *tl, - const struct cpumask *cpu_map, int cpu) +static bool topology_span_sane(const struct cpumask *cpu_map) { - int i = cpu + 1; + struct sched_domain_topology_level *tl; + struct cpumask *covered, *id_seen; + int cpu; - /* NUMA levels are allowed to overlap */ - if (tl->flags & SDTL_OVERLAP) - return true; + lockdep_assert_held(&sched_domains_mutex); + covered = sched_domains_tmpmask; + id_seen = sched_domains_tmpmask2; + + for_each_sd_topology(tl) { + + /* NUMA levels are allowed to overlap */ + if (tl->flags & SDTL_OVERLAP) + continue; + + cpumask_clear(covered); + cpumask_clear(id_seen); - /* - * Non-NUMA levels cannot partially overlap - they must be either - * completely equal or completely disjoint. Otherwise we can end up - * breaking the sched_group lists - i.e. a later get_group() pass - * breaks the linking done for an earlier span. - */ - for_each_cpu_from(i, cpu_map) { /* - * We should 'and' all those masks with 'cpu_map' to exactly - * match the topology we're about to build, but that can only - * remove CPUs, which only lessens our ability to detect - * overlaps + * Non-NUMA levels cannot partially overlap - they must be either + * completely equal or completely disjoint. Otherwise we can end up + * breaking the sched_group lists - i.e. a later get_group() pass + * breaks the linking done for an earlier span. 
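The check being rewritten here (continued below) walks each non-NUMA topology level once, using the first CPU of every span as its identifier: a span whose id has already been seen must be identical to the earlier one, and a span with a new id must not intersect anything covered so far. The same invariant in a self-contained user-space form, with bitmask spans and toy data:

  #include <stdio.h>

  #define NCPU 8

  /* One per-CPU span for a single topology level, as a bitmask (toy data). */
  static unsigned span_of[NCPU] = {
          0x03, 0x03, 0x0c, 0x0c,    /* CPUs 0-1 share a span, 2-3 another */
          0x30, 0x30, 0xc0, 0xc0,
  };

  /* Mirrors the reworked topology_span_sane(): every span must either be
   * identical to one already seen (same first CPU) or fully disjoint from
   * everything covered so far. */
  static int spans_sane(void)
  {
          unsigned covered = 0, id_seen = 0;
          int cpu;

          for (cpu = 0; cpu < NCPU; cpu++) {
                  unsigned span = span_of[cpu];
                  int id = __builtin_ctz(span);       /* lowest set bit = span id */

                  if (id_seen & (1u << id)) {
                          if (span_of[id] != span)    /* same id, different span */
                                  return 0;
                  } else {
                          if (covered & span)         /* new span overlaps an old one */
                                  return 0;
                          covered |= span;
                          id_seen |= 1u << id;
                  }
          }
          return 1;
  }

  int main(void)
  {
          printf("sane=%d\n", spans_sane());
          span_of[1] = 0x07;          /* CPU 1 now partially overlaps CPUs 2-3 */
          printf("sane=%d\n", spans_sane());
          return 0;
  }

The single pass with the covered and id_seen masks replaces the earlier pairwise comparison of every CPU against every later CPU, which is the point of hoisting the check out of the per-CPU build loop.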
*/ - if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && - cpumask_intersects(tl->mask(cpu), tl->mask(i))) - return false; - } + for_each_cpu(cpu, cpu_map) { + const struct cpumask *tl_cpu_mask = tl->mask(cpu); + int id; + + /* lowest bit set in this mask is used as a unique id */ + id = cpumask_first(tl_cpu_mask); + + if (cpumask_test_cpu(id, id_seen)) { + /* First CPU has already been seen, ensure identical spans */ + if (!cpumask_equal(tl->mask(id), tl_cpu_mask)) + return false; + } else { + /* First CPU hasn't been seen before, ensure it's a completely new span */ + if (cpumask_intersects(tl_cpu_mask, covered)) + return false; + cpumask_or(covered, covered, tl_cpu_mask); + cpumask_set_cpu(id, id_seen); + } + } + } return true; } @@ -2408,9 +2485,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att sd = NULL; for_each_sd_topology(tl) { - if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) - goto error; - sd = build_sched_domain(tl, cpu_map, attr, sd, i); has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; @@ -2424,6 +2498,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att } } + if (WARN_ON(!topology_span_sane(cpu_map))) + goto error; + /* Build the groups for the domains */ for_each_cpu(i, cpu_map) { for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { diff --git a/kernel/sys.c b/kernel/sys.c index c434968e9f5d..adc0de0aa364 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -52,6 +52,7 @@ #include <linux/user_namespace.h> #include <linux/time_namespace.h> #include <linux/binfmts.h> +#include <linux/futex.h> #include <linux/sched.h> #include <linux/sched/autogroup.h> @@ -2820,6 +2821,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, return -EINVAL; error = posixtimer_create_prctl(arg2); break; + case PR_FUTEX_HASH: + error = futex_hash_prctl(arg2, arg3, arg4); + break; default: trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5); error = -EINVAL; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 3679a6d18934..3f6a7bdc6edf 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -893,11 +893,6 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, rcu_read_unlock(); } -static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio) -{ - blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0); -} - static void blk_add_trace_bio_complete(void *ignore, struct request_queue *q, struct bio *bio) { @@ -1089,8 +1084,6 @@ static void blk_register_tracepoints(void) WARN_ON(ret); ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); WARN_ON(ret); - ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); - WARN_ON(ret); ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); WARN_ON(ret); ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); @@ -1125,7 +1118,6 @@ static void blk_unregister_tracepoints(void) unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL); unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL); unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL); - unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); @@ -1462,7 +1454,6 @@ static const struct { [__BLK_TA_UNPLUG_TIMER] = {{ 
"UT", "unplug_timer" }, blk_log_unplug }, [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, - [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, }; @@ -1896,6 +1887,8 @@ void blk_fill_rwbs(char *rwbs, blk_opf_t opf) rwbs[i++] = 'S'; if (opf & REQ_META) rwbs[i++] = 'M'; + if (opf & REQ_ATOMIC) + rwbs[i++] = 'U'; rwbs[i] = '\0'; } diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 25ecc1334b67..c7f602fa7b23 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -350,18 +350,28 @@ static bool needs_section_mem(struct module *mod, unsigned long size) return size >= sizeof(struct alloc_tag); } -static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to) +static bool clean_unused_counters(struct alloc_tag *start_tag, + struct alloc_tag *end_tag) { - while (from <= to) { + struct alloc_tag *tag; + bool ret = true; + + for (tag = start_tag; tag <= end_tag; tag++) { struct alloc_tag_counters counter; - counter = alloc_tag_read(from); - if (counter.bytes) - return from; - from++; + if (!tag->counters) + continue; + + counter = alloc_tag_read(tag); + if (!counter.bytes) { + free_percpu(tag->counters); + tag->counters = NULL; + } else { + ret = false; + } } - return NULL; + return ret; } /* Called with mod_area_mt locked */ @@ -371,12 +381,16 @@ static void clean_unused_module_areas_locked(void) struct module *val; mas_for_each(&mas, val, module_tags.size) { + struct alloc_tag *start_tag; + struct alloc_tag *end_tag; + if (val != &unloaded_mod) continue; /* Release area if all tags are unused */ - if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), - (struct alloc_tag *)(module_tags.start_addr + mas.last))) + start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index); + end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last); + if (clean_unused_counters(start_tag, end_tag)) mas_erase(&mas); } } @@ -561,7 +575,8 @@ unlock: static void release_module_tags(struct module *mod, bool used) { MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size); - struct alloc_tag *tag; + struct alloc_tag *start_tag; + struct alloc_tag *end_tag; struct module *val; mas_lock(&mas); @@ -575,15 +590,22 @@ static void release_module_tags(struct module *mod, bool used) if (!used) goto release_area; - /* Find out if the area is used */ - tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), - (struct alloc_tag *)(module_tags.start_addr + mas.last)); - if (tag) { - struct alloc_tag_counters counter = alloc_tag_read(tag); + start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index); + end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last); + if (!clean_unused_counters(start_tag, end_tag)) { + struct alloc_tag *tag; + + for (tag = start_tag; tag <= end_tag; tag++) { + struct alloc_tag_counters counter; + + if (!tag->counters) + continue; - pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n", - tag->ct.filename, tag->ct.lineno, tag->ct.modname, - tag->ct.function, counter.bytes); + counter = alloc_tag_read(tag); + pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n", + tag->ct.filename, tag->ct.lineno, tag->ct.modname, + tag->ct.function, counter.bytes); + } } else { used = false; } @@ -596,6 +618,34 @@ out: mas_unlock(&mas); } +static void load_module(struct module *mod, struct codetag *start, struct codetag *stop) +{ + /* Allocate module 
alloc_tag percpu counters */ + struct alloc_tag *start_tag; + struct alloc_tag *stop_tag; + struct alloc_tag *tag; + + if (!mod) + return; + + start_tag = ct_to_alloc_tag(start); + stop_tag = ct_to_alloc_tag(stop); + for (tag = start_tag; tag < stop_tag; tag++) { + WARN_ON(tag->counters); + tag->counters = alloc_percpu(struct alloc_tag_counters); + if (!tag->counters) { + while (--tag >= start_tag) { + free_percpu(tag->counters); + tag->counters = NULL; + } + shutdown_mem_profiling(true); + pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n", + mod->name); + break; + } + } +} + static void replace_module(struct module *mod, struct module *new_mod) { MA_STATE(mas, &mod_area_mt, 0, module_tags.size); @@ -757,6 +807,7 @@ static int __init alloc_tag_init(void) .needs_section_mem = needs_section_mem, .alloc_section_mem = reserve_module_tags, .free_section_mem = release_module_tags, + .module_load = load_module, .module_replaced = replace_module, #endif }; diff --git a/lib/codetag.c b/lib/codetag.c index 42aadd6c1454..de332e98d6f5 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -194,7 +194,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) if (err >= 0) { cttype->count += range_size(cttype, &range); if (cttype->desc.module_load) - cttype->desc.module_load(cttype, cmod); + cttype->desc.module_load(mod, range.start, range.stop); } up_write(&cttype->mod_lock); @@ -333,7 +333,8 @@ void codetag_unload_module(struct module *mod) } if (found) { if (cttype->desc.module_unload) - cttype->desc.module_unload(cttype, cmod); + cttype->desc.module_unload(cmod->mod, + cmod->range.start, cmod->range.stop); cttype->count -= range_size(cttype, &cmod->range); idr_remove(&cttype->mod_idr, mod_id); diff --git a/lib/crc16.c b/lib/crc16.c index 5c3a803c01e0..9c71eda9bf4b 100644 --- a/lib/crc16.c +++ b/lib/crc16.c @@ -8,7 +8,7 @@ #include <linux/crc16.h> /** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */ -u16 const crc16_table[256] = { +static const u16 crc16_table[256] = { 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, @@ -42,20 +42,19 @@ u16 const crc16_table[256] = { 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 }; -EXPORT_SYMBOL(crc16_table); /** * crc16 - compute the CRC-16 for the data buffer * @crc: previous CRC value - * @buffer: data pointer + * @p: data pointer * @len: number of bytes in the buffer * * Returns the updated CRC value. */ -u16 crc16(u16 crc, u8 const *buffer, size_t len) +u16 crc16(u16 crc, const u8 *p, size_t len) { while (len--) - crc = crc16_byte(crc, *buffer++); + crc = (crc >> 8) ^ crc16_table[(crc & 0xff) ^ *p++]; return crc; } EXPORT_SYMBOL(crc16); diff --git a/lib/crc32.c b/lib/crc32.c index fddd424ff224..e690026f44f7 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin * cleaned up code to current version of sparse and added the slicing-by-8 @@ -19,9 +20,6 @@ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. * fs/jffs2 uses seed 0, doesn't xor with ~0. * fs/partitions/efi.c uses seed ~0, xor's with ~0. - * - * This source code is licensed under the GNU General Public License, - * Version 2. 
See the file COPYING for more details. */ /* see: Documentation/staging/crc32.rst for a description of algorithms */ diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 798972b29b68..1ec1466108cc 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -50,22 +50,16 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA config CRYPTO_LIB_CHACHA_GENERIC tristate + default CRYPTO_LIB_CHACHA if !CRYPTO_ARCH_HAVE_LIB_CHACHA select CRYPTO_LIB_UTILS help - This symbol can be depended upon by arch implementations of the - ChaCha library interface that require the generic code as a - fallback, e.g., for SIMD implementations. If no arch specific - implementation is enabled, this implementation serves the users - of CRYPTO_LIB_CHACHA. - -config CRYPTO_LIB_CHACHA_INTERNAL - tristate - select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n + This symbol can be selected by arch implementations of the ChaCha + library interface that require the generic code as a fallback, e.g., + for SIMD implementations. If no arch specific implementation is + enabled, this implementation serves the users of CRYPTO_LIB_CHACHA. config CRYPTO_LIB_CHACHA tristate - select CRYPTO - select CRYPTO_LIB_CHACHA_INTERNAL help Enable the ChaCha library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one @@ -120,21 +114,15 @@ config CRYPTO_ARCH_HAVE_LIB_POLY1305 config CRYPTO_LIB_POLY1305_GENERIC tristate + default CRYPTO_LIB_POLY1305 if !CRYPTO_ARCH_HAVE_LIB_POLY1305 help - This symbol can be depended upon by arch implementations of the - Poly1305 library interface that require the generic code as a - fallback, e.g., for SIMD implementations. If no arch specific - implementation is enabled, this implementation serves the users - of CRYPTO_LIB_POLY1305. - -config CRYPTO_LIB_POLY1305_INTERNAL - tristate - select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n + This symbol can be selected by arch implementations of the Poly1305 + library interface that require the generic code as a fallback, e.g., + for SIMD implementations. If no arch specific implementation is + enabled, this implementation serves the users of CRYPTO_LIB_POLY1305. config CRYPTO_LIB_POLY1305 tristate - select CRYPTO - select CRYPTO_LIB_POLY1305_INTERNAL help Enable the Poly1305 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one @@ -151,5 +139,62 @@ config CRYPTO_LIB_SHA1 config CRYPTO_LIB_SHA256 tristate + help + Enable the SHA-256 library interface. This interface may be fulfilled + by either the generic implementation or an arch-specific one, if one + is available and enabled. + +config CRYPTO_ARCH_HAVE_LIB_SHA256 + bool + help + Declares whether the architecture provides an arch-specific + accelerated implementation of the SHA-256 library interface. + +config CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD + bool + help + Declares whether the architecture provides an arch-specific + accelerated implementation of the SHA-256 library interface + that is SIMD-based and therefore not usable in hardirq + context. + +config CRYPTO_LIB_SHA256_GENERIC + tristate + default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256 + help + This symbol can be selected by arch implementations of the SHA-256 + library interface that require the generic code as a fallback, e.g., + for SIMD implementations. If no arch specific implementation is + enabled, this implementation serves the users of CRYPTO_LIB_SHA256. 
+ +config CRYPTO_LIB_SM3 + tristate + +if !KMSAN # avoid false positives from assembly +if ARM +source "arch/arm/lib/crypto/Kconfig" +endif +if ARM64 +source "arch/arm64/lib/crypto/Kconfig" +endif +if MIPS +source "arch/mips/lib/crypto/Kconfig" +endif +if PPC +source "arch/powerpc/lib/crypto/Kconfig" +endif +if RISCV +source "arch/riscv/lib/crypto/Kconfig" +endif +if S390 +source "arch/s390/lib/crypto/Kconfig" +endif +if SPARC +source "arch/sparc/lib/crypto/Kconfig" +endif +if X86 +source "arch/x86/lib/crypto/Kconfig" +endif +endif endmenu diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 01fac1cd05a1..3e79283b617d 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -25,9 +25,11 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o obj-y += libblake2s.o libblake2s-y := blake2s.o libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o +libblake2s-$(CONFIG_CRYPTO_SELFTESTS) += blake2s-selftest.o obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o libchacha20poly1305-y += chacha20poly1305.o +libchacha20poly1305-$(CONFIG_CRYPTO_SELFTESTS) += chacha20poly1305-selftest.o obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o libcurve25519-generic-y := curve25519-fiat32.o @@ -36,27 +38,31 @@ libcurve25519-generic-y += curve25519-generic.o obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o libcurve25519-y += curve25519.o +libcurve25519-$(CONFIG_CRYPTO_SELFTESTS) += curve25519-selftest.o obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o libdes-y := des.o -obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o -libpoly1305-y := poly1305-donna32.o -libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o +obj-$(CONFIG_CRYPTO_LIB_POLY1305) += libpoly1305.o libpoly1305-y += poly1305.o +obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305-generic.o +libpoly1305-generic-y := poly1305-donna32.o +libpoly1305-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o +libpoly1305-generic-y += poly1305-generic.o + obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o libsha1-y := sha1.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o -ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) -libblake2s-y += blake2s-selftest.o -libchacha20poly1305-y += chacha20poly1305-selftest.o -libcurve25519-y += curve25519-selftest.o -endif +obj-$(CONFIG_CRYPTO_LIB_SHA256_GENERIC) += libsha256-generic.o +libsha256-generic-y := sha256-generic.o obj-$(CONFIG_MPILIB) += mpi/ -obj-$(CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) += simd.o +obj-$(CONFIG_CRYPTO_SELFTESTS) += simd.o + +obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o +libsm3-y := sm3.o diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c index 749dc1258a44..437613265e14 100644 --- a/lib/crypto/aescfb.c +++ b/lib/crypto/aescfb.c @@ -99,7 +99,7 @@ MODULE_DESCRIPTION("Generic AES-CFB library"); MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); MODULE_LICENSE("GPL"); -#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#ifdef CONFIG_CRYPTO_SELFTESTS /* * Test code below. Vectors taken from crypto/testmgr.h diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c index 902e49410aaf..277824d6b4af 100644 --- a/lib/crypto/aesgcm.c +++ b/lib/crypto/aesgcm.c @@ -199,7 +199,7 @@ MODULE_DESCRIPTION("Generic AES-GCM library"); MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); MODULE_LICENSE("GPL"); -#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#ifdef CONFIG_CRYPTO_SELFTESTS /* * Test code below. 
Vectors taken from crypto/testmgr.h diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 71a316552cc5..b0f9a678300b 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -60,7 +60,7 @@ EXPORT_SYMBOL(blake2s_final); static int __init blake2s_mod_init(void) { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && + if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) && WARN_ON(!blake2s_selftest())) return -ENODEV; return 0; diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c index 3cdda3b5ee06..ced87dd31a97 100644 --- a/lib/crypto/chacha.c +++ b/lib/crypto/chacha.c @@ -13,8 +13,9 @@ #include <linux/unaligned.h> #include <crypto/chacha.h> -static void chacha_permute(u32 *x, int nrounds) +static void chacha_permute(struct chacha_state *state, int nrounds) { + u32 *x = state->x; int i; /* whitelist the allowed round counts */ @@ -65,34 +66,34 @@ static void chacha_permute(u32 *x, int nrounds) /** * chacha_block_generic - generate one keystream block and increment block counter - * @state: input state matrix (16 32-bit words) - * @stream: output keystream block (64 bytes) + * @state: input state matrix + * @out: output keystream block * @nrounds: number of rounds (20 or 12; 20 is recommended) * * This is the ChaCha core, a function from 64-byte strings to 64-byte strings. * The caller has already converted the endianness of the input. This function * also handles incrementing the block counter in the input matrix. */ -void chacha_block_generic(u32 *state, u8 *stream, int nrounds) +void chacha_block_generic(struct chacha_state *state, + u8 out[CHACHA_BLOCK_SIZE], int nrounds) { - u32 x[16]; + struct chacha_state permuted_state = *state; int i; - memcpy(x, state, 64); + chacha_permute(&permuted_state, nrounds); - chacha_permute(x, nrounds); + for (i = 0; i < ARRAY_SIZE(state->x); i++) + put_unaligned_le32(permuted_state.x[i] + state->x[i], + &out[i * sizeof(u32)]); - for (i = 0; i < ARRAY_SIZE(x); i++) - put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]); - - state[12]++; + state->x[12]++; } EXPORT_SYMBOL(chacha_block_generic); /** * hchacha_block_generic - abbreviated ChaCha core, for XChaCha - * @state: input state matrix (16 32-bit words) - * @stream: output (8 32-bit words) + * @state: input state matrix + * @out: the output words * @nrounds: number of rounds (20 or 12; 20 is recommended) * * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step @@ -100,15 +101,14 @@ EXPORT_SYMBOL(chacha_block_generic); * skips the final addition of the initial state, and outputs only certain words * of the state. It should not be used for streaming directly. 
*/ -void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds) +void hchacha_block_generic(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) { - u32 x[16]; - - memcpy(x, state, 64); + struct chacha_state permuted_state = *state; - chacha_permute(x, nrounds); + chacha_permute(&permuted_state, nrounds); - memcpy(&stream[0], &x[0], 16); - memcpy(&stream[4], &x[12], 16); + memcpy(&out[0], &permuted_state.x[0], 16); + memcpy(&out[4], &permuted_state.x[12], 16); } EXPORT_SYMBOL(hchacha_block_generic); diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c index 2ea61c28be4f..e4c85bc5a6d7 100644 --- a/lib/crypto/chacha20poly1305-selftest.c +++ b/lib/crypto/chacha20poly1305-selftest.c @@ -8832,7 +8832,7 @@ chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len, { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; - u32 chacha20_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha20_state; union { u8 block0[POLY1305_KEY_SIZE]; __le64 lens[2]; @@ -8844,12 +8844,12 @@ chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len, memcpy(&bottom_row[4], nonce, 12); for (i = 0; i < 8; ++i) le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i); - chacha_init(chacha20_state, le_key, bottom_row); - chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0)); + chacha_init(&chacha20_state, le_key, bottom_row); + chacha20_crypt(&chacha20_state, b.block0, b.block0, sizeof(b.block0)); poly1305_init(&poly1305_state, b.block0); poly1305_update(&poly1305_state, ad, ad_len); poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf); - chacha20_crypt(chacha20_state, dst, src, src_len); + chacha20_crypt(&chacha20_state, dst, src, src_len); poly1305_update(&poly1305_state, dst, src_len); poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf); b.lens[0] = cpu_to_le64(ad_len); diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index 9cfa886f1f89..e29eed49a5a1 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -18,8 +18,6 @@ #include <linux/mm.h> #include <linux/module.h> -#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) - static void chacha_load_key(u32 *k, const u8 *in) { k[0] = get_unaligned_le32(in); @@ -32,7 +30,8 @@ static void chacha_load_key(u32 *k, const u8 *in) k[7] = get_unaligned_le32(in + 28); } -static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce) +static void xchacha_init(struct chacha_state *chacha_state, + const u8 *key, const u8 *nonce) { u32 k[CHACHA_KEY_WORDS]; u8 iv[CHACHA_IV_SIZE]; @@ -54,7 +53,8 @@ static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce) static void __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, - const u8 *ad, const size_t ad_len, u32 *chacha_state) + const u8 *ad, const size_t ad_len, + struct chacha_state *chacha_state) { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; @@ -82,7 +82,7 @@ __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, poly1305_final(&poly1305_state, dst + src_len); - memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32)); + chacha_zeroize_state(chacha_state); memzero_explicit(&b, sizeof(b)); } @@ -91,7 +91,7 @@ void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u64 nonce, const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 
chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u32 k[CHACHA_KEY_WORDS]; __le64 iv[2]; @@ -100,8 +100,9 @@ void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, iv[0] = 0; iv[1] = cpu_to_le64(nonce); - chacha_init(chacha_state, k, (u8 *)iv); - __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state); + chacha_init(&chacha_state, k, (u8 *)iv); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + &chacha_state); memzero_explicit(iv, sizeof(iv)); memzero_explicit(k, sizeof(k)); @@ -113,16 +114,18 @@ void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; - xchacha_init(chacha_state, key, nonce); - __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state); + xchacha_init(&chacha_state, key, nonce); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + &chacha_state); } EXPORT_SYMBOL(xchacha20poly1305_encrypt); static bool __chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, - const u8 *ad, const size_t ad_len, u32 *chacha_state) + const u8 *ad, const size_t ad_len, + struct chacha_state *chacha_state) { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; @@ -169,7 +172,7 @@ bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, const u64 nonce, const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u32 k[CHACHA_KEY_WORDS]; __le64 iv[2]; bool ret; @@ -179,11 +182,11 @@ bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, iv[0] = 0; iv[1] = cpu_to_le64(nonce); - chacha_init(chacha_state, k, (u8 *)iv); + chacha_init(&chacha_state, k, (u8 *)iv); ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, - chacha_state); + &chacha_state); - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); memzero_explicit(iv, sizeof(iv)); memzero_explicit(k, sizeof(k)); return ret; @@ -195,11 +198,11 @@ bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; - xchacha_init(chacha_state, key, nonce); + xchacha_init(&chacha_state, key, nonce); return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, - chacha_state); + &chacha_state); } EXPORT_SYMBOL(xchacha20poly1305_decrypt); @@ -213,7 +216,7 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; struct sg_mapping_iter miter; size_t partial = 0; unsigned int flags; @@ -240,8 +243,8 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, b.iv[0] = 0; b.iv[1] = cpu_to_le64(nonce); - chacha_init(chacha_state, b.k, (u8 *)b.iv); - chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0)); + chacha_init(&chacha_state, b.k, (u8 *)b.iv); + chacha20_crypt(&chacha_state, b.block0, pad0, sizeof(b.block0)); poly1305_init(&poly1305_state, b.block0); if (unlikely(ad_len)) { @@ -276,13 +279,13 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, if (unlikely(length < sl)) l &= ~(CHACHA_BLOCK_SIZE - 1); - 
chacha20_crypt(chacha_state, addr, addr, l); + chacha20_crypt(&chacha_state, addr, addr, l); addr += l; length -= l; } if (unlikely(length > 0)) { - chacha20_crypt(chacha_state, b.chacha_stream, pad0, + chacha20_crypt(&chacha_state, b.chacha_stream, pad0, CHACHA_BLOCK_SIZE); crypto_xor(addr, b.chacha_stream, length); partial = length; @@ -323,7 +326,7 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE); } - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); memzero_explicit(&b, sizeof(b)); return ret; @@ -355,7 +358,7 @@ EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace); static int __init chacha20poly1305_init(void) { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && + if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) && WARN_ON(!chacha20poly1305_selftest())) return -ENODEV; return 0; diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c index 064b352c6907..6850b76a80c9 100644 --- a/lib/crypto/curve25519.c +++ b/lib/crypto/curve25519.c @@ -15,7 +15,7 @@ static int __init curve25519_init(void) { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && + if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) && WARN_ON(!curve25519_selftest())) return -ENODEV; return 0; diff --git a/lib/crypto/libchacha.c b/lib/crypto/libchacha.c index cc1be0496eb9..ebcca381e248 100644 --- a/lib/crypto/libchacha.c +++ b/lib/crypto/libchacha.c @@ -12,7 +12,7 @@ #include <crypto/algapi.h> // for crypto_xor_cpy #include <crypto/chacha.h> -void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, +void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { /* aligned to potentially speed up crypto_xor() */ diff --git a/lib/crypto/poly1305-generic.c b/lib/crypto/poly1305-generic.c new file mode 100644 index 000000000000..a73f700fa1fb --- /dev/null +++ b/lib/crypto/poly1305-generic.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Poly1305 authenticator algorithm, RFC7539 + * + * Copyright (C) 2015 Martin Willi + * + * Based on public domain code by Andrew Moon and Daniel J. Bernstein. + */ + +#include <crypto/internal/poly1305.h> +#include <linux/kernel.h> +#include <linux/module.h> + +void poly1305_block_init_generic(struct poly1305_block_state *desc, + const u8 raw_key[POLY1305_BLOCK_SIZE]) +{ + poly1305_core_init(&desc->h); + poly1305_core_setkey(&desc->core_r, raw_key); +} +EXPORT_SYMBOL_GPL(poly1305_block_init_generic); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("Poly1305 algorithm (generic implementation)"); diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c index 6e80214ebad8..5f2f2af3b59f 100644 --- a/lib/crypto/poly1305.c +++ b/lib/crypto/poly1305.c @@ -7,72 +7,67 @@ * Based on public domain code by Andrew Moon and Daniel J. Bernstein. 
*/ +#include <crypto/internal/blockhash.h> #include <crypto/internal/poly1305.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> #include <linux/unaligned.h> -void poly1305_init_generic(struct poly1305_desc_ctx *desc, - const u8 key[POLY1305_KEY_SIZE]) +void poly1305_init(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]) { - poly1305_core_setkey(&desc->core_r, key); desc->s[0] = get_unaligned_le32(key + 16); desc->s[1] = get_unaligned_le32(key + 20); desc->s[2] = get_unaligned_le32(key + 24); desc->s[3] = get_unaligned_le32(key + 28); - poly1305_core_init(&desc->h); desc->buflen = 0; - desc->sset = true; - desc->rset = 2; + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_block_init_arch(&desc->state, key); + else + poly1305_block_init_generic(&desc->state, key); } -EXPORT_SYMBOL_GPL(poly1305_init_generic); +EXPORT_SYMBOL(poly1305_init); -void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, - unsigned int nbytes) +static inline void poly1305_blocks(struct poly1305_block_state *state, + const u8 *src, unsigned int len) { - unsigned int bytes; - - if (unlikely(desc->buflen)) { - bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen); - memcpy(desc->buf + desc->buflen, src, bytes); - src += bytes; - nbytes -= bytes; - desc->buflen += bytes; - - if (desc->buflen == POLY1305_BLOCK_SIZE) { - poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, - 1, 1); - desc->buflen = 0; - } - } - - if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { - poly1305_core_blocks(&desc->h, &desc->core_r, src, - nbytes / POLY1305_BLOCK_SIZE, 1); - src += nbytes - (nbytes % POLY1305_BLOCK_SIZE); - nbytes %= POLY1305_BLOCK_SIZE; - } + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_blocks_arch(state, src, len, 1); + else + poly1305_blocks_generic(state, src, len, 1); +} - if (unlikely(nbytes)) { - desc->buflen = nbytes; - memcpy(desc->buf, src, nbytes); - } +void poly1305_update(struct poly1305_desc_ctx *desc, + const u8 *src, unsigned int nbytes) +{ + desc->buflen = BLOCK_HASH_UPDATE(poly1305_blocks, &desc->state, + src, nbytes, POLY1305_BLOCK_SIZE, + desc->buf, desc->buflen); } -EXPORT_SYMBOL_GPL(poly1305_update_generic); +EXPORT_SYMBOL(poly1305_update); -void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst) +void poly1305_final(struct poly1305_desc_ctx *desc, u8 *dst) { if (unlikely(desc->buflen)) { desc->buf[desc->buflen++] = 1; memset(desc->buf + desc->buflen, 0, POLY1305_BLOCK_SIZE - desc->buflen); - poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0); + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_blocks_arch(&desc->state, desc->buf, + POLY1305_BLOCK_SIZE, 0); + else + poly1305_blocks_generic(&desc->state, desc->buf, + POLY1305_BLOCK_SIZE, 0); } - poly1305_core_emit(&desc->h, desc->s, dst); + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_emit_arch(&desc->state.h, dst, desc->s); + else + poly1305_emit_generic(&desc->state.h, dst, desc->s); *desc = (struct poly1305_desc_ctx){}; } -EXPORT_SYMBOL_GPL(poly1305_final_generic); +EXPORT_SYMBOL(poly1305_final); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); diff --git a/lib/crypto/sha256-generic.c b/lib/crypto/sha256-generic.c new file mode 100644 index 000000000000..a16ad4f25ebb --- /dev/null +++ b/lib/crypto/sha256-generic.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256, as specified in + * 
http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf + * + * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>. + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2014 Red Hat Inc. + */ + +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> + +static const u32 SHA256_K[] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, +}; + +static inline u32 Ch(u32 x, u32 y, u32 z) +{ + return z ^ (x & (y ^ z)); +} + +static inline u32 Maj(u32 x, u32 y, u32 z) +{ + return (x & y) | (z & (x | y)); +} + +#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) +#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) +#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) +#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)) + +static inline void LOAD_OP(int I, u32 *W, const u8 *input) +{ + W[I] = get_unaligned_be32((__u32 *)input + I); +} + +static inline void BLEND_OP(int I, u32 *W) +{ + W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; +} + +#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \ + u32 t1, t2; \ + t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \ + t2 = e0(a) + Maj(a, b, c); \ + d += t1; \ + h = t1 + t2; \ +} while (0) + +static void sha256_block_generic(u32 state[SHA256_STATE_WORDS], + const u8 *input, u32 W[64]) +{ + u32 a, b, c, d, e, f, g, h; + int i; + + /* load the input */ + for (i = 0; i < 16; i += 8) { + LOAD_OP(i + 0, W, input); + LOAD_OP(i + 1, W, input); + LOAD_OP(i + 2, W, input); + LOAD_OP(i + 3, W, input); + LOAD_OP(i + 4, W, input); + LOAD_OP(i + 5, W, input); + LOAD_OP(i + 6, W, input); + LOAD_OP(i + 7, W, input); + } + + /* now blend */ + for (i = 16; i < 64; i += 8) { + BLEND_OP(i + 0, W); + BLEND_OP(i + 1, W); + BLEND_OP(i + 2, W); + BLEND_OP(i + 3, W); + BLEND_OP(i + 4, W); + BLEND_OP(i + 5, W); + BLEND_OP(i + 6, W); + BLEND_OP(i + 7, W); + } + + /* load the state into our registers */ + a = state[0]; b = state[1]; c = state[2]; d = state[3]; + e = state[4]; f = state[5]; g = state[6]; h = state[7]; + + /* now iterate */ + for (i = 0; i < 64; i += 8) { + SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h); + SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g); + SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f); + SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e); + SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d); + SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c); + SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b); + SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a); + } + + state[0] += a; state[1] += b; state[2] += c; state[3] += d; + state[4] += e; state[5] += f; state[6] += g; state[7] += h; +} + 
+void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + u32 W[64]; + + do { + sha256_block_generic(state, data, W); + data += SHA256_BLOCK_SIZE; + } while (--nblocks); + + memzero_explicit(W, sizeof(W)); +} +EXPORT_SYMBOL_GPL(sha256_blocks_generic); + +MODULE_DESCRIPTION("SHA-256 Algorithm (generic implementation)"); +MODULE_LICENSE("GPL"); diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 04c1f2557e6c..107e5162507a 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -11,151 +11,65 @@ * Copyright (c) 2014 Red Hat Inc. */ -#include <linux/unaligned.h> -#include <crypto/sha256_base.h> +#include <crypto/internal/blockhash.h> +#include <crypto/internal/sha2.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> -static const u32 SHA256_K[] = { - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, - 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, - 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, - 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, - 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, - 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, - 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, -}; +/* + * If __DISABLE_EXPORTS is defined, then this file is being compiled for a + * pre-boot environment. In that case, ignore the kconfig options, pull the + * generic code into the same translation unit, and use that only. 
+ */ +#ifdef __DISABLE_EXPORTS +#include "sha256-generic.c" +#endif -static inline u32 Ch(u32 x, u32 y, u32 z) +static inline bool sha256_purgatory(void) { - return z ^ (x & (y ^ z)); + return __is_defined(__DISABLE_EXPORTS); } -static inline u32 Maj(u32 x, u32 y, u32 z) +static inline void sha256_blocks(u32 state[SHA256_STATE_WORDS], const u8 *data, + size_t nblocks) { - return (x & y) | (z & (x | y)); + sha256_choose_blocks(state, data, nblocks, sha256_purgatory(), false); } -#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) -#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) -#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) -#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)) - -static inline void LOAD_OP(int I, u32 *W, const u8 *input) +void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len) { - W[I] = get_unaligned_be32((__u32 *)input + I); -} + size_t partial = sctx->count % SHA256_BLOCK_SIZE; -static inline void BLEND_OP(int I, u32 *W) -{ - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; -} - -#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \ - u32 t1, t2; \ - t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \ - t2 = e0(a) + Maj(a, b, c); \ - d += t1; \ - h = t1 + t2; \ -} while (0) - -static void sha256_transform(u32 *state, const u8 *input, u32 *W) -{ - u32 a, b, c, d, e, f, g, h; - int i; - - /* load the input */ - for (i = 0; i < 16; i += 8) { - LOAD_OP(i + 0, W, input); - LOAD_OP(i + 1, W, input); - LOAD_OP(i + 2, W, input); - LOAD_OP(i + 3, W, input); - LOAD_OP(i + 4, W, input); - LOAD_OP(i + 5, W, input); - LOAD_OP(i + 6, W, input); - LOAD_OP(i + 7, W, input); - } - - /* now blend */ - for (i = 16; i < 64; i += 8) { - BLEND_OP(i + 0, W); - BLEND_OP(i + 1, W); - BLEND_OP(i + 2, W); - BLEND_OP(i + 3, W); - BLEND_OP(i + 4, W); - BLEND_OP(i + 5, W); - BLEND_OP(i + 6, W); - BLEND_OP(i + 7, W); - } - - /* load the state into our registers */ - a = state[0]; b = state[1]; c = state[2]; d = state[3]; - e = state[4]; f = state[5]; g = state[6]; h = state[7]; - - /* now iterate */ - for (i = 0; i < 64; i += 8) { - SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h); - SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g); - SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f); - SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e); - SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d); - SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c); - SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b); - SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a); - } - - state[0] += a; state[1] += b; state[2] += c; state[3] += d; - state[4] += e; state[5] += f; state[6] += g; state[7] += h; -} - -static void sha256_transform_blocks(struct sha256_state *sctx, - const u8 *input, int blocks) -{ - u32 W[64]; - - do { - sha256_transform(sctx->state, input, W); - input += SHA256_BLOCK_SIZE; - } while (--blocks); - - memzero_explicit(W, sizeof(W)); -} - -void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) -{ - lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks); + sctx->count += len; + BLOCK_HASH_UPDATE_BLOCKS(sha256_blocks, sctx->ctx.state, data, len, + SHA256_BLOCK_SIZE, sctx->buf, partial); } EXPORT_SYMBOL(sha256_update); -static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size) +static inline void __sha256_final(struct sha256_state *sctx, u8 *out, + size_t digest_size) { - lib_sha256_base_do_finalize(sctx, sha256_transform_blocks); - lib_sha256_base_finish(sctx, out, digest_size); + size_t partial = sctx->count % SHA256_BLOCK_SIZE; + + 
sha256_finup(&sctx->ctx, sctx->buf, partial, out, digest_size, + sha256_purgatory(), false); + memzero_explicit(sctx, sizeof(*sctx)); } -void sha256_final(struct sha256_state *sctx, u8 *out) +void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE]) { - __sha256_final(sctx, out, 32); + __sha256_final(sctx, out, SHA256_DIGEST_SIZE); } EXPORT_SYMBOL(sha256_final); -void sha224_final(struct sha256_state *sctx, u8 *out) +void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE]) { - __sha256_final(sctx, out, 28); + __sha256_final(sctx, out, SHA224_DIGEST_SIZE); } EXPORT_SYMBOL(sha224_final); -void sha256(const u8 *data, unsigned int len, u8 *out) +void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]) { struct sha256_state sctx; diff --git a/crypto/sm3.c b/lib/crypto/sm3.c index 18c2fb73ba16..efff0e267d84 100644 --- a/crypto/sm3.c +++ b/lib/crypto/sm3.c @@ -8,9 +8,11 @@ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ +#include <crypto/sm3.h> +#include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> #include <linux/unaligned.h> -#include <crypto/sm3.h> static const u32 ____cacheline_aligned K[64] = { 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, @@ -166,81 +168,18 @@ static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) #undef W1 #undef W2 -static inline void sm3_block(struct sm3_state *sctx, - u8 const *data, int blocks, u32 W[16]) -{ - while (blocks--) { - sm3_transform(sctx, data, W); - data += SM3_BLOCK_SIZE; - } -} - -void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len) +void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks) { - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; u32 W[16]; - sctx->count += len; - - if ((partial + len) >= SM3_BLOCK_SIZE) { - int blocks; - - if (partial) { - int p = SM3_BLOCK_SIZE - partial; - - memcpy(sctx->buffer + partial, data, p); - data += p; - len -= p; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - blocks = len / SM3_BLOCK_SIZE; - len %= SM3_BLOCK_SIZE; - - if (blocks) { - sm3_block(sctx, data, blocks, W); - data += blocks * SM3_BLOCK_SIZE; - } - - memzero_explicit(W, sizeof(W)); - - partial = 0; - } - if (len) - memcpy(sctx->buffer + partial, data, len); -} -EXPORT_SYMBOL_GPL(sm3_update); - -void sm3_final(struct sm3_state *sctx, u8 *out) -{ - const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64); - __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); - __be32 *digest = (__be32 *)out; - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - u32 W[16]; - int i; - - sctx->buffer[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial); - partial = 0; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - memset(sctx->buffer + partial, 0, bit_offset - partial); - *bits = cpu_to_be64(sctx->count << 3); - sm3_block(sctx, sctx->buffer, 1, W); - - for (i = 0; i < 8; i++) - put_unaligned_be32(sctx->state[i], digest++); + do { + sm3_transform(sctx, data, W); + data += SM3_BLOCK_SIZE; + } while (--blocks); - /* Zeroize sensitive information. 
*/ memzero_explicit(W, sizeof(W)); - memzero_explicit(sctx, sizeof(*sctx)); } -EXPORT_SYMBOL_GPL(sm3_final); +EXPORT_SYMBOL_GPL(sm3_block_generic); MODULE_DESCRIPTION("Generic SM3 library"); MODULE_LICENSE("GPL v2"); diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 3f39955cb0f1..0061d4c7e351 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -177,7 +177,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, const size_t max = suite_set->end - suite_set->start; - copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL); + copy = kcalloc(max, sizeof(*copy), GFP_KERNEL); if (!copy) { /* won't be able to run anything, return an empty set */ return filtered; } diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c index 92b2cccd5e76..484fd85251b4 100644 --- a/lib/kunit/static_stub.c +++ b/lib/kunit/static_stub.c @@ -96,7 +96,7 @@ void __kunit_activate_static_stub(struct kunit *test, /* If the replacement address is NULL, deactivate the stub. */ if (!replacement_addr) { - kunit_deactivate_static_stub(test, replacement_addr); + kunit_deactivate_static_stub(test, real_fn_addr); return; } @@ -608,7 +608,10 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep, * complain. Find the boundary by adding one to the last valid * address. */ - highmem_start = __pa(high_memory - 1) + 1; + if (IS_ENABLED(CONFIG_HIGHMEM)) + highmem_start = __pa(high_memory - 1) + 1; + else + highmem_start = memblock_end_of_DRAM(); pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, &alignment); diff --git a/mm/dmapool.c b/mm/dmapool.c index f0bfc6c490f4..5be8cc1c6529 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -56,6 +56,7 @@ struct dma_pool { /* the pool */ unsigned int size; unsigned int allocation; unsigned int boundary; + int node; char name[32]; struct list_head pools; }; @@ -199,12 +200,13 @@ static void pool_block_push(struct dma_pool *pool, struct dma_block *block, /** - * dma_pool_create - Creates a pool of consistent memory blocks, for dma. + * dma_pool_create_node - Creates a pool of consistent memory blocks, for dma. * @name: name of pool, for diagnostics * @dev: device that will be doing the DMA * @size: size of the blocks in this pool. * @align: alignment requirement for blocks; must be a power of two * @boundary: returned blocks won't cross this power of two boundary + * @node: optional NUMA node to allocate structs 'dma_pool' and 'dma_page' on * Context: not in_interrupt() * * Given one of these pools, dma_pool_alloc() @@ -221,8 +223,8 @@ static void pool_block_push(struct dma_pool *pool, struct dma_block *block, * Return: a dma allocation pool with the requested characteristics, or * %NULL if one can't be created. 
*/ -struct dma_pool *dma_pool_create(const char *name, struct device *dev, - size_t size, size_t align, size_t boundary) +struct dma_pool *dma_pool_create_node(const char *name, struct device *dev, + size_t size, size_t align, size_t boundary, int node) { struct dma_pool *retval; size_t allocation; @@ -251,7 +253,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, boundary = min(boundary, allocation); - retval = kzalloc(sizeof(*retval), GFP_KERNEL); + retval = kzalloc_node(sizeof(*retval), GFP_KERNEL, node); if (!retval) return retval; @@ -264,6 +266,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, retval->size = size; retval->boundary = boundary; retval->allocation = allocation; + retval->node = node; INIT_LIST_HEAD(&retval->pools); /* @@ -295,7 +298,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, mutex_unlock(&pools_reg_lock); return retval; } -EXPORT_SYMBOL(dma_pool_create); +EXPORT_SYMBOL(dma_pool_create_node); static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) { @@ -335,7 +338,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) { struct dma_page *page; - page = kmalloc(sizeof(*page), mem_flags); + page = kmalloc_node(sizeof(*page), mem_flags, pool->node); if (!page) return NULL; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7ae38bfb9096..6a3cf7935c14 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1250,7 +1250,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma) /* * Reset and decrement one ref on hugepage private reservation. * Called with mm->mmap_lock writer semaphore held. - * This function should be only used by move_vma() and operate on + * This function should be only used by mremap and operate on * same sized vma. It should never come here with last ref on the * reservation. */ @@ -2949,12 +2949,20 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) while (start_pfn < end_pfn) { folio = pfn_folio(start_pfn); + + /* + * The folio might have been dissolved from under our feet, so make sure + * to carefully check the state under the lock. + */ + spin_lock_irq(&hugetlb_lock); if (folio_test_hugetlb(folio)) { h = folio_hstate(folio); } else { + spin_unlock_irq(&hugetlb_lock); start_pfn++; continue; } + spin_unlock_irq(&hugetlb_lock); if (!folio_ref_count(folio)) { ret = alloc_and_dissolve_hugetlb_folio(h, folio, @@ -7931,3 +7939,17 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), ALIGN_DOWN(vma->vm_end, PUD_SIZE)); } + +/* + * For hugetlb, mremap() is an odd edge case - while the VMA copying is + * performed, we permit both the old and new VMAs to reference the same + * reservation. + * + * We fix this up after the operation succeeds, or if a newly allocated VMA + * is closed as a result of a failure to allocate memory. 
+ */ +void fixup_hugetlb_reservations(struct vm_area_struct *vma) +{ + if (is_vm_hugetlb_page(vma)) + clear_vma_resv_huge_pages(vma); +} diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 88d1c9dcb507..d2c70cd2afb1 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -292,33 +292,99 @@ void __init __weak kasan_populate_early_vm_area_shadow(void *start, { } +struct vmalloc_populate_data { + unsigned long start; + struct page **pages; +}; + static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr, - void *unused) + void *_data) { - unsigned long page; + struct vmalloc_populate_data *data = _data; + struct page *page; pte_t pte; + int index; if (likely(!pte_none(ptep_get(ptep)))) return 0; - page = __get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; - - __memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE); - pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL); + index = PFN_DOWN(addr - data->start); + page = data->pages[index]; + __memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE); + pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); spin_lock(&init_mm.page_table_lock); if (likely(pte_none(ptep_get(ptep)))) { set_pte_at(&init_mm, addr, ptep, pte); - page = 0; + data->pages[index] = NULL; } spin_unlock(&init_mm.page_table_lock); - if (page) - free_page(page); + + return 0; +} + +static void ___free_pages_bulk(struct page **pages, int nr_pages) +{ + int i; + + for (i = 0; i < nr_pages; i++) { + if (pages[i]) { + __free_pages(pages[i], 0); + pages[i] = NULL; + } + } +} + +static int ___alloc_pages_bulk(struct page **pages, int nr_pages) +{ + unsigned long nr_populated, nr_total = nr_pages; + struct page **page_array = pages; + + while (nr_pages) { + nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages); + if (!nr_populated) { + ___free_pages_bulk(page_array, nr_total - nr_pages); + return -ENOMEM; + } + pages += nr_populated; + nr_pages -= nr_populated; + } + return 0; } +static int __kasan_populate_vmalloc(unsigned long start, unsigned long end) +{ + unsigned long nr_pages, nr_total = PFN_UP(end - start); + struct vmalloc_populate_data data; + int ret = 0; + + data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO); + if (!data.pages) + return -ENOMEM; + + while (nr_total) { + nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0])); + ret = ___alloc_pages_bulk(data.pages, nr_pages); + if (ret) + break; + + data.start = start; + ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE, + kasan_populate_vmalloc_pte, &data); + ___free_pages_bulk(data.pages, nr_pages); + if (ret) + break; + + start += nr_pages * PAGE_SIZE; + nr_total -= nr_pages; + } + + free_page((unsigned long)data.pages); + + return ret; +} + int kasan_populate_vmalloc(unsigned long addr, unsigned long size) { unsigned long shadow_start, shadow_end; @@ -348,9 +414,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size) shadow_start = PAGE_ALIGN_DOWN(shadow_start); shadow_end = PAGE_ALIGN(shadow_end); - ret = apply_to_page_range(&init_mm, shadow_start, - shadow_end - shadow_start, - kasan_populate_vmalloc_pte, NULL); + ret = __kasan_populate_vmalloc(shadow_start, shadow_end); if (ret) return ret; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c96c1f2b9cf5..2d4d65f25fec 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1168,7 +1168,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, { struct mem_cgroup *iter; int ret = 0; - int i = 0; BUG_ON(mem_cgroup_is_root(memcg)); @@ -1178,10 +1177,9 @@ void mem_cgroup_scan_tasks(struct 
mem_cgroup *memcg, css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); while (!ret && (task = css_task_iter_next(&it))) { - /* Avoid potential softlockup warning */ - if ((++i & 1023) == 0) - cond_resched(); ret = fn(task, arg); + /* Avoid potential softlockup warning */ + cond_resched(); } css_task_iter_end(&it); if (ret) { diff --git a/mm/migrate.c b/mm/migrate.c index 676d9cfc7059..c80591514e66 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -947,66 +947,20 @@ int filemap_migrate_folio(struct address_space *mapping, EXPORT_SYMBOL_GPL(filemap_migrate_folio); /* - * Writeback a folio to clean the dirty state - */ -static int writeout(struct address_space *mapping, struct folio *folio) -{ - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .nr_to_write = 1, - .range_start = 0, - .range_end = LLONG_MAX, - .for_reclaim = 1 - }; - int rc; - - if (!mapping->a_ops->writepage) - /* No write method for the address space */ - return -EINVAL; - - if (!folio_clear_dirty_for_io(folio)) - /* Someone else already triggered a write */ - return -EAGAIN; - - /* - * A dirty folio may imply that the underlying filesystem has - * the folio on some queue. So the folio must be clean for - * migration. Writeout may mean we lose the lock and the - * folio state is no longer what we checked for earlier. - * At this point we know that the migration attempt cannot - * be successful. - */ - remove_migration_ptes(folio, folio, 0); - - rc = mapping->a_ops->writepage(&folio->page, &wbc); - - if (rc != AOP_WRITEPAGE_ACTIVATE) - /* unlocked. Relock */ - folio_lock(folio); - - return (rc < 0) ? -EIO : -EAGAIN; -} - -/* * Default handling if a filesystem does not provide a migration function. */ static int fallback_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { - if (folio_test_dirty(src)) { - /* Only writeback folios in full synchronous migration */ - switch (mode) { - case MIGRATE_SYNC: - break; - default: - return -EBUSY; - } - return writeout(mapping, src); - } + WARN_ONCE(mapping->a_ops->writepages, + "%ps does not implement migrate_folio\n", + mapping->a_ops); + if (folio_test_dirty(src)) + return -EBUSY; /* - * Buffers may be managed in a filesystem specific way. - * We must have no buffers or drop them. + * Filesystem may have private data at folio->private that we + * can't migrate automatically. */ if (!filemap_release_folio(src, GFP_KERNEL)) return mode == MIGRATE_SYNC ? 
-EAGAIN : -EBUSY; diff --git a/mm/mremap.c b/mm/mremap.c index 7db9da609c84..0d4948b720e2 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -1188,8 +1188,7 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm, mremap_userfaultfd_prep(new_vma, vrm->uf); } - if (is_vm_hugetlb_page(vma)) - clear_vma_resv_huge_pages(vma); + fixup_hugetlb_reservations(vma); /* Tell pfnmap has moved from this vma */ if (unlikely(vma->vm_flags & VM_PFNMAP)) diff --git a/mm/nommu.c b/mm/nommu.c index 617e7ba8022f..70f92f9a7fab 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -200,7 +200,23 @@ void *vmalloc_noprof(unsigned long size) } EXPORT_SYMBOL(vmalloc_noprof); -void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof); +/* + * vmalloc_huge_node - allocate virtually contiguous memory, on a node + * + * @size: allocation size + * @gfp_mask: flags for the page level allocator + * @node: node to use for allocation or NUMA_NO_NODE + * + * Allocate enough pages to cover @size from the page level + * allocator and map them into contiguous kernel virtual space. + * + * Due to NOMMU implications the node argument and HUGE page attribute is + * ignored. + */ +void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) +{ + return __vmalloc_noprof(size, gfp_mask); +} /* * vzalloc - allocate virtually contiguous memory with zero fill diff --git a/mm/page-writeback.c b/mm/page-writeback.c index c81624bc3969..76200cd85fe7 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2621,27 +2621,6 @@ int write_cache_pages(struct address_space *mapping, } EXPORT_SYMBOL(write_cache_pages); -static int writeback_use_writepage(struct address_space *mapping, - struct writeback_control *wbc) -{ - struct folio *folio = NULL; - struct blk_plug plug; - int err; - - blk_start_plug(&plug); - while ((folio = writeback_iter(mapping, wbc, folio, &err))) { - err = mapping->a_ops->writepage(&folio->page, wbc); - if (err == AOP_WRITEPAGE_ACTIVATE) { - folio_unlock(folio); - err = 0; - } - mapping_set_error(mapping, err); - } - blk_finish_plug(&plug); - - return err; -} - int do_writepages(struct address_space *mapping, struct writeback_control *wbc) { int ret; @@ -2652,14 +2631,11 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc) wb = inode_to_wb_wbc(mapping->host, wbc); wb_bandwidth_estimate_start(wb); while (1) { - if (mapping->a_ops->writepages) { + if (mapping->a_ops->writepages) ret = mapping->a_ops->writepages(mapping, wbc); - } else if (mapping->a_ops->writepage) { - ret = writeback_use_writepage(mapping, wbc); - } else { + else /* deal with chardevs and other special files */ ret = 0; - } if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL) break; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8258349e49ac..47fa713ccb4d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4562,6 +4562,14 @@ restart: } retry: + /* + * Deal with possible cpuset update races or zonelist updates to avoid + * infinite retries. 
+ */ + if (check_retry_cpuset(cpuset_mems_cookie, ac) || + check_retry_zonelist(zonelist_iter_cookie)) + goto restart; + /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ if (alloc_flags & ALLOC_KSWAPD) wake_all_kswapds(order, gfp_mask, ac); diff --git a/mm/page_io.c b/mm/page_io.c index 4bce19df557b..f7716b6569fa 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -237,9 +237,8 @@ static void swap_zeromap_folio_clear(struct folio *folio) * We may have stale swap cache pages in memory: notice * them here and get rid of the unnecessary final write. */ -int swap_writepage(struct page *page, struct writeback_control *wbc) +int swap_writeout(struct folio *folio, struct writeback_control *wbc) { - struct folio *folio = page_folio(page); int ret; if (folio_free_swap(folio)) { diff --git a/mm/readahead.c b/mm/readahead.c index 6a4e96b69702..20d36d6b055e 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -690,9 +690,15 @@ EXPORT_SYMBOL_GPL(page_cache_async_ra); ssize_t ksys_readahead(int fd, loff_t offset, size_t count) { + struct file *file; + const struct inode *inode; + CLASS(fd, f)(fd); + if (fd_empty(f)) + return -EBADF; - if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ)) + file = fd_file(f); + if (!(file->f_mode & FMODE_READ)) return -EBADF; /* @@ -700,9 +706,15 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count) * that can execute readahead. If readahead is not possible * on this file, then we must return -EINVAL. */ - if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops || - (!S_ISREG(file_inode(fd_file(f))->i_mode) && - !S_ISBLK(file_inode(fd_file(f))->i_mode))) + if (!file->f_mapping) + return -EINVAL; + if (!file->f_mapping->a_ops) + return -EINVAL; + + inode = file_inode(file); + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) + return -EINVAL; + if (IS_ANON_FILE(inode)) return -EINVAL; return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED); diff --git a/mm/shmem.c b/mm/shmem.c index 99327c30507c..858cee02ca49 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -98,7 +98,7 @@ static struct vfsmount *shm_mnt __ro_after_init; #define SHORT_SYMLINK_LEN 128 /* - * shmem_fallocate communicates with shmem_fault or shmem_writepage via + * shmem_fallocate communicates with shmem_fault or shmem_writeout via * inode->i_private (with i_rwsem making sure that it has only one user at * a time): we would prefer not to enlarge the shmem inode just for that. */ @@ -107,7 +107,7 @@ struct shmem_falloc { pgoff_t start; /* start of range currently being fallocated */ pgoff_t next; /* the next page offset to be fallocated */ pgoff_t nr_falloced; /* how many new pages have been fallocated */ - pgoff_t nr_unswapped; /* how often writepage refused to swap out */ + pgoff_t nr_unswapped; /* how often writeout refused to swap out */ }; struct shmem_options { @@ -446,7 +446,7 @@ static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped) /* * Special case: whereas normally shmem_recalc_inode() is called * after i_mapping->nrpages has already been adjusted (up or down), - * shmem_writepage() has to raise swapped before nrpages is lowered - + * shmem_writeout() has to raise swapped before nrpages is lowered - * to stop a racing shmem_recalc_inode() from thinking that a page has * been freed. Compensate here, to avoid the need for a followup call. */ @@ -1536,12 +1536,15 @@ int shmem_unuse(unsigned int type) return error; } -/* - * Move the page from the page cache to the swap cache. 
+/** + * shmem_writeout - Write the folio to swap + * @folio: The folio to write + * @wbc: How writeback is to be done + * + * Move the folio from the page cache to the swap cache. */ -static int shmem_writepage(struct page *page, struct writeback_control *wbc) +int shmem_writeout(struct folio *folio, struct writeback_control *wbc) { - struct folio *folio = page_folio(page); struct address_space *mapping = folio->mapping; struct inode *inode = mapping->host; struct shmem_inode_info *info = SHMEM_I(inode); @@ -1550,13 +1553,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) int nr_pages; bool split = false; - /* - * Our capabilities prevent regular writeback or sync from ever calling - * shmem_writepage; but a stacking filesystem might use ->writepage of - * its underlying filesystem, in which case tmpfs should write out to - * swap only in response to memory pressure, and not for the writeback - * threads or sync. - */ if (WARN_ON_ONCE(!wbc->for_reclaim)) goto redirty; @@ -1586,9 +1582,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) try_split: /* Ensure the subpages are still dirty */ folio_test_set_dirty(folio); - if (split_huge_page_to_list_to_order(page, wbc->list, 0)) + if (split_folio_to_list(folio, wbc->list)) goto redirty; - folio = page_folio(page); folio_clear_dirty(folio); } @@ -1646,7 +1641,7 @@ try_split: mutex_unlock(&shmem_swaplist_mutex); BUG_ON(folio_mapped(folio)); - return swap_writepage(&folio->page, wbc); + return swap_writeout(folio, wbc); } list_del_init(&info->swaplist); @@ -1660,6 +1655,7 @@ redirty: folio_unlock(folio); return 0; } +EXPORT_SYMBOL_GPL(shmem_writeout); #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) @@ -3768,7 +3764,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, index--; /* - * Inform shmem_writepage() how far we have reached. + * Inform shmem_writeout() how far we have reached. * No need for lock or barrier: we have the page lock. 
*/ if (!folio_test_uptodate(folio)) @@ -5191,7 +5187,6 @@ static int shmem_error_remove_folio(struct address_space *mapping, } static const struct address_space_operations shmem_aops = { - .writepage = shmem_writepage, .dirty_folio = noop_dirty_folio, #ifdef CONFIG_TMPFS .write_begin = shmem_write_begin, diff --git a/mm/show_mem.c b/mm/show_mem.c index 6af13bcd2ab3..5acb51a9fc49 100644 --- a/mm/show_mem.c +++ b/mm/show_mem.c @@ -223,7 +223,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z global_node_page_state(NR_SHMEM), global_node_page_state(NR_PAGETABLE), global_node_page_state(NR_SECONDARY_PAGETABLE), - global_zone_page_state(NR_BOUNCE), + 0UL, global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), global_zone_page_state(NR_FREE_PAGES), free_pcp, @@ -341,7 +341,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z K(zone->present_pages), K(zone_managed_pages(zone)), K(zone_page_state(zone, NR_MLOCK)), - K(zone_page_state(zone, NR_BOUNCE)), + 0UL, K(free_pcp), K(this_cpu_read(zone->per_cpu_pageset->count)), K(zone_page_state(zone, NR_FREE_CMA_PAGES))); diff --git a/mm/swap.h b/mm/swap.h index 6f4a3f927edb..aa62463976d5 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -20,7 +20,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug) __swap_read_unplug(plug); } void swap_write_unplug(struct swap_iocb *sio); -int swap_writepage(struct page *page, struct writeback_control *wbc); +int swap_writeout(struct folio *folio, struct writeback_control *wbc); void __swap_writepage(struct folio *folio, struct writeback_control *wbc); /* linux/mm/swap_state.c */ @@ -141,7 +141,7 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, return NULL; } -static inline int swap_writepage(struct page *p, struct writeback_control *wbc) +static inline int swap_writeout(struct folio *f, struct writeback_control *wbc) { return 0; } diff --git a/mm/swap_state.c b/mm/swap_state.c index 68fd981b514f..ec2b1c9c9926 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -30,7 +30,6 @@ * vmscan's shrink_folio_list. */ static const struct address_space_operations swap_aops = { - .writepage = swap_writepage, .dirty_folio = noop_dirty_folio, #ifdef CONFIG_MIGRATION .migrate_folio = migrate_folio, diff --git a/mm/swapfile.c b/mm/swapfile.c index 412ccd6543b3..86643b181098 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2368,7 +2368,7 @@ retry: * Limit the number of retries? No: when mmget_not_zero() * above fails, that mm is likely to be freeing swap from * exit_mmap(), which proceeds at its own independent pace; - * and even shmem_writepage() could have been preempted after + * and even shmem_writeout() could have been preempted after * folio_alloc_swap(), temporarily hiding that swap. It's easy * and robust (though cpu-intensive) just to keep retrying. 
*/ diff --git a/mm/truncate.c b/mm/truncate.c index 5d98054094d1..f2aaf99f2990 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -191,6 +191,7 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio) bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) { loff_t pos = folio_pos(folio); + size_t size = folio_size(folio); unsigned int offset, length; struct page *split_at, *split_at2; @@ -198,14 +199,13 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) offset = start - pos; else offset = 0; - length = folio_size(folio); - if (pos + length <= (u64)end) - length = length - offset; + if (pos + size <= (u64)end) + length = size - offset; else length = end + 1 - pos - offset; folio_wait_writeback(folio); - if (length == folio_size(folio)) { + if (length == size) { truncate_inode_folio(folio->mapping, folio); return true; } @@ -224,16 +224,20 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) return true; split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE); - split_at2 = folio_page(folio, - PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE); - if (!try_folio_split(folio, split_at, NULL)) { /* * try to split at offset + length to make sure folios within * the range can be dropped, especially to avoid memory waste * for shmem truncate */ - struct folio *folio2 = page_folio(split_at2); + struct folio *folio2; + + if (offset + length == size) + goto no_split; + + split_at2 = folio_page(folio, + PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE); + folio2 = page_folio(split_at2); if (!folio_try_get(folio2)) goto no_split; @@ -1834,6 +1834,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, return new_vma; out_vma_link: + fixup_hugetlb_reservations(new_vma); vma_close(new_vma); if (new_vma->vm_file) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 2d7511654831..8a1f7783bbdb 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3944,9 +3944,10 @@ void *vmalloc_noprof(unsigned long size) EXPORT_SYMBOL(vmalloc_noprof); /** - * vmalloc_huge - allocate virtually contiguous memory, allow huge pages + * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages * @size: allocation size * @gfp_mask: flags for the page level allocator + * @node: node to use for allocation or NUMA_NO_NODE * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. @@ -3955,13 +3956,13 @@ EXPORT_SYMBOL(vmalloc_noprof); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) +void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) { return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, - gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, - NUMA_NO_NODE, __builtin_return_address(0)); + gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, + node, __builtin_return_address(0)); } -EXPORT_SYMBOL_GPL(vmalloc_huge_noprof); +EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof); /** * vzalloc - allocate virtually contiguous memory with zero fill @@ -4093,8 +4094,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) * would be a good heuristic for when to shrink the vm_area? */ if (size <= old_size) { - /* Zero out "freed" memory. */ - if (want_init_on_free()) + /* Zero out "freed" memory, potentially for future realloc. 
*/ + if (want_init_on_free() || want_init_on_alloc(flags)) memset((void *)p + size, 0, old_size - size); vm->requested_size = size; kasan_poison_vmalloc(p + size, old_size - size); @@ -4107,10 +4108,13 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) if (size <= alloced_size) { kasan_unpoison_vmalloc(p + old_size, size - old_size, KASAN_VMALLOC_PROT_NORMAL); - /* Zero out "alloced" memory. */ - if (want_init_on_alloc(flags)) - memset((void *)p + old_size, 0, size - old_size); + /* + * No need to zero memory here, as unused memory will have + * already been zeroed at initial allocation time or during + * realloc shrink time. + */ vm->requested_size = size; + return (void *)p; } /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 3783e45bfc92..b6f4db6c240f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -648,21 +648,20 @@ typedef enum { /* * pageout is called by shrink_folio_list() for each dirty folio. - * Calls ->writepage(). */ static pageout_t pageout(struct folio *folio, struct address_space *mapping, struct swap_iocb **plug, struct list_head *folio_list) { + int (*writeout)(struct folio *, struct writeback_control *); + /* - * If the folio is dirty, only perform writeback if that write - * will be non-blocking. To prevent this allocation from being - * stalled by pagecache activity. But note that there may be - * stalls if we need to run get_block(). We could test - * PagePrivate for that. - * - * If this process is currently in __generic_file_write_iter() against - * this folio's queue, we can perform writeback even if that - * will block. + * We no longer attempt to writeback filesystem folios here, other + * than tmpfs/shmem. That's taken care of in page-writeback. + * If we find a dirty filesystem folio at the end of the LRU list, + * typically that means the filesystem is saturating the storage + * with contiguous writes and telling it to write a folio here + * would only make the situation worse by injecting an element + * of random access. * * If the folio is swapcache, write it back even if that would * block, for some throttling. This happens by accident, because @@ -685,7 +684,11 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping, } return PAGE_KEEP; } - if (mapping->a_ops->writepage == NULL) + if (shmem_mapping(mapping)) + writeout = shmem_writeout; + else if (folio_test_anon(folio)) + writeout = swap_writeout; + else return PAGE_ACTIVATE; if (folio_clear_dirty_for_io(folio)) { @@ -708,7 +711,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping, wbc.list = folio_list; folio_set_reclaim(folio); - res = mapping->a_ops->writepage(&folio->page, &wbc); + res = writeout(folio, &wbc); if (res < 0) handle_write_error(mapping, folio, res); if (res == AOP_WRITEPAGE_ACTIVATE) { @@ -717,7 +720,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping, } if (!folio_test_writeback(folio)) { - /* synchronous write or broken a_ops? */ + /* synchronous write? 
*/ folio_clear_reclaim(folio); } trace_mm_vmscan_write_folio(folio); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 73472756618a..042d3ac3b4a3 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1411,7 +1411,8 @@ static void l2cap_request_info(struct l2cap_conn *conn) sizeof(req), &req); } -static bool l2cap_check_enc_key_size(struct hci_conn *hcon) +static bool l2cap_check_enc_key_size(struct hci_conn *hcon, + struct l2cap_chan *chan) { /* The minimum encryption key size needs to be enforced by the * host stack before establishing any L2CAP connections. The @@ -1425,7 +1426,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) int min_key_size = hcon->hdev->min_enc_key_size; /* On FIPS security level, key size must be 16 bytes */ - if (hcon->sec_level == BT_SECURITY_FIPS) + if (chan->sec_level == BT_SECURITY_FIPS) min_key_size = 16; return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || @@ -1453,7 +1454,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) !__l2cap_no_conn_pending(chan)) return; - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -1528,7 +1529,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) continue; } - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else l2cap_chan_close(chan, ECONNREFUSED); @@ -3992,7 +3993,7 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(L2CAP_PSM_SDP) && (!hci_conn_check_link_mode(conn->hcon) || - !l2cap_check_enc_key_size(conn->hcon))) { + !l2cap_check_enc_key_size(conn->hcon, pchan))) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; @@ -7352,7 +7353,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) } if (chan->state == BT_CONNECT) { - if (!status && l2cap_check_enc_key_size(hcon)) + if (!status && l2cap_check_enc_key_size(hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -7362,7 +7363,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct l2cap_conn_rsp rsp; __u16 res, stat; - if (!status && l2cap_check_enc_key_size(hcon)) { + if (!status && l2cap_check_enc_key_size(hcon, chan)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 98aea5485aae..a8c67035e23c 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = { * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. 
*/ -static const u32 br_dst_default_metrics[RTAX_MAX] = { - [RTAX_MTU - 1] = 1500, -}; - void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; rcuref_init(&rt->dst.__rcuref, 1); rt->dst.dev = br->dev; - dst_init_metrics(&rt->dst, br_dst_default_metrics, true); + dst_init_metrics(&rt->dst, br->metrics, false); + dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu); rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; rt->dst.ops = &fake_dst_ops; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d5b3c5936a79..4715a8d6dc32 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -505,6 +505,7 @@ struct net_bridge { struct rtable fake_rtable; struct rt6_info fake_rt6_info; }; + u32 metrics[RTAX_MAX]; #endif u16 group_fwd_mask; u16 group_fwd_mask_required; diff --git a/net/can/bcm.c b/net/can/bcm.c index 0bca1b9b3f70..6bc1cc4c94c5 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -58,6 +58,7 @@ #include <linux/can/skb.h> #include <linux/can/bcm.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <net/sock.h> #include <net/net_namespace.h> @@ -122,6 +123,7 @@ struct bcm_op { struct canfd_frame last_sframe; struct sock *sk; struct net_device *rx_reg_dev; + spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */ }; struct bcm_sock { @@ -217,7 +219,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); seq_printf(m, " <<<\n"); - list_for_each_entry(op, &bo->rx_ops, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(op, &bo->rx_ops, list) { unsigned long reduction; @@ -273,6 +277,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, "# sent %ld\n", op->frames_abs); } seq_putc(m, '\n'); + + rcu_read_unlock(); + return 0; } #endif /* CONFIG_PROC_FS */ @@ -285,13 +292,18 @@ static void bcm_can_tx(struct bcm_op *op) { struct sk_buff *skb; struct net_device *dev; - struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; + struct canfd_frame *cf; int err; /* no target device? => exit */ if (!op->ifindex) return; + /* read currframe under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + cf = op->frames + op->cfsiz * op->currframe; + spin_unlock_bh(&op->bcm_tx_lock); + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? */ @@ -312,6 +324,10 @@ static void bcm_can_tx(struct bcm_op *op) skb->dev = dev; can_skb_set_owner(skb, op->sk); err = can_send(skb, 1); + + /* update currframe and count under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + if (!err) op->frames_abs++; @@ -320,6 +336,11 @@ static void bcm_can_tx(struct bcm_op *op) /* reached last frame? 
*/ if (op->currframe >= op->nframes) op->currframe = 0; + + if (op->count > 0) + op->count--; + + spin_unlock_bh(&op->bcm_tx_lock); out: dev_put(dev); } @@ -430,7 +451,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) struct bcm_msg_head msg_head; if (op->kt_ival1 && (op->count > 0)) { - op->count--; + bcm_can_tx(op); if (!op->count && (op->flags & TX_COUNTEVT)) { /* create notification to user */ @@ -445,7 +466,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) bcm_send_to_user(op, &msg_head, NULL, 0); } - bcm_can_tx(op); } else if (op->kt_ival2) { bcm_can_tx(op); @@ -843,7 +863,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, REGMASK(op->can_id), bcm_rx_handler, op); - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -863,7 +883,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh, list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -956,6 +976,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } op->flags = msg_head->flags; + /* only lock for unlikely count/nframes/currframe changes */ + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX || + op->flags & SETTIMER) { + + spin_lock_bh(&op->bcm_tx_lock); + + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX) { + /* potentially update changed nframes */ + op->nframes = msg_head->nframes; + /* restart multiple frame transmission */ + op->currframe = 0; + } + + if (op->flags & SETTIMER) + op->count = msg_head->count; + + spin_unlock_bh(&op->bcm_tx_lock); + } + } else { /* insert new BCM operation for the given can_id */ @@ -963,9 +1004,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (!op) return -ENOMEM; + spin_lock_init(&op->bcm_tx_lock); op->can_id = msg_head->can_id; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; + op->nframes = msg_head->nframes; + + if (op->flags & SETTIMER) + op->count = msg_head->count; /* create array for CAN frames and copy the data */ if (msg_head->nframes > 1) { @@ -1023,22 +1069,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ - if (op->nframes != msg_head->nframes) { - op->nframes = msg_head->nframes; - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - - /* check flags */ - - if (op->flags & TX_RESET_MULTI_IDX) { - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - if (op->flags & SETTIMER) { /* set timer values */ - op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); @@ -1055,11 +1087,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->flags |= TX_ANNOUNCE; } - if (op->flags & TX_ANNOUNCE) { + if (op->flags & TX_ANNOUNCE) bcm_can_tx(op); - if (op->count) - op->count--; - } if (op->flags & STARTTIMER) bcm_tx_start_timer(op); @@ -1272,7 +1301,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, bcm_rx_handler, op, "bcm", sk); if (err) { /* this bcm rx op is broken -> remove it */ - list_del(&op->list); + 
list_del_rcu(&op->list); bcm_remove_op(op); return err; } diff --git a/net/core/sock.c b/net/core/sock.c index e54449c9ab0b..1d9466a1f54e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -148,6 +148,8 @@ #include <linux/ethtool.h> +#include <uapi/linux/pidfd.h> + #include "dev.h" static DEFINE_MUTEX(proto_list_mutex); @@ -1879,6 +1881,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname, { struct pid *peer_pid; struct file *pidfd_file = NULL; + unsigned int flags = 0; int pidfd; if (len > sizeof(pidfd)) @@ -1891,7 +1894,14 @@ int sk_getsockopt(struct sock *sk, int level, int optname, if (!peer_pid) return -ENODATA; - pidfd = pidfd_prepare(peer_pid, 0, &pidfd_file); + /* The use of PIDFD_STALE requires stashing of struct pid + * on pidfs with pidfs_register_pid() and only AF_UNIX + * were prepared for this. + */ + if (sk->sk_family == AF_UNIX) + flags = PIDFD_STALE; + + pidfd = pidfd_prepare(peer_pid, flags, &pidfd_file); put_pid(peer_pid); if (pidfd < 0) return pidfd; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index c33d4bf17929..0b7564b53790 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -140,7 +140,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev) { - u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + u8 *tag; + + if (skb_linearize(skb)) + return NULL; + + tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M, KSZ_EGRESS_TAG_LEN); @@ -311,10 +316,16 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev) { - /* Tag decoding */ - u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; - unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M; unsigned int len = KSZ_EGRESS_TAG_LEN; + unsigned int port; + u8 *tag; + + if (skb_linearize(skb)) + return NULL; + + /* Tag decoding */ + tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M; /* Extra 4-bytes PTP timestamp */ if (tag[0] & KSZ9477_PTP_TAG_INDICATION) { diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 0e4076866c0a..f14a41ee4aa1 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -120,47 +120,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) } #ifdef CONFIG_INET_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk; - sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, esp_free_tcp_sk); - } spin_unlock_bh(&x->lock); sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4, @@ -173,20 +142,6 @@ static struct sock 
*esp_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); } - spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; } @@ -199,8 +154,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) sk = esp_find_tcp_sk(x); err = PTR_ERR_OR_ZERO(sk); - if (err) + if (err) { + kfree_skb(skb); goto out; + } bh_lock_sock(sk); if (sock_owned_by_user(sk)) @@ -209,6 +166,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk); + sock_put(sk); + out: rcu_read_unlock(); return err; @@ -392,6 +351,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk); + sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index a8b04d4abcaa..85dc208f32e9 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -120,11 +120,6 @@ static void ipmr_expire_process(struct timer_list *t); lockdep_rtnl_is_held() || \ list_empty(&net->ipv4.mr_tables)) -static bool ipmr_can_free_table(struct net *net) -{ - return !check_net(net) || !net_initialized(net); -} - static struct mr_table *ipmr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -317,11 +312,6 @@ EXPORT_SYMBOL(ipmr_rule_default); #define ipmr_for_each_table(mrt, net) \ for (mrt = net->ipv4.mrt; mrt; mrt = NULL) -static bool ipmr_can_free_table(struct net *net) -{ - return !check_net(net); -} - static struct mr_table *ipmr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -437,7 +427,7 @@ static void ipmr_free_table(struct mr_table *mrt) { struct net *net = read_pnet(&mrt->net); - WARN_ON_ONCE(!ipmr_can_free_table(net)); + WARN_ON_ONCE(!mr_can_free_table(net)); timer_shutdown_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC | diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index b5b06323cfd9..0d31a8c108d4 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -182,11 +182,15 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, int offset = skb_gro_offset(skb); const struct net_offload *ops; struct sk_buff *pp = NULL; - int ret; - - offset = offset - sizeof(struct udphdr); + int len, dlen; + __u8 *udpdata; + __be32 *udpdata32; - if (!pskb_pull(skb, offset)) + len = skb->len - offset; + dlen = offset + min(len, 8); + udpdata = skb_gro_header(skb, dlen, offset); + udpdata32 = (__be32 *)udpdata; + if (unlikely(!udpdata)) return NULL; rcu_read_lock(); @@ -194,11 +198,10 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (!ops || !ops->callbacks.gro_receive) goto out; - ret = __xfrm4_udp_encap_rcv(sk, skb, false); - if (ret) + /* check if it is a keepalive or IKE packet */ + if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out; - skb_push(skb, offset); NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); @@ -208,7 +211,6 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, out: rcu_read_unlock(); - skb_push(skb, offset); NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 1; diff --git a/net/ipv6/esp6.c 
b/net/ipv6/esp6.c index 9e73944e3b53..72adfc107b55 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -137,47 +137,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) } #ifdef CONFIG_INET6_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk; - sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, esp_free_tcp_sk); - } spin_unlock_bh(&x->lock); sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6, @@ -190,20 +159,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); } - spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; } @@ -216,8 +171,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) sk = esp6_find_tcp_sk(x); err = PTR_ERR_OR_ZERO(sk); - if (err) + if (err) { + kfree_skb(skb); goto out; + } bh_lock_sock(sk); if (sock_owned_by_user(sk)) @@ -226,6 +183,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk); + sock_put(sk); + out: rcu_read_unlock(); return err; @@ -422,6 +381,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk); + sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index b413c9c8a21c..3276cde5ebd7 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -108,11 +108,6 @@ static void ipmr_expire_process(struct timer_list *t); lockdep_rtnl_is_held() || \ list_empty(&net->ipv6.mr6_tables)) -static bool ip6mr_can_free_table(struct net *net) -{ - return !check_net(net) || !net_initialized(net); -} - static struct mr_table *ip6mr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -306,11 +301,6 @@ EXPORT_SYMBOL(ip6mr_rule_default); #define ip6mr_for_each_table(mrt, net) \ for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) -static bool ip6mr_can_free_table(struct net *net) -{ - return !check_net(net); -} - static struct mr_table *ip6mr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -416,7 +406,7 @@ static void ip6mr_free_table(struct mr_table *mrt) { struct net *net = read_pnet(&mrt->net); - WARN_ON_ONCE(!ip6mr_can_free_table(net)); + WARN_ON_ONCE(!mr_can_free_table(net)); timer_shutdown_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC | diff --git a/net/ipv6/xfrm6_input.c 
b/net/ipv6/xfrm6_input.c index 4abc5e9d6322..841c81abaaf4 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -179,14 +179,18 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, int offset = skb_gro_offset(skb); const struct net_offload *ops; struct sk_buff *pp = NULL; - int ret; + int len, dlen; + __u8 *udpdata; + __be32 *udpdata32; if (skb->protocol == htons(ETH_P_IP)) return xfrm4_gro_udp_encap_rcv(sk, head, skb); - offset = offset - sizeof(struct udphdr); - - if (!pskb_pull(skb, offset)) + len = skb->len - offset; + dlen = offset + min(len, 8); + udpdata = skb_gro_header(skb, dlen, offset); + udpdata32 = (__be32 *)udpdata; + if (unlikely(!udpdata)) return NULL; rcu_read_lock(); @@ -194,11 +198,10 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (!ops || !ops->callbacks.gro_receive) goto out; - ret = __xfrm6_udp_encap_rcv(sk, skb, false); - if (ret) + /* check if it is a keepalive or IKE packet */ + if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out; - skb_push(skb, offset); NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); @@ -208,7 +211,6 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, out: rcu_read_unlock(); - skb_push(skb, offset); NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 1; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 0259cde394ba..cc77ec5769d8 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -887,15 +887,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (sk->sk_type != SOCK_STREAM) goto copy_uaddr; + /* Partial read */ + if (used + offset < skb_len) + continue; + if (!(flags & MSG_PEEK)) { skb_unlink(skb, &sk->sk_receive_queue); kfree_skb(skb); *seq = 0; } - - /* Partial read */ - if (used + offset < skb_len) - continue; } while (len > 0); out: diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index cb8c525ea20e..7986145a527c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1569,6 +1569,9 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) return err; } + sch->qstats.backlog += len; + sch->q.qlen++; + if (first && !cl->cl_nactive) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); @@ -1584,9 +1587,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } - sch->qstats.backlog += len; - sch->q.qlen++; - return NET_XMIT_SUCCESS; } diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index eadc00410ebc..98f78cd55905 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -631,7 +631,7 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent, const char *name) { struct qstr q = QSTR(name); - struct dentry *dentry = d_hash_and_lookup(parent, &q); + struct dentry *dentry = try_lookup_noperm(&q, parent); if (!dentry) { dentry = d_alloc(parent, &q); if (!dentry) @@ -658,7 +658,7 @@ static void __rpc_depopulate(struct dentry *parent, for (i = start; i < eof; i++) { name.name = files[i].name; name.len = strlen(files[i].name); - dentry = d_hash_and_lookup(parent, &name); + dentry = try_lookup_noperm(&name, parent); if (dentry == NULL) continue; @@ -1190,7 +1190,7 @@ static const struct rpc_filelist files[] = { struct dentry *rpc_d_lookup_sb(const struct super_block *sb, const unsigned char *dir_name) { - return d_hash_and_lookup(sb->s_root, &QSTR(dir_name)); + return try_lookup_noperm(&QSTR(dir_name), sb->s_root); } 
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb); @@ -1301,7 +1301,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) struct dentry *pipe_dentry = NULL; /* We should never get this far if "gssd" doesn't exist */ - gssd_dentry = d_hash_and_lookup(root, &QSTR(files[RPCAUTH_gssd].name)); + gssd_dentry = try_lookup_noperm(&QSTR(files[RPCAUTH_gssd].name), root); if (!gssd_dentry) return ERR_PTR(-ENOENT); @@ -1311,8 +1311,8 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) goto out; } - clnt_dentry = d_hash_and_lookup(gssd_dentry, - &QSTR(gssd_dummy_clnt_dir[0].name)); + clnt_dentry = try_lookup_noperm(&QSTR(gssd_dummy_clnt_dir[0].name), + gssd_dentry); if (!clnt_dentry) { __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); pipe_dentry = ERR_PTR(-ENOENT); diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index c524421ec652..8584893b4785 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, goto exit; } + /* Get net to avoid freed tipc_crypto when delete namespace */ + get_net(aead->crypto->net); + /* Now, do encrypt */ rc = crypto_aead_encrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) return rc; tipc_bearer_put(b); + put_net(aead->crypto->net); exit: kfree(ctx); @@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(void *data, int err) kfree(tx_ctx); tipc_bearer_put(b); tipc_aead_put(aead); + put_net(net); } /** diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index f78a2492826f..59a64b2ced6e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -85,10 +85,13 @@ #include <linux/file.h> #include <linux/filter.h> #include <linux/fs.h> +#include <linux/fs_struct.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mount.h> #include <linux/namei.h> +#include <linux/net.h> +#include <linux/pidfs.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/sched/signal.h> @@ -643,6 +646,9 @@ static void unix_sock_destructor(struct sock *sk) return; } + if (sk->sk_peer_pid) + pidfs_put_pid(sk->sk_peer_pid); + if (u->addr) unix_release_addr(u->addr); @@ -734,13 +740,48 @@ static void unix_release_sock(struct sock *sk, int embrion) unix_gc(); /* Garbage collect fds */ } -static void init_peercred(struct sock *sk) +struct unix_peercred { + struct pid *peer_pid; + const struct cred *peer_cred; +}; + +static inline int prepare_peercred(struct unix_peercred *peercred) { - sk->sk_peer_pid = get_pid(task_tgid(current)); - sk->sk_peer_cred = get_current_cred(); + struct pid *pid; + int err; + + pid = task_tgid(current); + err = pidfs_register_pid(pid); + if (likely(!err)) { + peercred->peer_pid = get_pid(pid); + peercred->peer_cred = get_current_cred(); + } + return err; } -static void update_peercred(struct sock *sk) +static void drop_peercred(struct unix_peercred *peercred) +{ + const struct cred *cred = NULL; + struct pid *pid = NULL; + + might_sleep(); + + swap(peercred->peer_pid, pid); + swap(peercred->peer_cred, cred); + + pidfs_put_pid(pid); + put_pid(pid); + put_cred(cred); +} + +static inline void init_peercred(struct sock *sk, + const struct unix_peercred *peercred) +{ + sk->sk_peer_pid = peercred->peer_pid; + sk->sk_peer_cred = peercred->peer_cred; +} + +static void update_peercred(struct sock *sk, struct unix_peercred *peercred) { const struct cred *old_cred; struct pid *old_pid; @@ -748,11 +789,11 @@ static void update_peercred(struct sock *sk) spin_lock(&sk->sk_peer_lock); old_pid = sk->sk_peer_pid; old_cred = 
sk->sk_peer_cred; - init_peercred(sk); + init_peercred(sk, peercred); spin_unlock(&sk->sk_peer_lock); - put_pid(old_pid); - put_cred(old_cred); + peercred->peer_pid = old_pid; + peercred->peer_cred = old_cred; } static void copy_peercred(struct sock *sk, struct sock *peersk) @@ -761,6 +802,7 @@ static void copy_peercred(struct sock *sk, struct sock *peersk) spin_lock(&sk->sk_peer_lock); sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); + pidfs_get_pid(sk->sk_peer_pid); sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); spin_unlock(&sk->sk_peer_lock); } @@ -770,6 +812,7 @@ static int unix_listen(struct socket *sock, int backlog) int err; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk); + struct unix_peercred peercred = {}; err = -EOPNOTSUPP; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) @@ -777,6 +820,9 @@ static int unix_listen(struct socket *sock, int backlog) err = -EINVAL; if (!READ_ONCE(u->addr)) goto out; /* No listens on an unbound socket */ + err = prepare_peercred(&peercred); + if (err) + goto out; unix_state_lock(sk); if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) goto out_unlock; @@ -786,11 +832,12 @@ static int unix_listen(struct socket *sock, int backlog) WRITE_ONCE(sk->sk_state, TCP_LISTEN); /* set credentials so connect can copy them */ - update_peercred(sk); + update_peercred(sk, &peercred); err = 0; out_unlock: unix_state_unlock(sk); + drop_peercred(&peercred); out: return err; } @@ -1101,7 +1148,7 @@ static int unix_release(struct socket *sock) } static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len, - int type) + int type, int flags) { struct inode *inode; struct path path; @@ -1109,13 +1156,39 @@ static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len, int err; unix_mkname_bsd(sunaddr, addr_len); - err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); - if (err) - goto fail; - err = path_permission(&path, MAY_WRITE); - if (err) - goto path_put; + if (flags & SOCK_COREDUMP) { + const struct cred *cred; + struct cred *kcred; + struct path root; + + kcred = prepare_kernel_cred(&init_task); + if (!kcred) { + err = -ENOMEM; + goto fail; + } + + task_lock(&init_task); + get_fs_root(init_task.fs, &root); + task_unlock(&init_task); + + cred = override_creds(kcred); + err = vfs_path_lookup(root.dentry, root.mnt, sunaddr->sun_path, + LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS | + LOOKUP_NO_MAGICLINKS, &path); + put_cred(revert_creds(cred)); + path_put(&root); + if (err) + goto fail; + } else { + err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); + if (err) + goto fail; + + err = path_permission(&path, MAY_WRITE); + if (err) + goto path_put; + } err = -ECONNREFUSED; inode = d_backing_inode(path.dentry); @@ -1165,12 +1238,12 @@ static struct sock *unix_find_abstract(struct net *net, static struct sock *unix_find_other(struct net *net, struct sockaddr_un *sunaddr, - int addr_len, int type) + int addr_len, int type, int flags) { struct sock *sk; if (sunaddr->sun_path[0]) - sk = unix_find_bsd(sunaddr, addr_len, type); + sk = unix_find_bsd(sunaddr, addr_len, type, flags); else sk = unix_find_abstract(net, sunaddr, addr_len, type); @@ -1428,7 +1501,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, } restart: - other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type); + other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type, 0); if (IS_ERR(other)) { err = PTR_ERR(other); goto out; @@ -1525,6 +1598,7 @@ static int unix_stream_connect(struct socket *sock, 
struct sockaddr *uaddr, struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; struct sock *sk = sock->sk, *newsk = NULL, *other = NULL; struct unix_sock *u = unix_sk(sk), *newu, *otheru; + struct unix_peercred peercred = {}; struct net *net = sock_net(sk); struct sk_buff *skb = NULL; unsigned char state; @@ -1561,6 +1635,10 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, goto out; } + err = prepare_peercred(&peercred); + if (err) + goto out; + /* Allocate skb for sending to listening sock */ skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); if (!skb) { @@ -1570,7 +1648,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, restart: /* Find listening sock. */ - other = unix_find_other(net, sunaddr, addr_len, sk->sk_type); + other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, flags); if (IS_ERR(other)) { err = PTR_ERR(other); goto out_free_skb; @@ -1636,7 +1714,7 @@ restart: unix_peer(newsk) = sk; newsk->sk_state = TCP_ESTABLISHED; newsk->sk_type = sk->sk_type; - init_peercred(newsk); + init_peercred(newsk, &peercred); newu = unix_sk(newsk); newu->listener = other; RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); @@ -1695,20 +1773,33 @@ out_free_skb: out_free_sk: unix_release_sock(newsk, 0); out: + drop_peercred(&peercred); return err; } static int unix_socketpair(struct socket *socka, struct socket *sockb) { + struct unix_peercred ska_peercred = {}, skb_peercred = {}; struct sock *ska = socka->sk, *skb = sockb->sk; + int err; + + err = prepare_peercred(&ska_peercred); + if (err) + return err; + + err = prepare_peercred(&skb_peercred); + if (err) { + drop_peercred(&ska_peercred); + return err; + } /* Join our sockets back to back */ sock_hold(ska); sock_hold(skb); unix_peer(ska) = skb; unix_peer(skb) = ska; - init_peercred(ska); - init_peercred(skb); + init_peercred(ska, &ska_peercred); + init_peercred(skb, &skb_peercred); ska->sk_state = TCP_ESTABLISHED; skb->sk_state = TCP_ESTABLISHED; @@ -2026,7 +2117,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_namelen) { lookup: other = unix_find_other(sock_net(sk), msg->msg_name, - msg->msg_namelen, sk->sk_type); + msg->msg_namelen, sk->sk_type, 0); if (IS_ERR(other)) { err = PTR_ERR(other); goto out_free; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 4abc81f33d3e..72c000c0ae5f 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -1304,7 +1304,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) xs->queue_id = qid; xp_add_xsk(xs->pool, xs); - if (xs->zc && qid < dev->real_num_rx_queues) { + if (qid < dev->real_num_rx_queues) { struct netdev_rx_queue *rxq; rxq = __netif_get_rx_queue(dev, qid); diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index fe82e2d07300..fc7a603b04f1 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -171,8 +171,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb) struct espintcp_ctx *ctx = espintcp_getctx(sk); if (skb_queue_len(&ctx->out_queue) >= - READ_ONCE(net_hotdata.max_backlog)) + READ_ONCE(net_hotdata.max_backlog)) { + kfree_skb(skb); return -ENOBUFS; + } __skb_queue_tail(&ctx->out_queue, skb); diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 0c1420534394..907c3ccb440d 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -48,7 +48,6 @@ static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen) { struct acomp_req *req = ipcomp_cb(skb)->req; struct ipcomp_req_extra *extra; - const int plen = skb->data_len; struct 
scatterlist *dsg; int len, dlen; @@ -64,7 +63,7 @@ static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen) /* Only update truesize on input. */ if (!hlen) - skb->truesize += dlen - plen; + skb->truesize += dlen; skb->data_len = dlen; skb->len += dlen; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 143ac3aa7537..f4bad8c895d6 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1581,6 +1581,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) struct xfrm_policy *delpol; struct hlist_head *chain; + /* Sanitize mark before store */ + policy->mark.v &= policy->mark.m; + spin_lock_bh(&net->xfrm.xfrm_policy_lock); chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); if (chain) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 341d79ecb5c2..07fe8e5daa32 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -838,9 +838,6 @@ int __xfrm_state_delete(struct xfrm_state *x) xfrm_nat_keepalive_state_updated(x); spin_unlock(&net->xfrm.xfrm_state_lock); - if (x->encap_sk) - sock_put(rcu_dereference_raw(x->encap_sk)); - xfrm_dev_state_delete(x); /* All xfrm_state objects are created by xfrm_state_alloc. @@ -1721,6 +1718,9 @@ static void __xfrm_state_insert(struct xfrm_state *x) list_add(&x->km.all, &net->xfrm.state_all); + /* Sanitize mark before store */ + x->mark.v &= x->mark.m; + h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, x->props.reqid, x->props.family); XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h, diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index ab37e1d35c70..1a532b83a9af 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -10,6 +10,7 @@ #include <linux/blk-mq.h> #include <linux/blk_types.h> #include <linux/blkdev.h> +#include <linux/configfs.h> #include <linux/cpumask.h> #include <linux/cred.h> #include <linux/device/faux.h> diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c index 06575553eda5..3e9b910a88e9 100644 --- a/rust/helpers/mutex.c +++ b/rust/helpers/mutex.c @@ -17,3 +17,8 @@ void rust_helper_mutex_assert_is_held(struct mutex *mutex) { lockdep_assert_held(mutex); } + +void rust_helper_mutex_destroy(struct mutex *lock) +{ + mutex_destroy(lock); +} diff --git a/rust/kernel/configfs.rs b/rust/kernel/configfs.rs new file mode 100644 index 000000000000..b93ac7b0bebc --- /dev/null +++ b/rust/kernel/configfs.rs @@ -0,0 +1,1049 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! configfs interface: Userspace-driven Kernel Object Configuration +//! +//! configfs is an in-memory pseudo file system for configuration of kernel +//! modules. Please see the [C documentation] for details and intended use of +//! configfs. +//! +//! This module does not support the following configfs features: +//! +//! - Items. All group children are groups. +//! - Symlink support. +//! - `disconnect_notify` hook. +//! - Default groups. +//! +//! See the [`rust_configfs.rs`] sample for a full example use of this module. +//! +//! C header: [`include/linux/configfs.h`](srctree/include/linux/configfs.h) +//! +//! # Example +//! +//! ```ignore +//! use kernel::alloc::flags; +//! use kernel::c_str; +//! use kernel::configfs_attrs; +//! use kernel::configfs; +//! use kernel::new_mutex; +//! use kernel::page::PAGE_SIZE; +//! use kernel::sync::Mutex; +//! use kernel::ThisModule; +//! +//! #[pin_data] +//! struct RustConfigfs { +//! #[pin] +//! config: configfs::Subsystem<Configuration>, +//! } +//! +//! 
impl kernel::InPlaceModule for RustConfigfs { +//! fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> { +//! pr_info!("Rust configfs sample (init)\n"); +//! +//! let item_type = configfs_attrs! { +//! container: configfs::Subsystem<Configuration>, +//! data: Configuration, +//! attributes: [ +//! message: 0, +//! bar: 1, +//! ], +//! }; +//! +//! try_pin_init!(Self { +//! config <- configfs::Subsystem::new( +//! c_str!("rust_configfs"), item_type, Configuration::new() +//! ), +//! }) +//! } +//! } +//! +//! #[pin_data] +//! struct Configuration { +//! message: &'static CStr, +//! #[pin] +//! bar: Mutex<(KBox<[u8; PAGE_SIZE]>, usize)>, +//! } +//! +//! impl Configuration { +//! fn new() -> impl PinInit<Self, Error> { +//! try_pin_init!(Self { +//! message: c_str!("Hello World\n"), +//! bar <- new_mutex!((KBox::new([0; PAGE_SIZE], flags::GFP_KERNEL)?, 0)), +//! }) +//! } +//! } +//! +//! #[vtable] +//! impl configfs::AttributeOperations<0> for Configuration { +//! type Data = Configuration; +//! +//! fn show(container: &Configuration, page: &mut [u8; PAGE_SIZE]) -> Result<usize> { +//! pr_info!("Show message\n"); +//! let data = container.message; +//! page[0..data.len()].copy_from_slice(data); +//! Ok(data.len()) +//! } +//! } +//! +//! #[vtable] +//! impl configfs::AttributeOperations<1> for Configuration { +//! type Data = Configuration; +//! +//! fn show(container: &Configuration, page: &mut [u8; PAGE_SIZE]) -> Result<usize> { +//! pr_info!("Show bar\n"); +//! let guard = container.bar.lock(); +//! let data = guard.0.as_slice(); +//! let len = guard.1; +//! page[0..len].copy_from_slice(&data[0..len]); +//! Ok(len) +//! } +//! +//! fn store(container: &Configuration, page: &[u8]) -> Result { +//! pr_info!("Store bar\n"); +//! let mut guard = container.bar.lock(); +//! guard.0[0..page.len()].copy_from_slice(page); +//! guard.1 = page.len(); +//! Ok(()) +//! } +//! } +//! ``` +//! +//! [C documentation]: srctree/Documentation/filesystems/configfs.rst +//! [`rust_configfs.rs`]: srctree/samples/rust/rust_configfs.rs + +use crate::alloc::flags; +use crate::container_of; +use crate::page::PAGE_SIZE; +use crate::prelude::*; +use crate::str::CString; +use crate::sync::Arc; +use crate::sync::ArcBorrow; +use crate::types::Opaque; +use core::cell::UnsafeCell; +use core::marker::PhantomData; + +/// A configfs subsystem. +/// +/// This is the top level entrypoint for a configfs hierarchy. To register +/// with configfs, embed a field of this type into your kernel module struct. +#[pin_data(PinnedDrop)] +pub struct Subsystem<Data> { + #[pin] + subsystem: Opaque<bindings::configfs_subsystem>, + #[pin] + data: Data, +} + +// SAFETY: We do not provide any operations on `Subsystem`. +unsafe impl<Data> Sync for Subsystem<Data> {} + +// SAFETY: Ownership of `Subsystem` can safely be transferred to other threads. +unsafe impl<Data> Send for Subsystem<Data> {} + +impl<Data> Subsystem<Data> { + /// Create an initializer for a [`Subsystem`]. + /// + /// The subsystem will appear in configfs as a directory name given by + /// `name`. The attributes available in directory are specified by + /// `item_type`. + pub fn new( + name: &'static CStr, + item_type: &'static ItemType<Subsystem<Data>, Data>, + data: impl PinInit<Data, Error>, + ) -> impl PinInit<Self, Error> { + try_pin_init!(Self { + subsystem <- pin_init::zeroed().chain( + |place: &mut Opaque<bindings::configfs_subsystem>| { + // SAFETY: We initialized the required fields of `place.group` above. 
+ unsafe { + bindings::config_group_init_type_name( + &mut (*place.get()).su_group, + name.as_ptr(), + item_type.as_ptr(), + ) + }; + + // SAFETY: `place.su_mutex` is valid for use as a mutex. + unsafe { + bindings::__mutex_init( + &mut (*place.get()).su_mutex, + kernel::optional_name!().as_char_ptr(), + kernel::static_lock_class!().as_ptr(), + ) + } + Ok(()) + } + ), + data <- data, + }) + .pin_chain(|this| { + crate::error::to_result( + // SAFETY: We initialized `this.subsystem` according to C API contract above. + unsafe { bindings::configfs_register_subsystem(this.subsystem.get()) }, + ) + }) + } +} + +#[pinned_drop] +impl<Data> PinnedDrop for Subsystem<Data> { + fn drop(self: Pin<&mut Self>) { + // SAFETY: We registered `self.subsystem` in the initializer returned by `Self::new`. + unsafe { bindings::configfs_unregister_subsystem(self.subsystem.get()) }; + // SAFETY: We initialized the mutex in `Subsystem::new`. + unsafe { bindings::mutex_destroy(&raw mut (*self.subsystem.get()).su_mutex) }; + } +} + +/// Trait that allows offset calculations for structs that embed a +/// `bindings::config_group`. +/// +/// Users of the configfs API should not need to implement this trait. +/// +/// # Safety +/// +/// - Implementers of this trait must embed a `bindings::config_group`. +/// - Methods must be implemented according to method documentation. +pub unsafe trait HasGroup<Data> { + /// Return the address of the `bindings::config_group` embedded in [`Self`]. + /// + /// # Safety + /// + /// - `this` must be a valid allocation of at least the size of [`Self`]. + unsafe fn group(this: *const Self) -> *const bindings::config_group; + + /// Return the address of the [`Self`] that `group` is embedded in. + /// + /// # Safety + /// + /// - `group` must point to the `bindings::config_group` that is embedded in + /// [`Self`]. + unsafe fn container_of(group: *const bindings::config_group) -> *const Self; +} + +// SAFETY: `Subsystem<Data>` embeds a field of type `bindings::config_group` +// within the `subsystem` field. +unsafe impl<Data> HasGroup<Data> for Subsystem<Data> { + unsafe fn group(this: *const Self) -> *const bindings::config_group { + // SAFETY: By impl and function safety requirement this projection is in bounds. + unsafe { &raw const (*(*this).subsystem.get()).su_group } + } + + unsafe fn container_of(group: *const bindings::config_group) -> *const Self { + // SAFETY: By impl and function safety requirement this projection is in bounds. + let c_subsys_ptr = unsafe { container_of!(group, bindings::configfs_subsystem, su_group) }; + let opaque_ptr = c_subsys_ptr.cast::<Opaque<bindings::configfs_subsystem>>(); + // SAFETY: By impl and function safety requirement, `opaque_ptr` and the + // pointer it returns, are within the same allocation. + unsafe { container_of!(opaque_ptr, Subsystem<Data>, subsystem) } + } +} + +/// A configfs group. +/// +/// To add a subgroup to configfs, pass this type as `ctype` to +/// [`crate::configfs_attrs`] when creating a group in [`GroupOperations::make_group`]. +#[pin_data] +pub struct Group<Data> { + #[pin] + group: Opaque<bindings::config_group>, + #[pin] + data: Data, +} + +impl<Data> Group<Data> { + /// Create an initializer for a new group. + /// + /// When instantiated, the group will appear as a directory with the name + /// given by `name` and it will contain attributes specified by `item_type`. 
+ pub fn new( + name: CString, + item_type: &'static ItemType<Group<Data>, Data>, + data: impl PinInit<Data, Error>, + ) -> impl PinInit<Self, Error> { + try_pin_init!(Self { + group <- pin_init::zeroed().chain(|v: &mut Opaque<bindings::config_group>| { + let place = v.get(); + let name = name.as_bytes_with_nul().as_ptr(); + // SAFETY: It is safe to initialize a group once it has been zeroed. + unsafe { + bindings::config_group_init_type_name(place, name.cast(), item_type.as_ptr()) + }; + Ok(()) + }), + data <- data, + }) + } +} + +// SAFETY: `Group<Data>` embeds a field of type `bindings::config_group` +// within the `group` field. +unsafe impl<Data> HasGroup<Data> for Group<Data> { + unsafe fn group(this: *const Self) -> *const bindings::config_group { + Opaque::raw_get( + // SAFETY: By impl and function safety requirements this field + // projection is within bounds of the allocation. + unsafe { &raw const (*this).group }, + ) + } + + unsafe fn container_of(group: *const bindings::config_group) -> *const Self { + let opaque_ptr = group.cast::<Opaque<bindings::config_group>>(); + // SAFETY: By impl and function safety requirement, `opaque_ptr` and + // pointer it returns will be in the same allocation. + unsafe { container_of!(opaque_ptr, Self, group) } + } +} + +/// # Safety +/// +/// `this` must be a valid pointer. +/// +/// If `this` does not represent the root group of a configfs subsystem, +/// `this` must be a pointer to a `bindings::config_group` embedded in a +/// `Group<Parent>`. +/// +/// Otherwise, `this` must be a pointer to a `bindings::config_group` that +/// is embedded in a `bindings::configfs_subsystem` that is embedded in a +/// `Subsystem<Parent>`. +unsafe fn get_group_data<'a, Parent>(this: *mut bindings::config_group) -> &'a Parent { + // SAFETY: `this` is a valid pointer. + let is_root = unsafe { (*this).cg_subsys.is_null() }; + + if !is_root { + // SAFETY: By C API contact,`this` was returned from a call to + // `make_group`. The pointer is known to be embedded within a + // `Group<Parent>`. + unsafe { &(*Group::<Parent>::container_of(this)).data } + } else { + // SAFETY: By C API contract, `this` is a pointer to the + // `bindings::config_group` field within a `Subsystem<Parent>`. + unsafe { &(*Subsystem::container_of(this)).data } + } +} + +struct GroupOperationsVTable<Parent, Child>(PhantomData<(Parent, Child)>); + +impl<Parent, Child> GroupOperationsVTable<Parent, Child> +where + Parent: GroupOperations<Child = Child>, + Child: 'static, +{ + /// # Safety + /// + /// `this` must be a valid pointer. + /// + /// If `this` does not represent the root group of a configfs subsystem, + /// `this` must be a pointer to a `bindings::config_group` embedded in a + /// `Group<Parent>`. + /// + /// Otherwise, `this` must be a pointer to a `bindings::config_group` that + /// is embedded in a `bindings::configfs_subsystem` that is embedded in a + /// `Subsystem<Parent>`. + /// + /// `name` must point to a null terminated string. + unsafe extern "C" fn make_group( + this: *mut bindings::config_group, + name: *const kernel::ffi::c_char, + ) -> *mut bindings::config_group { + // SAFETY: By function safety requirements of this function, this call + // is safe. + let parent_data = unsafe { get_group_data(this) }; + + let group_init = match Parent::make_group( + parent_data, + // SAFETY: By function safety requirements, name points to a null + // terminated string. 
+ unsafe { CStr::from_char_ptr(name) }, + ) { + Ok(init) => init, + Err(e) => return e.to_ptr(), + }; + + let child_group = <Arc<Group<Child>> as InPlaceInit<Group<Child>>>::try_pin_init( + group_init, + flags::GFP_KERNEL, + ); + + match child_group { + Ok(child_group) => { + let child_group_ptr = child_group.into_raw(); + // SAFETY: We allocated the pointee of `child_ptr` above as a + // `Group<Child>`. + unsafe { Group::<Child>::group(child_group_ptr) }.cast_mut() + } + Err(e) => e.to_ptr(), + } + } + + /// # Safety + /// + /// If `this` does not represent the root group of a configfs subsystem, + /// `this` must be a pointer to a `bindings::config_group` embedded in a + /// `Group<Parent>`. + /// + /// Otherwise, `this` must be a pointer to a `bindings::config_group` that + /// is embedded in a `bindings::configfs_subsystem` that is embedded in a + /// `Subsystem<Parent>`. + /// + /// `item` must point to a `bindings::config_item` within a + /// `bindings::config_group` within a `Group<Child>`. + unsafe extern "C" fn drop_item( + this: *mut bindings::config_group, + item: *mut bindings::config_item, + ) { + // SAFETY: By function safety requirements of this function, this call + // is safe. + let parent_data = unsafe { get_group_data(this) }; + + // SAFETY: By function safety requirements, `item` is embedded in a + // `config_group`. + let c_child_group_ptr = unsafe { container_of!(item, bindings::config_group, cg_item) }; + // SAFETY: By function safety requirements, `c_child_group_ptr` is + // embedded within a `Group<Child>`. + let r_child_group_ptr = unsafe { Group::<Child>::container_of(c_child_group_ptr) }; + + if Parent::HAS_DROP_ITEM { + // SAFETY: We called `into_raw` to produce `r_child_group_ptr` in + // `make_group`. + let arc: Arc<Group<Child>> = unsafe { Arc::from_raw(r_child_group_ptr.cast_mut()) }; + + Parent::drop_item(parent_data, arc.as_arc_borrow()); + arc.into_raw(); + } + + // SAFETY: By C API contract, we are required to drop a refcount on + // `item`. + unsafe { bindings::config_item_put(item) }; + } + + const VTABLE: bindings::configfs_group_operations = bindings::configfs_group_operations { + make_item: None, + make_group: Some(Self::make_group), + disconnect_notify: None, + drop_item: Some(Self::drop_item), + is_visible: None, + is_bin_visible: None, + }; + + const fn vtable_ptr() -> *const bindings::configfs_group_operations { + &Self::VTABLE as *const bindings::configfs_group_operations + } +} + +struct ItemOperationsVTable<Container, Data>(PhantomData<(Container, Data)>); + +impl<Data> ItemOperationsVTable<Group<Data>, Data> +where + Data: 'static, +{ + /// # Safety + /// + /// `this` must be a pointer to a `bindings::config_group` embedded in a + /// `Group<Parent>`. + /// + /// This function will destroy the pointee of `this`. The pointee of `this` + /// must not be accessed after the function returns. + unsafe extern "C" fn release(this: *mut bindings::config_item) { + // SAFETY: By function safety requirements, `this` is embedded in a + // `config_group`. + let c_group_ptr = unsafe { kernel::container_of!(this, bindings::config_group, cg_item) }; + // SAFETY: By function safety requirements, `c_group_ptr` is + // embedded within a `Group<Data>`. + let r_group_ptr = unsafe { Group::<Data>::container_of(c_group_ptr) }; + + // SAFETY: We called `into_raw` on `r_group_ptr` in + // `make_group`. 
+ let pin_self: Arc<Group<Data>> = unsafe { Arc::from_raw(r_group_ptr.cast_mut()) }; + drop(pin_self); + } + + const VTABLE: bindings::configfs_item_operations = bindings::configfs_item_operations { + release: Some(Self::release), + allow_link: None, + drop_link: None, + }; + + const fn vtable_ptr() -> *const bindings::configfs_item_operations { + &Self::VTABLE as *const bindings::configfs_item_operations + } +} + +impl<Data> ItemOperationsVTable<Subsystem<Data>, Data> { + const VTABLE: bindings::configfs_item_operations = bindings::configfs_item_operations { + release: None, + allow_link: None, + drop_link: None, + }; + + const fn vtable_ptr() -> *const bindings::configfs_item_operations { + &Self::VTABLE as *const bindings::configfs_item_operations + } +} + +/// Operations implemented by configfs groups that can create subgroups. +/// +/// Implement this trait on structs that embed a [`Subsystem`] or a [`Group`]. +#[vtable] +pub trait GroupOperations { + /// The child data object type. + /// + /// This group will create subgroups (subdirectories) backed by this kind of + /// object. + type Child: 'static; + + /// Creates a new subgroup. + /// + /// The kernel will call this method in response to `mkdir(2)` in the + /// directory representing `this`. + /// + /// To accept the request to create a group, implementations should + /// return an initializer of a `Group<Self::Child>`. To prevent creation, + /// return a suitable error. + fn make_group(&self, name: &CStr) -> Result<impl PinInit<Group<Self::Child>, Error>>; + + /// Prepares the group for removal from configfs. + /// + /// The kernel will call this method before the directory representing `_child` is removed from + /// configfs. + /// + /// Implementations can use this method to do house keeping before configfs drops its + /// reference to `Child`. + /// + /// NOTE: "drop" in the name of this function is not related to the Rust drop term. Rather, the + /// name is inherited from the callback name in the underlying C code. + fn drop_item(&self, _child: ArcBorrow<'_, Group<Self::Child>>) { + kernel::build_error!(kernel::error::VTABLE_DEFAULT_ERROR) + } +} + +/// A configfs attribute. +/// +/// An attribute appears as a file in configfs, inside a folder that represent +/// the group that the attribute belongs to. +#[repr(transparent)] +pub struct Attribute<const ID: u64, O, Data> { + attribute: Opaque<bindings::configfs_attribute>, + _p: PhantomData<(O, Data)>, +} + +// SAFETY: We do not provide any operations on `Attribute`. +unsafe impl<const ID: u64, O, Data> Sync for Attribute<ID, O, Data> {} + +// SAFETY: Ownership of `Attribute` can safely be transferred to other threads. +unsafe impl<const ID: u64, O, Data> Send for Attribute<ID, O, Data> {} + +impl<const ID: u64, O, Data> Attribute<ID, O, Data> +where + O: AttributeOperations<ID, Data = Data>, +{ + /// # Safety + /// + /// `item` must be embedded in a `bindings::config_group`. + /// + /// If `item` does not represent the root group of a configfs subsystem, + /// the group must be embedded in a `Group<Data>`. + /// + /// Otherwise, the group must be a embedded in a + /// `bindings::configfs_subsystem` that is embedded in a `Subsystem<Data>`. + /// + /// `page` must point to a writable buffer of size at least [`PAGE_SIZE`]. + unsafe extern "C" fn show( + item: *mut bindings::config_item, + page: *mut kernel::ffi::c_char, + ) -> isize { + let c_group: *mut bindings::config_group = + // SAFETY: By function safety requirements, `item` is embedded in a + // `config_group`. 
+ unsafe { container_of!(item, bindings::config_group, cg_item) }.cast_mut(); + + // SAFETY: The function safety requirements for this function satisfy + // the conditions for this call. + let data: &Data = unsafe { get_group_data(c_group) }; + + // SAFETY: By function safety requirements, `page` is writable for `PAGE_SIZE`. + let ret = O::show(data, unsafe { &mut *(page as *mut [u8; PAGE_SIZE]) }); + + match ret { + Ok(size) => size as isize, + Err(err) => err.to_errno() as isize, + } + } + + /// # Safety + /// + /// `item` must be embedded in a `bindings::config_group`. + /// + /// If `item` does not represent the root group of a configfs subsystem, + /// the group must be embedded in a `Group<Data>`. + /// + /// Otherwise, the group must be a embedded in a + /// `bindings::configfs_subsystem` that is embedded in a `Subsystem<Data>`. + /// + /// `page` must point to a readable buffer of size at least `size`. + unsafe extern "C" fn store( + item: *mut bindings::config_item, + page: *const kernel::ffi::c_char, + size: usize, + ) -> isize { + let c_group: *mut bindings::config_group = + // SAFETY: By function safety requirements, `item` is embedded in a + // `config_group`. + unsafe { container_of!(item, bindings::config_group, cg_item) }.cast_mut(); + + // SAFETY: The function safety requirements for this function satisfy + // the conditions for this call. + let data: &Data = unsafe { get_group_data(c_group) }; + + let ret = O::store( + data, + // SAFETY: By function safety requirements, `page` is readable + // for at least `size`. + unsafe { core::slice::from_raw_parts(page.cast(), size) }, + ); + + match ret { + Ok(()) => size as isize, + Err(err) => err.to_errno() as isize, + } + } + + /// Create a new attribute. + /// + /// The attribute will appear as a file with name given by `name`. + pub const fn new(name: &'static CStr) -> Self { + Self { + attribute: Opaque::new(bindings::configfs_attribute { + ca_name: name.as_char_ptr(), + ca_owner: core::ptr::null_mut(), + ca_mode: 0o660, + show: Some(Self::show), + store: if O::HAS_STORE { + Some(Self::store) + } else { + None + }, + }), + _p: PhantomData, + } + } +} + +/// Operations supported by an attribute. +/// +/// Implement this trait on type and pass that type as generic parameter when +/// creating an [`Attribute`]. The type carrying the implementation serve no +/// purpose other than specifying the attribute operations. +/// +/// This trait must be implemented on the `Data` type of for types that +/// implement `HasGroup<Data>`. The trait must be implemented once for each +/// attribute of the group. The constant type parameter `ID` maps the +/// implementation to a specific `Attribute`. `ID` must be passed when declaring +/// attributes via the [`kernel::configfs_attrs`] macro, to tie +/// `AttributeOperations` implementations to concrete named attributes. +#[vtable] +pub trait AttributeOperations<const ID: u64 = 0> { + /// The type of the object that contains the field that is backing the + /// attribute for this operation. + type Data; + + /// Renders the value of an attribute. + /// + /// This function is called by the kernel to read the value of an attribute. + /// + /// Implementations should write the rendering of the attribute to `page` + /// and return the number of bytes written. + fn show(data: &Self::Data, page: &mut [u8; PAGE_SIZE]) -> Result<usize>; + + /// Stores the value of an attribute. + /// + /// This function is called by the kernel to update the value of an attribute. 
+ /// + /// Implementations should parse the value from `page` and update internal + /// state to reflect the parsed value. + fn store(_data: &Self::Data, _page: &[u8]) -> Result { + kernel::build_error!(kernel::error::VTABLE_DEFAULT_ERROR) + } +} + +/// A list of attributes. +/// +/// This type is used to construct a new [`ItemType`]. It represents a list of +/// [`Attribute`] that will appear in the directory representing a [`Group`]. +/// Users should not directly instantiate this type, rather they should use the +/// [`kernel::configfs_attrs`] macro to declare a static set of attributes for a +/// group. +/// +/// # Note +/// +/// Instances of this type are constructed statically at compile by the +/// [`kernel::configfs_attrs`] macro. +#[repr(transparent)] +pub struct AttributeList<const N: usize, Data>( + /// Null terminated Array of pointers to [`Attribute`]. The type is [`c_void`] + /// to conform to the C API. + UnsafeCell<[*mut kernel::ffi::c_void; N]>, + PhantomData<Data>, +); + +// SAFETY: Ownership of `AttributeList` can safely be transferred to other threads. +unsafe impl<const N: usize, Data> Send for AttributeList<N, Data> {} + +// SAFETY: We do not provide any operations on `AttributeList` that need synchronization. +unsafe impl<const N: usize, Data> Sync for AttributeList<N, Data> {} + +impl<const N: usize, Data> AttributeList<N, Data> { + /// # Safety + /// + /// This function must only be called by the [`kernel::configfs_attrs`] + /// macro. + #[doc(hidden)] + pub const unsafe fn new() -> Self { + Self(UnsafeCell::new([core::ptr::null_mut(); N]), PhantomData) + } + + /// # Safety + /// + /// The caller must ensure that there are no other concurrent accesses to + /// `self`. That is, the caller has exclusive access to `self.` + #[doc(hidden)] + pub const unsafe fn add<const I: usize, const ID: u64, O>( + &'static self, + attribute: &'static Attribute<ID, O, Data>, + ) where + O: AttributeOperations<ID, Data = Data>, + { + // We need a space at the end of our list for a null terminator. + const { assert!(I < N - 1, "Invalid attribute index") }; + + // SAFETY: By function safety requirements, we have exclusive access to + // `self` and the reference created below will be exclusive. + unsafe { + (&mut *self.0.get())[I] = (attribute as *const Attribute<ID, O, Data>) + .cast_mut() + .cast() + }; + } +} + +/// A representation of the attributes that will appear in a [`Group`] or +/// [`Subsystem`]. +/// +/// Users should not directly instantiate objects of this type. Rather, they +/// should use the [`kernel::configfs_attrs`] macro to statically declare the +/// shape of a [`Group`] or [`Subsystem`]. +#[pin_data] +pub struct ItemType<Container, Data> { + #[pin] + item_type: Opaque<bindings::config_item_type>, + _p: PhantomData<(Container, Data)>, +} + +// SAFETY: We do not provide any operations on `ItemType` that need synchronization. +unsafe impl<Container, Data> Sync for ItemType<Container, Data> {} + +// SAFETY: Ownership of `ItemType` can safely be transferred to other threads. +unsafe impl<Container, Data> Send for ItemType<Container, Data> {} + +macro_rules! 
impl_item_type { + ($tpe:ty) => { + impl<Data> ItemType<$tpe, Data> { + #[doc(hidden)] + pub const fn new_with_child_ctor<const N: usize, Child>( + owner: &'static ThisModule, + attributes: &'static AttributeList<N, Data>, + ) -> Self + where + Data: GroupOperations<Child = Child>, + Child: 'static, + { + Self { + item_type: Opaque::new(bindings::config_item_type { + ct_owner: owner.as_ptr(), + ct_group_ops: GroupOperationsVTable::<Data, Child>::vtable_ptr().cast_mut(), + ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(), + ct_attrs: (attributes as *const AttributeList<N, Data>) + .cast_mut() + .cast(), + ct_bin_attrs: core::ptr::null_mut(), + }), + _p: PhantomData, + } + } + + #[doc(hidden)] + pub const fn new<const N: usize>( + owner: &'static ThisModule, + attributes: &'static AttributeList<N, Data>, + ) -> Self { + Self { + item_type: Opaque::new(bindings::config_item_type { + ct_owner: owner.as_ptr(), + ct_group_ops: core::ptr::null_mut(), + ct_item_ops: ItemOperationsVTable::<$tpe, Data>::vtable_ptr().cast_mut(), + ct_attrs: (attributes as *const AttributeList<N, Data>) + .cast_mut() + .cast(), + ct_bin_attrs: core::ptr::null_mut(), + }), + _p: PhantomData, + } + } + } + }; +} + +impl_item_type!(Subsystem<Data>); +impl_item_type!(Group<Data>); + +impl<Container, Data> ItemType<Container, Data> { + fn as_ptr(&self) -> *const bindings::config_item_type { + self.item_type.get() + } +} + +/// Define a list of configfs attributes statically. +/// +/// Invoking the macro in the following manner: +/// +/// ```ignore +/// let item_type = configfs_attrs! { +/// container: configfs::Subsystem<Configuration>, +/// data: Configuration, +/// child: Child, +/// attributes: [ +/// message: 0, +/// bar: 1, +/// ], +/// }; +/// ``` +/// +/// Expands the following output: +/// +/// ```ignore +/// let item_type = { +/// static CONFIGURATION_MESSAGE_ATTR: kernel::configfs::Attribute< +/// 0, +/// Configuration, +/// Configuration, +/// > = unsafe { +/// kernel::configfs::Attribute::new({ +/// const S: &str = "message\u{0}"; +/// const C: &kernel::str::CStr = match kernel::str::CStr::from_bytes_with_nul( +/// S.as_bytes() +/// ) { +/// Ok(v) => v, +/// Err(_) => { +/// core::panicking::panic_fmt(core::const_format_args!( +/// "string contains interior NUL" +/// )); +/// } +/// }; +/// C +/// }) +/// }; +/// +/// static CONFIGURATION_BAR_ATTR: kernel::configfs::Attribute< +/// 1, +/// Configuration, +/// Configuration +/// > = unsafe { +/// kernel::configfs::Attribute::new({ +/// const S: &str = "bar\u{0}"; +/// const C: &kernel::str::CStr = match kernel::str::CStr::from_bytes_with_nul( +/// S.as_bytes() +/// ) { +/// Ok(v) => v, +/// Err(_) => { +/// core::panicking::panic_fmt(core::const_format_args!( +/// "string contains interior NUL" +/// )); +/// } +/// }; +/// C +/// }) +/// }; +/// +/// const N: usize = (1usize + (1usize + 0usize)) + 1usize; +/// +/// static CONFIGURATION_ATTRS: kernel::configfs::AttributeList<N, Configuration> = +/// unsafe { kernel::configfs::AttributeList::new() }; +/// +/// { +/// const N: usize = 0usize; +/// unsafe { CONFIGURATION_ATTRS.add::<N, 0, _>(&CONFIGURATION_MESSAGE_ATTR) }; +/// } +/// +/// { +/// const N: usize = (1usize + 0usize); +/// unsafe { CONFIGURATION_ATTRS.add::<N, 1, _>(&CONFIGURATION_BAR_ATTR) }; +/// } +/// +/// static CONFIGURATION_TPE: +/// kernel::configfs::ItemType<configfs::Subsystem<Configuration> ,Configuration> +/// = kernel::configfs::ItemType::< +/// configfs::Subsystem<Configuration>, +/// Configuration +/// 
>::new_with_child_ctor::<N,Child>( +/// &THIS_MODULE, +/// &CONFIGURATION_ATTRS +/// ); +/// +/// &CONFIGURATION_TPE +/// } +/// ``` +#[macro_export] +macro_rules! configfs_attrs { + ( + container: $container:ty, + data: $data:ty, + attributes: [ + $($name:ident: $attr:literal),* $(,)? + ] $(,)? + ) => { + $crate::configfs_attrs!( + count: + @container($container), + @data($data), + @child(), + @no_child(x), + @attrs($($name $attr)*), + @eat($($name $attr,)*), + @assign(), + @cnt(0usize), + ) + }; + ( + container: $container:ty, + data: $data:ty, + child: $child:ty, + attributes: [ + $($name:ident: $attr:literal),* $(,)? + ] $(,)? + ) => { + $crate::configfs_attrs!( + count: + @container($container), + @data($data), + @child($child), + @no_child(), + @attrs($($name $attr)*), + @eat($($name $attr,)*), + @assign(), + @cnt(0usize), + ) + }; + (count: + @container($container:ty), + @data($data:ty), + @child($($child:ty)?), + @no_child($($no_child:ident)?), + @attrs($($aname:ident $aattr:literal)*), + @eat($name:ident $attr:literal, $($rname:ident $rattr:literal,)*), + @assign($($assign:block)*), + @cnt($cnt:expr), + ) => { + $crate::configfs_attrs!( + count: + @container($container), + @data($data), + @child($($child)?), + @no_child($($no_child)?), + @attrs($($aname $aattr)*), + @eat($($rname $rattr,)*), + @assign($($assign)* { + const N: usize = $cnt; + // The following macro text expands to a call to `Attribute::add`. + + // SAFETY: By design of this macro, the name of the variable we + // invoke the `add` method on below, is not visible outside of + // the macro expansion. The macro does not operate concurrently + // on this variable, and thus we have exclusive access to the + // variable. + unsafe { + $crate::macros::paste!( + [< $data:upper _ATTRS >] + .add::<N, $attr, _>(&[< $data:upper _ $name:upper _ATTR >]) + ) + }; + }), + @cnt(1usize + $cnt), + ) + }; + (count: + @container($container:ty), + @data($data:ty), + @child($($child:ty)?), + @no_child($($no_child:ident)?), + @attrs($($aname:ident $aattr:literal)*), + @eat(), + @assign($($assign:block)*), + @cnt($cnt:expr), + ) => + { + $crate::configfs_attrs!( + final: + @container($container), + @data($data), + @child($($child)?), + @no_child($($no_child)?), + @attrs($($aname $aattr)*), + @assign($($assign)*), + @cnt($cnt), + ) + }; + (final: + @container($container:ty), + @data($data:ty), + @child($($child:ty)?), + @no_child($($no_child:ident)?), + @attrs($($name:ident $attr:literal)*), + @assign($($assign:block)*), + @cnt($cnt:expr), + ) => + { + $crate::macros::paste!{ + { + $( + // SAFETY: We are expanding `configfs_attrs`. + static [< $data:upper _ $name:upper _ATTR >]: + $crate::configfs::Attribute<$attr, $data, $data> = + unsafe { + $crate::configfs::Attribute::new(c_str!(::core::stringify!($name))) + }; + )* + + + // We need space for a null terminator. + const N: usize = $cnt + 1usize; + + // SAFETY: We are expanding `configfs_attrs`. + static [< $data:upper _ATTRS >]: + $crate::configfs::AttributeList<N, $data> = + unsafe { $crate::configfs::AttributeList::new() }; + + $($assign)* + + $( + const [<$no_child:upper>]: bool = true; + + static [< $data:upper _TPE >] : $crate::configfs::ItemType<$container, $data> = + $crate::configfs::ItemType::<$container, $data>::new::<N>( + &THIS_MODULE, &[<$ data:upper _ATTRS >] + ); + )? 
+ + $( + static [< $data:upper _TPE >]: + $crate::configfs::ItemType<$container, $data> = + $crate::configfs::ItemType::<$container, $data>:: + new_with_child_ctor::<N, $child>( + &THIS_MODULE, &[<$ data:upper _ATTRS >] + ); + )? + + & [< $data:upper _TPE >] + } + } + }; + +} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index de07aadd1ff5..bcbf5026449d 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -42,6 +42,8 @@ pub mod alloc; pub mod block; #[doc(hidden)] pub mod build_assert; +#[cfg(CONFIG_CONFIGFS_FS)] +pub mod configfs; pub mod cred; pub mod device; pub mod device_id; diff --git a/rust/kernel/sync/rcu.rs b/rust/kernel/sync/rcu.rs index b51d9150ffe2..a32bef6e490b 100644 --- a/rust/kernel/sync/rcu.rs +++ b/rust/kernel/sync/rcu.rs @@ -17,6 +17,7 @@ pub struct Guard(NotThreadSafe); impl Guard { /// Acquires the RCU read side lock and returns a guard. + #[inline] pub fn new() -> Self { // SAFETY: An FFI call with no additional requirements. unsafe { bindings::rcu_read_lock() }; @@ -25,16 +26,19 @@ impl Guard { } /// Explicitly releases the RCU read side lock. + #[inline] pub fn unlock(self) {} } impl Default for Guard { + #[inline] fn default() -> Self { Self::new() } } impl Drop for Guard { + #[inline] fn drop(&mut self) { // SAFETY: By the type invariants, the RCU read side is locked, so it is ok to unlock it. unsafe { bindings::rcu_read_unlock() }; @@ -42,6 +46,7 @@ impl Drop for Guard { } /// Acquires the RCU read side lock. +#[inline] pub fn read_lock() -> Guard { Guard::new() } diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig index cad52b7120b5..be491ad9b3af 100644 --- a/samples/rust/Kconfig +++ b/samples/rust/Kconfig @@ -10,6 +10,17 @@ menuconfig SAMPLES_RUST if SAMPLES_RUST +config SAMPLE_RUST_CONFIGFS + tristate "Configfs sample" + depends on CONFIGFS_FS + help + This option builds the Rust configfs sample. + + To compile this as a module, choose M here: + the module will be called rust_configfs. + + If unsure, say N. + config SAMPLE_RUST_MINIMAL tristate "Minimal" help diff --git a/samples/rust/Makefile b/samples/rust/Makefile index c6a2479f7d9c..b3c9178d654a 100644 --- a/samples/rust/Makefile +++ b/samples/rust/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_SAMPLE_RUST_DMA) += rust_dma.o obj-$(CONFIG_SAMPLE_RUST_DRIVER_PCI) += rust_driver_pci.o obj-$(CONFIG_SAMPLE_RUST_DRIVER_PLATFORM) += rust_driver_platform.o obj-$(CONFIG_SAMPLE_RUST_DRIVER_FAUX) += rust_driver_faux.o +obj-$(CONFIG_SAMPLE_RUST_CONFIGFS) += rust_configfs.o rust_print-y := rust_print_main.o rust_print_events.o diff --git a/samples/rust/rust_configfs.rs b/samples/rust/rust_configfs.rs new file mode 100644 index 000000000000..60ddbe62cda3 --- /dev/null +++ b/samples/rust/rust_configfs.rs @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Rust configfs sample. + +use kernel::alloc::flags; +use kernel::c_str; +use kernel::configfs; +use kernel::configfs_attrs; +use kernel::new_mutex; +use kernel::page::PAGE_SIZE; +use kernel::prelude::*; +use kernel::sync::Mutex; + +module! 
{ + type: RustConfigfs, + name: "rust_configfs", + author: "Rust for Linux Contributors", + description: "Rust configfs sample", + license: "GPL", +} + +#[pin_data] +struct RustConfigfs { + #[pin] + config: configfs::Subsystem<Configuration>, +} + +#[pin_data] +struct Configuration { + message: &'static CStr, + #[pin] + bar: Mutex<(KBox<[u8; PAGE_SIZE]>, usize)>, +} + +impl Configuration { + fn new() -> impl PinInit<Self, Error> { + try_pin_init!(Self { + message: c_str!("Hello World\n"), + bar <- new_mutex!((KBox::new([0; PAGE_SIZE], flags::GFP_KERNEL)?, 0)), + }) + } +} + +impl kernel::InPlaceModule for RustConfigfs { + fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> { + pr_info!("Rust configfs sample (init)\n"); + + // Define a subsystem with the data type `Configuration`, two + // attributes, `message` and `bar` and child group type `Child`. `mkdir` + // in the directory representing this subsystem will create directories + // backed by the `Child` type. + let item_type = configfs_attrs! { + container: configfs::Subsystem<Configuration>, + data: Configuration, + child: Child, + attributes: [ + message: 0, + bar: 1, + ], + }; + + try_pin_init!(Self { + config <- configfs::Subsystem::new( + c_str!("rust_configfs"), item_type, Configuration::new() + ), + }) + } +} + +#[vtable] +impl configfs::GroupOperations for Configuration { + type Child = Child; + + fn make_group(&self, name: &CStr) -> Result<impl PinInit<configfs::Group<Child>, Error>> { + // Define a group with data type `Child`, one attribute `baz` and child + // group type `GrandChild`. `mkdir` in the directory representing this + // group will create directories backed by the `GrandChild` type. + let tpe = configfs_attrs! { + container: configfs::Group<Child>, + data: Child, + child: GrandChild, + attributes: [ + baz: 0, + ], + }; + + Ok(configfs::Group::new(name.try_into()?, tpe, Child::new())) + } +} + +#[vtable] +impl configfs::AttributeOperations<0> for Configuration { + type Data = Configuration; + + fn show(container: &Configuration, page: &mut [u8; PAGE_SIZE]) -> Result<usize> { + pr_info!("Show message\n"); + let data = container.message; + page[0..data.len()].copy_from_slice(data); + Ok(data.len()) + } +} + +#[vtable] +impl configfs::AttributeOperations<1> for Configuration { + type Data = Configuration; + + fn show(container: &Configuration, page: &mut [u8; PAGE_SIZE]) -> Result<usize> { + pr_info!("Show bar\n"); + let guard = container.bar.lock(); + let data = guard.0.as_slice(); + let len = guard.1; + page[0..len].copy_from_slice(&data[0..len]); + Ok(len) + } + + fn store(container: &Configuration, page: &[u8]) -> Result { + pr_info!("Store bar\n"); + let mut guard = container.bar.lock(); + guard.0[0..page.len()].copy_from_slice(page); + guard.1 = page.len(); + Ok(()) + } +} + +// `pin_data` cannot handle structs without braces. +#[pin_data] +struct Child {} + +impl Child { + fn new() -> impl PinInit<Self, Error> { + try_pin_init!(Self {}) + } +} + +#[vtable] +impl configfs::GroupOperations for Child { + type Child = GrandChild; + + fn make_group(&self, name: &CStr) -> Result<impl PinInit<configfs::Group<GrandChild>, Error>> { + // Define a group with data type `GrandChild`, one attribute `gc`. As no + // child type is specified, it will not be possible to create subgroups + // in this group, and `mkdir`in the directory representing this group + // will return an error. + let tpe = configfs_attrs! 
{ + container: configfs::Group<GrandChild>, + data: GrandChild, + attributes: [ + gc: 0, + ], + }; + + Ok(configfs::Group::new( + name.try_into()?, + tpe, + GrandChild::new(), + )) + } +} + +#[vtable] +impl configfs::AttributeOperations<0> for Child { + type Data = Child; + + fn show(_container: &Child, page: &mut [u8; PAGE_SIZE]) -> Result<usize> { + pr_info!("Show baz\n"); + let data = c"Hello Baz\n".to_bytes(); + page[0..data.len()].copy_from_slice(data); + Ok(data.len()) + } +} + +// `pin_data` cannot handle structs without braces. +#[pin_data] +struct GrandChild {} + +impl GrandChild { + fn new() -> impl PinInit<Self, Error> { + try_pin_init!(Self {}) + } +} + +#[vtable] +impl configfs::AttributeOperations<0> for GrandChild { + type Data = GrandChild; + + fn show(_container: &GrandChild, page: &mut [u8; PAGE_SIZE]) -> Result<usize> { + pr_info!("Show grand child\n"); + let data = c"Hello GC\n".to_bytes(); + page[0..data.len()].copy_from_slice(data); + Ok(data.len()) + } +} diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 3d22bf863eec..b3b1939ccd19 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -839,6 +839,8 @@ our %deprecated_apis = ( "kunmap" => "kunmap_local", "kmap_atomic" => "kmap_local_page", "kunmap_atomic" => "kunmap_local", + "srcu_read_lock_lite" => "srcu_read_lock_fast", + "srcu_read_unlock_lite" => "srcu_read_unlock_fast", ); #Create a search pattern for all these strings to speed up a loop below diff --git a/scripts/gdb/linux/pgtable.py b/scripts/gdb/linux/pgtable.py index 30d837f3dfae..09aac2421fb8 100644 --- a/scripts/gdb/linux/pgtable.py +++ b/scripts/gdb/linux/pgtable.py @@ -29,11 +29,9 @@ def page_mask(level=1): raise Exception(f'Unknown page level: {level}') -#page_offset_base in case CONFIG_DYNAMIC_MEMORY_LAYOUT is disabled -POB_NO_DYNAMIC_MEM_LAYOUT = '0xffff888000000000' def _page_offset_base(): pob_symbol = gdb.lookup_global_symbol('page_offset_base') - pob = pob_symbol.name if pob_symbol else POB_NO_DYNAMIC_MEM_LAYOUT + pob = pob_symbol.name return gdb.parse_and_eval(pob) diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 6039afae4bfc..0aef34b9609b 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -283,7 +283,7 @@ static struct dentry *aafs_create(const char *name, umode_t mode, dir = d_inode(parent); inode_lock(dir); - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) { error = PTR_ERR(dentry); goto fail_lock; @@ -2551,7 +2551,7 @@ static int aa_mk_null_file(struct dentry *parent) return error; inode_lock(d_inode(parent)); - dentry = lookup_one_len(NULL_FILE_NAME, parent, strlen(NULL_FILE_NAME)); + dentry = lookup_noperm(&QSTR(NULL_FILE_NAME), parent); if (IS_ERR(dentry)) { error = PTR_ERR(dentry); goto out; diff --git a/security/inode.c b/security/inode.c index da3ab44c8e57..3913501621fa 100644 --- a/security/inode.c +++ b/security/inode.c @@ -128,7 +128,7 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode, dir = d_inode(parent); inode_lock(dir); - dentry = lookup_one_len(name, parent, strlen(name)); + dentry = lookup_noperm(&QSTR(name), parent); if (IS_ERR(dentry)) goto out; diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 47480eb2189b..e67a8ce4b64c 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -2158,8 +2158,8 @@ static int __init init_sel_fs(void) return err; } - selinux_null.dentry = 
d_hash_and_lookup(selinux_null.mnt->mnt_root, - &null_name); + selinux_null.dentry = try_lookup_noperm(&null_name, + selinux_null.mnt->mnt_root); if (IS_ERR(selinux_null.dentry)) { pr_err("selinuxfs: could not lookup null!\n"); err = PTR_ERR(selinux_null.dentry); diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 4683b9139c56..4ecb17bd5436 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1074,8 +1074,7 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) runtime->oss.params = 0; runtime->oss.prepare = 1; runtime->oss.buffer_used = 0; - if (runtime->dma_area) - snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_runtime_buffer_set_silence(runtime); runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 6c2b6a62d9d2..853ac5bb33ff 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -723,6 +723,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) atomic_inc(&runtime->buffer_accessing); } +/* fill the PCM buffer with the current silence format; called from pcm_oss.c */ +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime) +{ + snd_pcm_buffer_access_lock(runtime); + if (runtime->dma_area) + snd_pcm_format_set_silence(runtime->format, runtime->dma_area, + bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_buffer_access_unlock(runtime); +} +EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence); + #if IS_ENABLED(CONFIG_SND_PCM_OSS) #define is_oss_stream(substream) ((substream)->oss.oss) #else diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8a2b09e4a7d5..20ab1fb2195f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6830,7 +6830,10 @@ static void alc256_fixup_chromebook(struct hda_codec *codec, switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: - spec->gen.suppress_auto_mute = 1; + if (codec->core.subsystem_id == 0x10280d76) + spec->gen.suppress_auto_mute = 0; + else + spec->gen.suppress_auto_mute = 1; spec->gen.suppress_auto_mic = 1; spec->en_3kpull_low = false; break; @@ -10882,9 +10885,12 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e1d, "HP ZBook X Gli 16 G12", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e2c, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e36, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e37, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e3a, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e3b, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e60, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e61, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e62, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), @@ -11300,6 +11306,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 
Hybrid", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x390d, "Lenovo Yoga Pro 7 14ASP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), SND_PCI_QUIRK(0x17aa, 0x391f, "Yoga S990-16 pro Quad YC Quad", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3920, "Yoga S990-16 pro Quad VECO Quad", ALC287_FIXUP_TAS2781_I2C), diff --git a/sound/soc/amd/acp/acp-rembrandt.c b/sound/soc/amd/acp/acp-rembrandt.c index cccdd10c345e..aeffd24710e7 100644 --- a/sound/soc/amd/acp/acp-rembrandt.c +++ b/sound/soc/amd/acp/acp-rembrandt.c @@ -22,7 +22,7 @@ #include <linux/pci.h> #include <linux/pm_runtime.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "amd.h" #include "../mach-config.h" diff --git a/sound/soc/amd/acp/acp63.c b/sound/soc/amd/acp/acp63.c index 1f15c96a9b94..10fb416b959d 100644 --- a/sound/soc/amd/acp/acp63.c +++ b/sound/soc/amd/acp/acp63.c @@ -21,7 +21,7 @@ #include <linux/pm_runtime.h> #include <linux/pci.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "amd.h" #include "acp-mach.h" diff --git a/sound/soc/amd/acp/acp70.c b/sound/soc/amd/acp/acp70.c index 217b717e9beb..b95e3949e70b 100644 --- a/sound/soc/amd/acp/acp70.c +++ b/sound/soc/amd/acp/acp70.c @@ -23,7 +23,7 @@ #include "amd.h" #include "acp-mach.h" -#include <asm/amd_node.h> +#include <asm/amd/node.h> #define DRV_NAME "acp_asoc_acp70" diff --git a/sound/soc/intel/avs/tgl.c b/sound/soc/intel/avs/tgl.c index 56905f2b9eb2..9dbb3ad0954a 100644 --- a/sound/soc/intel/avs/tgl.c +++ b/sound/soc/intel/avs/tgl.c @@ -47,7 +47,7 @@ static int avs_tgl_config_basefw(struct avs_dev *adev) #ifdef CONFIG_X86 unsigned int ecx; -#include <asm/cpuid.h> +#include <asm/cpuid/api.h> ecx = cpuid_ecx(CPUID_TSC_LEAF); if (ecx) { ret = avs_ipc_set_fw_config(adev, 1, AVS_FW_CFG_XTAL_FREQ_HZ, sizeof(ecx), &ecx); diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig index 3033e2d3fe16..90e367586493 100644 --- a/sound/soc/mediatek/Kconfig +++ b/sound/soc/mediatek/Kconfig @@ -228,6 +228,7 @@ config SND_SOC_MT8188 config SND_SOC_MT8188_MT6359 tristate "ASoC Audio driver for MT8188 with MT6359 and I2S codecs" depends on SND_SOC_MT8188 && MTK_PMIC_WRAP + depends on SND_SOC_MT6359_ACCDET || !SND_SOC_MT6359_ACCDET depends on I2C select SND_SOC_MT6359 select SND_SOC_HDMI_CODEC diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c index 7c6d647fa253..7e6f10726ff0 100644 --- a/sound/soc/sof/amd/acp.c +++ b/sound/soc/sof/amd/acp.c @@ -16,7 +16,7 @@ #include <linux/module.h> #include <linux/pci.h> -#include <asm/amd_node.h> +#include <asm/amd/node.h> #include "../ops.h" #include "acp.h" diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c index b1be03011d7e..6492e1cefbfb 100644 --- a/sound/soc/sof/intel/hda-bus.c +++ b/sound/soc/sof/intel/hda-bus.c @@ -76,7 +76,7 @@ void sof_hda_bus_init(struct snd_sof_dev *sdev, struct device *dev) snd_hdac_ext_bus_init(bus, dev, &bus_core_ops, sof_hda_ext_ops); - if (chip && chip->hw_ip_version == SOF_INTEL_ACE_2_0) + if (chip && chip->hw_ip_version >= SOF_INTEL_ACE_2_0) bus->use_pio_for_commands = true; #else snd_hdac_ext_bus_init(bus, dev, NULL, NULL); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index b34e5fdf10f1..6a3932d90b43 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1049,7 +1049,21 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev, 
if (!*mach && codec_num <= 2) { bool tplg_fixup = false; - hda_mach = snd_soc_acpi_intel_hda_machines; + /* + * make a local copy of the match array since we might + * be modifying it + */ + hda_mach = devm_kmemdup_array(sdev->dev, + snd_soc_acpi_intel_hda_machines, + 2, /* we have one entry + sentinel in the array */ + sizeof(snd_soc_acpi_intel_hda_machines[0]), + GFP_KERNEL); + if (!hda_mach) { + dev_err(bus->dev, + "%s: failed to duplicate the HDA match table\n", + __func__); + return; + } dev_info(bus->dev, "using HDA machine driver %s now\n", hda_mach->drv_name); diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c index 576f407cd456..976a4794d610 100644 --- a/sound/soc/sof/ipc4-control.c +++ b/sound/soc/sof/ipc4-control.c @@ -531,6 +531,14 @@ static int sof_ipc4_bytes_ext_put(struct snd_sof_control *scontrol, return -EINVAL; } + /* Check header id */ + if (header.numid != SOF_CTRL_CMD_BINARY) { + dev_err_ratelimited(scomp->dev, + "Incorrect numid for bytes put %d\n", + header.numid); + return -EINVAL; + } + /* Verify the ABI header first */ if (copy_from_user(&abi_hdr, tlvd->tlv, sizeof(abi_hdr))) return -EFAULT; @@ -613,7 +621,8 @@ static int _sof_ipc4_bytes_ext_get(struct snd_sof_control *scontrol, if (data_size > size) return -ENOSPC; - header.numid = scontrol->comp_id; + /* Set header id and length */ + header.numid = SOF_CTRL_CMD_BINARY; header.length = data_size; if (copy_to_user(tlvd, &header, sizeof(struct snd_ctl_tlv))) diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c index 1a2841899ff5..c09b424ab863 100644 --- a/sound/soc/sof/ipc4-pcm.c +++ b/sound/soc/sof/ipc4-pcm.c @@ -798,7 +798,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm spcm->stream[stream].private = stream_priv; - if (!support_info) + /* Delay reporting is only supported on playback */ + if (!support_info || stream == SNDRV_PCM_STREAM_CAPTURE) continue; time_info = kzalloc(sizeof(*time_info), GFP_KERNEL); diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index dc9cb8324067..14aa8ecc4bc4 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -1063,7 +1063,7 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, struct snd_sof_dai *dai) { struct snd_soc_card *card = scomp->card; - struct snd_soc_pcm_runtime *rtd; + struct snd_soc_pcm_runtime *rtd, *full, *partial; struct snd_soc_dai *cpu_dai; int stream; int i; @@ -1080,12 +1080,22 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, else goto end; + full = NULL; + partial = NULL; list_for_each_entry(rtd, &card->rtd_list, list) { /* does stream match DAI link ? */ - if (!rtd->dai_link->stream_name || - !strstr(rtd->dai_link->stream_name, w->sname)) - continue; + if (rtd->dai_link->stream_name) { + if (!strcmp(rtd->dai_link->stream_name, w->sname)) { + full = rtd; + break; + } else if (strstr(rtd->dai_link->stream_name, w->sname)) { + partial = rtd; + } + } + } + rtd = full ? 
full : partial; + if (rtd) { for_each_rtd_cpu_dais(rtd, i, cpu_dai) { /* * Please create DAI widget in the right order diff --git a/tools/arch/x86/include/asm/amd-ibs.h b/tools/arch/x86/include/asm/amd/ibs.h index cb1740bc3da2..300b6e0765b2 100644 --- a/tools/arch/x86/include/asm/amd-ibs.h +++ b/tools/arch/x86/include/asm/amd/ibs.h @@ -4,7 +4,7 @@ * 55898 Rev 0.35 - Feb 5, 2021 */ -#include "msr-index.h" +#include "../msr-index.h" /* IBS_OP_DATA2 DataSrc */ #define IBS_DATA_SRC_LOC_CACHE 2 diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 6c2c152d8a67..bc81b9d1aeca 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -476,11 +476,11 @@ #define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* Clear branch history at syscall entry using SW loop */ #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */ #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */ -#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */ -#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */ -#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */ -#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32 + 7) /* Workload Classification */ -#define X86_FEATURE_PREFER_YMM (21*32 + 8) /* Avoid ZMM registers due to downclocking */ +#define X86_FEATURE_CLEAR_BHB_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */ +#define X86_FEATURE_AMD_FAST_CPPC (21*32+ 5) /* Fast CPPC */ +#define X86_FEATURE_AMD_HTR_CORES (21*32+ 6) /* Heterogeneous Core Topology */ +#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32+ 7) /* Workload Classification */ +#define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */ /* * BUG word(s) @@ -519,7 +519,7 @@ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* "itlb_multihit" CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* "srbds" CPU may leak RNG bits if not mitigated */ #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* "mmio_stale_data" CPU is affected by Processor MMIO Stale Data vulnerabilities */ -#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */ +/* unused, was #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */ #define X86_BUG_RETBLEED X86_BUG(27) /* "retbleed" CPU is affected by RETBleed */ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* "eibrs_pbrsb" EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* "smt_rsb" CPU is vulnerable to Cross-Thread Return Address Predictions */ @@ -527,10 +527,10 @@ #define X86_BUG_TDX_PW_MCE X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */ /* BUG word 2 */ -#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */ -#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */ -#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */ -#define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */ -#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ -#define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */ 
+#define X86_BUG_SRSO X86_BUG( 1*32+ 0) /* "srso" AMD SRSO bug */ +#define X86_BUG_DIV0 X86_BUG( 1*32+ 1) /* "div0" AMD DIV0 speculation bug */ +#define X86_BUG_RFDS X86_BUG( 1*32+ 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */ +#define X86_BUG_BHI X86_BUG( 1*32+ 3) /* "bhi" CPU is affected by Branch History Injection */ +#define X86_BUG_IBPB_NO_RET X86_BUG( 1*32+ 4) /* "ibpb_no_ret" IBPB omits return target predictions */ +#define X86_BUG_SPECTRE_V2_USER X86_BUG( 1*32+ 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/inat.h b/tools/arch/x86/include/asm/inat.h index 253690eb3c26..183aa662b165 100644 --- a/tools/arch/x86/include/asm/inat.h +++ b/tools/arch/x86/include/asm/inat.h @@ -82,6 +82,7 @@ #define INAT_NO_REX2 (1 << (INAT_FLAG_OFFS + 8)) #define INAT_REX2_VARIANT (1 << (INAT_FLAG_OFFS + 9)) #define INAT_EVEX_SCALABLE (1 << (INAT_FLAG_OFFS + 10)) +#define INAT_INV64 (1 << (INAT_FLAG_OFFS + 11)) /* Attribute making macros for attribute tables */ #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) @@ -242,4 +243,9 @@ static inline int inat_evex_scalable(insn_attr_t attr) { return attr & INAT_EVEX_SCALABLE; } + +static inline int inat_is_invalid64(insn_attr_t attr) +{ + return attr & INAT_INV64; +} #endif diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv index d751eb8585d0..8d925ce9750f 100644 --- a/tools/arch/x86/kcpuid/cpuid.csv +++ b/tools/arch/x86/kcpuid/cpuid.csv @@ -1,5 +1,5 @@ # SPDX-License-Identifier: CC0-1.0 -# Generator: x86-cpuid-db v1.0 +# Generator: x86-cpuid-db v2.4 # # Auto-generated file. @@ -12,297 +12,298 @@ # Leaf 0H # Maximum standard leaf number + CPU vendor string - 0, 0, eax, 31:0, max_std_leaf , Highest cpuid standard leaf supported - 0, 0, ebx, 31:0, cpu_vendorid_0 , CPU vendor ID string bytes 0 - 3 - 0, 0, ecx, 31:0, cpu_vendorid_2 , CPU vendor ID string bytes 8 - 11 - 0, 0, edx, 31:0, cpu_vendorid_1 , CPU vendor ID string bytes 4 - 7 + 0x0, 0, eax, 31:0, max_std_leaf , Highest standard CPUID leaf supported + 0x0, 0, ebx, 31:0, cpu_vendorid_0 , CPU vendor ID string bytes 0 - 3 + 0x0, 0, ecx, 31:0, cpu_vendorid_2 , CPU vendor ID string bytes 8 - 11 + 0x0, 0, edx, 31:0, cpu_vendorid_1 , CPU vendor ID string bytes 4 - 7 # Leaf 1H # CPU FMS (Family/Model/Stepping) + standard feature flags - 1, 0, eax, 3:0, stepping , Stepping ID - 1, 0, eax, 7:4, base_model , Base CPU model ID - 1, 0, eax, 11:8, base_family_id , Base CPU family ID - 1, 0, eax, 13:12, cpu_type , CPU type - 1, 0, eax, 19:16, ext_model , Extended CPU model ID - 1, 0, eax, 27:20, ext_family , Extended CPU family ID - 1, 0, ebx, 7:0, brand_id , Brand index - 1, 0, ebx, 15:8, clflush_size , CLFLUSH instruction cache line size - 1, 0, ebx, 23:16, n_logical_cpu , Logical CPU (HW threads) count - 1, 0, ebx, 31:24, local_apic_id , Initial local APIC physical ID - 1, 0, ecx, 0, pni , Streaming SIMD Extensions 3 (SSE3) - 1, 0, ecx, 1, pclmulqdq , PCLMULQDQ instruction support - 1, 0, ecx, 2, dtes64 , 64-bit DS save area - 1, 0, ecx, 3, monitor , MONITOR/MWAIT support - 1, 0, ecx, 4, ds_cpl , CPL Qualified Debug Store - 1, 0, ecx, 5, vmx , Virtual Machine Extensions - 1, 0, ecx, 6, smx , Safer Mode Extensions - 1, 0, ecx, 7, est , Enhanced Intel SpeedStep - 1, 0, ecx, 8, tm2 , Thermal Monitor 2 - 1, 0, ecx, 9, ssse3 , Supplemental SSE3 - 1, 0, ecx, 10, cid , L1 Context ID - 1, 0, ecx, 11, sdbg 
, Sillicon Debug - 1, 0, ecx, 12, fma , FMA extensions using YMM state - 1, 0, ecx, 13, cx16 , CMPXCHG16B instruction support - 1, 0, ecx, 14, xtpr , xTPR Update Control - 1, 0, ecx, 15, pdcm , Perfmon and Debug Capability - 1, 0, ecx, 17, pcid , Process-context identifiers - 1, 0, ecx, 18, dca , Direct Cache Access - 1, 0, ecx, 19, sse4_1 , SSE4.1 - 1, 0, ecx, 20, sse4_2 , SSE4.2 - 1, 0, ecx, 21, x2apic , X2APIC support - 1, 0, ecx, 22, movbe , MOVBE instruction support - 1, 0, ecx, 23, popcnt , POPCNT instruction support - 1, 0, ecx, 24, tsc_deadline_timer , APIC timer one-shot operation - 1, 0, ecx, 25, aes , AES instructions - 1, 0, ecx, 26, xsave , XSAVE (and related instructions) support - 1, 0, ecx, 27, osxsave , XSAVE (and related instructions) are enabled by OS - 1, 0, ecx, 28, avx , AVX instructions support - 1, 0, ecx, 29, f16c , Half-precision floating-point conversion support - 1, 0, ecx, 30, rdrand , RDRAND instruction support - 1, 0, ecx, 31, guest_status , System is running as guest; (para-)virtualized system - 1, 0, edx, 0, fpu , Floating-Point Unit on-chip (x87) - 1, 0, edx, 1, vme , Virtual-8086 Mode Extensions - 1, 0, edx, 2, de , Debugging Extensions - 1, 0, edx, 3, pse , Page Size Extension - 1, 0, edx, 4, tsc , Time Stamp Counter - 1, 0, edx, 5, msr , Model-Specific Registers (RDMSR and WRMSR support) - 1, 0, edx, 6, pae , Physical Address Extensions - 1, 0, edx, 7, mce , Machine Check Exception - 1, 0, edx, 8, cx8 , CMPXCHG8B instruction - 1, 0, edx, 9, apic , APIC on-chip - 1, 0, edx, 11, sep , SYSENTER, SYSEXIT, and associated MSRs - 1, 0, edx, 12, mtrr , Memory Type Range Registers - 1, 0, edx, 13, pge , Page Global Extensions - 1, 0, edx, 14, mca , Machine Check Architecture - 1, 0, edx, 15, cmov , Conditional Move Instruction - 1, 0, edx, 16, pat , Page Attribute Table - 1, 0, edx, 17, pse36 , Page Size Extension (36-bit) - 1, 0, edx, 18, pn , Processor Serial Number - 1, 0, edx, 19, clflush , CLFLUSH instruction - 1, 0, edx, 21, dts , Debug Store - 1, 0, edx, 22, acpi , Thermal monitor and clock control - 1, 0, edx, 23, mmx , MMX instructions - 1, 0, edx, 24, fxsr , FXSAVE and FXRSTOR instructions - 1, 0, edx, 25, sse , SSE instructions - 1, 0, edx, 26, sse2 , SSE2 instructions - 1, 0, edx, 27, ss , Self Snoop - 1, 0, edx, 28, ht , Hyper-threading - 1, 0, edx, 29, tm , Thermal Monitor - 1, 0, edx, 30, ia64 , Legacy IA-64 (Itanium) support bit, now resreved - 1, 0, edx, 31, pbe , Pending Break Enable + 0x1, 0, eax, 3:0, stepping , Stepping ID + 0x1, 0, eax, 7:4, base_model , Base CPU model ID + 0x1, 0, eax, 11:8, base_family_id , Base CPU family ID + 0x1, 0, eax, 13:12, cpu_type , CPU type + 0x1, 0, eax, 19:16, ext_model , Extended CPU model ID + 0x1, 0, eax, 27:20, ext_family , Extended CPU family ID + 0x1, 0, ebx, 7:0, brand_id , Brand index + 0x1, 0, ebx, 15:8, clflush_size , CLFLUSH instruction cache line size + 0x1, 0, ebx, 23:16, n_logical_cpu , Logical CPU count + 0x1, 0, ebx, 31:24, local_apic_id , Initial local APIC physical ID + 0x1, 0, ecx, 0, pni , Streaming SIMD Extensions 3 (SSE3) + 0x1, 0, ecx, 1, pclmulqdq , PCLMULQDQ instruction support + 0x1, 0, ecx, 2, dtes64 , 64-bit DS save area + 0x1, 0, ecx, 3, monitor , MONITOR/MWAIT support + 0x1, 0, ecx, 4, ds_cpl , CPL Qualified Debug Store + 0x1, 0, ecx, 5, vmx , Virtual Machine Extensions + 0x1, 0, ecx, 6, smx , Safer Mode Extensions + 0x1, 0, ecx, 7, est , Enhanced Intel SpeedStep + 0x1, 0, ecx, 8, tm2 , Thermal Monitor 2 + 0x1, 0, ecx, 9, ssse3 , Supplemental SSE3 + 0x1, 0, ecx, 10, cid , L1 
Context ID + 0x1, 0, ecx, 11, sdbg , Silicon Debug + 0x1, 0, ecx, 12, fma , FMA extensions using YMM state + 0x1, 0, ecx, 13, cx16 , CMPXCHG16B instruction support + 0x1, 0, ecx, 14, xtpr , xTPR Update Control + 0x1, 0, ecx, 15, pdcm , Perfmon and Debug Capability + 0x1, 0, ecx, 17, pcid , Process-context identifiers + 0x1, 0, ecx, 18, dca , Direct Cache Access + 0x1, 0, ecx, 19, sse4_1 , SSE4.1 + 0x1, 0, ecx, 20, sse4_2 , SSE4.2 + 0x1, 0, ecx, 21, x2apic , X2APIC support + 0x1, 0, ecx, 22, movbe , MOVBE instruction support + 0x1, 0, ecx, 23, popcnt , POPCNT instruction support + 0x1, 0, ecx, 24, tsc_deadline_timer , APIC timer one-shot operation + 0x1, 0, ecx, 25, aes , AES instructions + 0x1, 0, ecx, 26, xsave , XSAVE (and related instructions) support + 0x1, 0, ecx, 27, osxsave , XSAVE (and related instructions) are enabled by OS + 0x1, 0, ecx, 28, avx , AVX instructions support + 0x1, 0, ecx, 29, f16c , Half-precision floating-point conversion support + 0x1, 0, ecx, 30, rdrand , RDRAND instruction support + 0x1, 0, ecx, 31, guest_status , System is running as guest; (para-)virtualized system + 0x1, 0, edx, 0, fpu , Floating-Point Unit on-chip (x87) + 0x1, 0, edx, 1, vme , Virtual-8086 Mode Extensions + 0x1, 0, edx, 2, de , Debugging Extensions + 0x1, 0, edx, 3, pse , Page Size Extension + 0x1, 0, edx, 4, tsc , Time Stamp Counter + 0x1, 0, edx, 5, msr , Model-Specific Registers (RDMSR and WRMSR support) + 0x1, 0, edx, 6, pae , Physical Address Extensions + 0x1, 0, edx, 7, mce , Machine Check Exception + 0x1, 0, edx, 8, cx8 , CMPXCHG8B instruction + 0x1, 0, edx, 9, apic , APIC on-chip + 0x1, 0, edx, 11, sep , SYSENTER, SYSEXIT, and associated MSRs + 0x1, 0, edx, 12, mtrr , Memory Type Range Registers + 0x1, 0, edx, 13, pge , Page Global Extensions + 0x1, 0, edx, 14, mca , Machine Check Architecture + 0x1, 0, edx, 15, cmov , Conditional Move Instruction + 0x1, 0, edx, 16, pat , Page Attribute Table + 0x1, 0, edx, 17, pse36 , Page Size Extension (36-bit) + 0x1, 0, edx, 18, pn , Processor Serial Number + 0x1, 0, edx, 19, clflush , CLFLUSH instruction + 0x1, 0, edx, 21, dts , Debug Store + 0x1, 0, edx, 22, acpi , Thermal monitor and clock control + 0x1, 0, edx, 23, mmx , MMX instructions + 0x1, 0, edx, 24, fxsr , FXSAVE and FXRSTOR instructions + 0x1, 0, edx, 25, sse , SSE instructions + 0x1, 0, edx, 26, sse2 , SSE2 instructions + 0x1, 0, edx, 27, ss , Self Snoop + 0x1, 0, edx, 28, ht , Hyper-threading + 0x1, 0, edx, 29, tm , Thermal Monitor + 0x1, 0, edx, 30, ia64 , Legacy IA-64 (Itanium) support bit, now reserved + 0x1, 0, edx, 31, pbe , Pending Break Enable # Leaf 2H # Intel cache and TLB information one-byte descriptors - 2, 0, eax, 7:0, iteration_count , Number of times this CPUD leaf must be queried - 2, 0, eax, 15:8, desc1 , Descriptor #1 - 2, 0, eax, 23:16, desc2 , Descriptor #2 - 2, 0, eax, 30:24, desc3 , Descriptor #3 - 2, 0, eax, 31, eax_invalid , Descriptors 1-3 are invalid if set - 2, 0, ebx, 7:0, desc4 , Descriptor #4 - 2, 0, ebx, 15:8, desc5 , Descriptor #5 - 2, 0, ebx, 23:16, desc6 , Descriptor #6 - 2, 0, ebx, 30:24, desc7 , Descriptor #7 - 2, 0, ebx, 31, ebx_invalid , Descriptors 4-7 are invalid if set - 2, 0, ecx, 7:0, desc8 , Descriptor #8 - 2, 0, ecx, 15:8, desc9 , Descriptor #9 - 2, 0, ecx, 23:16, desc10 , Descriptor #10 - 2, 0, ecx, 30:24, desc11 , Descriptor #11 - 2, 0, ecx, 31, ecx_invalid , Descriptors 8-11 are invalid if set - 2, 0, edx, 7:0, desc12 , Descriptor #12 - 2, 0, edx, 15:8, desc13 , Descriptor #13 - 2, 0, edx, 23:16, desc14 , Descriptor #14 - 2, 0, edx, 
30:24, desc15 , Descriptor #15 - 2, 0, edx, 31, edx_invalid , Descriptors 12-15 are invalid if set + 0x2, 0, eax, 7:0, iteration_count , Number of times this leaf must be queried + 0x2, 0, eax, 15:8, desc1 , Descriptor #1 + 0x2, 0, eax, 23:16, desc2 , Descriptor #2 + 0x2, 0, eax, 30:24, desc3 , Descriptor #3 + 0x2, 0, eax, 31, eax_invalid , Descriptors 1-3 are invalid if set + 0x2, 0, ebx, 7:0, desc4 , Descriptor #4 + 0x2, 0, ebx, 15:8, desc5 , Descriptor #5 + 0x2, 0, ebx, 23:16, desc6 , Descriptor #6 + 0x2, 0, ebx, 30:24, desc7 , Descriptor #7 + 0x2, 0, ebx, 31, ebx_invalid , Descriptors 4-7 are invalid if set + 0x2, 0, ecx, 7:0, desc8 , Descriptor #8 + 0x2, 0, ecx, 15:8, desc9 , Descriptor #9 + 0x2, 0, ecx, 23:16, desc10 , Descriptor #10 + 0x2, 0, ecx, 30:24, desc11 , Descriptor #11 + 0x2, 0, ecx, 31, ecx_invalid , Descriptors 8-11 are invalid if set + 0x2, 0, edx, 7:0, desc12 , Descriptor #12 + 0x2, 0, edx, 15:8, desc13 , Descriptor #13 + 0x2, 0, edx, 23:16, desc14 , Descriptor #14 + 0x2, 0, edx, 30:24, desc15 , Descriptor #15 + 0x2, 0, edx, 31, edx_invalid , Descriptors 12-15 are invalid if set # Leaf 4H # Intel deterministic cache parameters - 4, 31:0, eax, 4:0, cache_type , Cache type field - 4, 31:0, eax, 7:5, cache_level , Cache level (1-based) - 4, 31:0, eax, 8, cache_self_init , Self-initialializing cache level - 4, 31:0, eax, 9, fully_associative , Fully-associative cache - 4, 31:0, eax, 25:14, num_threads_sharing , Number logical CPUs sharing this cache - 4, 31:0, eax, 31:26, num_cores_on_die , Number of cores in the physical package - 4, 31:0, ebx, 11:0, cache_linesize , System coherency line size (0-based) - 4, 31:0, ebx, 21:12, cache_npartitions , Physical line partitions (0-based) - 4, 31:0, ebx, 31:22, cache_nways , Ways of associativity (0-based) - 4, 31:0, ecx, 30:0, cache_nsets , Cache number of sets (0-based) - 4, 31:0, edx, 0, wbinvd_rll_no_guarantee, WBINVD/INVD not guaranteed for Remote Lower-Level caches - 4, 31:0, edx, 1, ll_inclusive , Cache is inclusive of Lower-Level caches - 4, 31:0, edx, 2, complex_indexing , Not a direct-mapped cache (complex function) + 0x4, 31:0, eax, 4:0, cache_type , Cache type field + 0x4, 31:0, eax, 7:5, cache_level , Cache level (1-based) + 0x4, 31:0, eax, 8, cache_self_init , Self-initializing cache level + 0x4, 31:0, eax, 9, fully_associative , Fully-associative cache + 0x4, 31:0, eax, 25:14, num_threads_sharing , Number logical CPUs sharing this cache + 0x4, 31:0, eax, 31:26, num_cores_on_die , Number of cores in the physical package + 0x4, 31:0, ebx, 11:0, cache_linesize , System coherency line size (0-based) + 0x4, 31:0, ebx, 21:12, cache_npartitions , Physical line partitions (0-based) + 0x4, 31:0, ebx, 31:22, cache_nways , Ways of associativity (0-based) + 0x4, 31:0, ecx, 30:0, cache_nsets , Cache number of sets (0-based) + 0x4, 31:0, edx, 0, wbinvd_rll_no_guarantee, WBINVD/INVD not guaranteed for Remote Lower-Level caches + 0x4, 31:0, edx, 1, ll_inclusive , Cache is inclusive of Lower-Level caches + 0x4, 31:0, edx, 2, complex_indexing , Not a direct-mapped cache (complex function) # Leaf 5H # MONITOR/MWAIT instructions enumeration - 5, 0, eax, 15:0, min_mon_size , Smallest monitor-line size, in bytes - 5, 0, ebx, 15:0, max_mon_size , Largest monitor-line size, in bytes - 5, 0, ecx, 0, mwait_ext , Enumeration of MONITOR/MWAIT extensions is supported - 5, 0, ecx, 1, mwait_irq_break , Interrupts as a break-event for MWAIT is supported - 5, 0, edx, 3:0, n_c0_substates , Number of C0 sub C-states supported using MWAIT - 5, 0, edx, 
7:4, n_c1_substates , Number of C1 sub C-states supported using MWAIT - 5, 0, edx, 11:8, n_c2_substates , Number of C2 sub C-states supported using MWAIT - 5, 0, edx, 15:12, n_c3_substates , Number of C3 sub C-states supported using MWAIT - 5, 0, edx, 19:16, n_c4_substates , Number of C4 sub C-states supported using MWAIT - 5, 0, edx, 23:20, n_c5_substates , Number of C5 sub C-states supported using MWAIT - 5, 0, edx, 27:24, n_c6_substates , Number of C6 sub C-states supported using MWAIT - 5, 0, edx, 31:28, n_c7_substates , Number of C7 sub C-states supported using MWAIT + 0x5, 0, eax, 15:0, min_mon_size , Smallest monitor-line size, in bytes + 0x5, 0, ebx, 15:0, max_mon_size , Largest monitor-line size, in bytes + 0x5, 0, ecx, 0, mwait_ext , Enumeration of MONITOR/MWAIT extensions is supported + 0x5, 0, ecx, 1, mwait_irq_break , Interrupts as a break-event for MWAIT is supported + 0x5, 0, edx, 3:0, n_c0_substates , Number of C0 sub C-states supported using MWAIT + 0x5, 0, edx, 7:4, n_c1_substates , Number of C1 sub C-states supported using MWAIT + 0x5, 0, edx, 11:8, n_c2_substates , Number of C2 sub C-states supported using MWAIT + 0x5, 0, edx, 15:12, n_c3_substates , Number of C3 sub C-states supported using MWAIT + 0x5, 0, edx, 19:16, n_c4_substates , Number of C4 sub C-states supported using MWAIT + 0x5, 0, edx, 23:20, n_c5_substates , Number of C5 sub C-states supported using MWAIT + 0x5, 0, edx, 27:24, n_c6_substates , Number of C6 sub C-states supported using MWAIT + 0x5, 0, edx, 31:28, n_c7_substates , Number of C7 sub C-states supported using MWAIT # Leaf 6H # Thermal and Power Management enumeration - 6, 0, eax, 0, dtherm , Digital temprature sensor - 6, 0, eax, 1, turbo_boost , Intel Turbo Boost - 6, 0, eax, 2, arat , Always-Running APIC Timer (not affected by p-state) - 6, 0, eax, 4, pln , Power Limit Notification (PLN) event - 6, 0, eax, 5, ecmd , Clock modulation duty cycle extension - 6, 0, eax, 6, pts , Package thermal management - 6, 0, eax, 7, hwp , HWP (Hardware P-states) base registers are supported - 6, 0, eax, 8, hwp_notify , HWP notification (IA32_HWP_INTERRUPT MSR) - 6, 0, eax, 9, hwp_act_window , HWP activity window (IA32_HWP_REQUEST[bits 41:32]) supported - 6, 0, eax, 10, hwp_epp , HWP Energy Performance Preference - 6, 0, eax, 11, hwp_pkg_req , HWP Package Level Request - 6, 0, eax, 13, hdc_base_regs , HDC base registers are supported - 6, 0, eax, 14, turbo_boost_3_0 , Intel Turbo Boost Max 3.0 - 6, 0, eax, 15, hwp_capabilities , HWP Highest Performance change - 6, 0, eax, 16, hwp_peci_override , HWP PECI override - 6, 0, eax, 17, hwp_flexible , Flexible HWP - 6, 0, eax, 18, hwp_fast , IA32_HWP_REQUEST MSR fast access mode - 6, 0, eax, 19, hfi , HW_FEEDBACK MSRs supported - 6, 0, eax, 20, hwp_ignore_idle , Ignoring idle logical CPU HWP req is supported - 6, 0, eax, 23, thread_director , Intel thread director support - 6, 0, eax, 24, therm_interrupt_bit25 , IA32_THERM_INTERRUPT MSR bit 25 is supported - 6, 0, ebx, 3:0, n_therm_thresholds , Digital thermometer thresholds - 6, 0, ecx, 0, aperfmperf , MPERF/APERF MSRs (effective frequency interface) - 6, 0, ecx, 3, epb , IA32_ENERGY_PERF_BIAS MSR support - 6, 0, ecx, 15:8, thrd_director_nclasses , Number of classes, Intel thread director - 6, 0, edx, 0, perfcap_reporting , Performance capability reporting - 6, 0, edx, 1, encap_reporting , Energy efficiency capability reporting - 6, 0, edx, 11:8, feedback_sz , HW feedback interface struct size, in 4K pages - 6, 0, edx, 31:16, this_lcpu_hwfdbk_idx , This logical CPU 
index @ HW feedback struct, 0-based + 0x6, 0, eax, 0, dtherm , Digital temperature sensor + 0x6, 0, eax, 1, turbo_boost , Intel Turbo Boost + 0x6, 0, eax, 2, arat , Always-Running APIC Timer (not affected by p-state) + 0x6, 0, eax, 4, pln , Power Limit Notification (PLN) event + 0x6, 0, eax, 5, ecmd , Clock modulation duty cycle extension + 0x6, 0, eax, 6, pts , Package thermal management + 0x6, 0, eax, 7, hwp , HWP (Hardware P-states) base registers are supported + 0x6, 0, eax, 8, hwp_notify , HWP notification (IA32_HWP_INTERRUPT MSR) + 0x6, 0, eax, 9, hwp_act_window , HWP activity window (IA32_HWP_REQUEST[bits 41:32]) supported + 0x6, 0, eax, 10, hwp_epp , HWP Energy Performance Preference + 0x6, 0, eax, 11, hwp_pkg_req , HWP Package Level Request + 0x6, 0, eax, 13, hdc_base_regs , HDC base registers are supported + 0x6, 0, eax, 14, turbo_boost_3_0 , Intel Turbo Boost Max 3.0 + 0x6, 0, eax, 15, hwp_capabilities , HWP Highest Performance change + 0x6, 0, eax, 16, hwp_peci_override , HWP PECI override + 0x6, 0, eax, 17, hwp_flexible , Flexible HWP + 0x6, 0, eax, 18, hwp_fast , IA32_HWP_REQUEST MSR fast access mode + 0x6, 0, eax, 19, hfi , HW_FEEDBACK MSRs supported + 0x6, 0, eax, 20, hwp_ignore_idle , Ignoring idle logical CPU HWP req is supported + 0x6, 0, eax, 23, thread_director , Intel thread director support + 0x6, 0, eax, 24, therm_interrupt_bit25 , IA32_THERM_INTERRUPT MSR bit 25 is supported + 0x6, 0, ebx, 3:0, n_therm_thresholds , Digital thermometer thresholds + 0x6, 0, ecx, 0, aperfmperf , MPERF/APERF MSRs (effective frequency interface) + 0x6, 0, ecx, 3, epb , IA32_ENERGY_PERF_BIAS MSR support + 0x6, 0, ecx, 15:8, thrd_director_nclasses , Number of classes, Intel thread director + 0x6, 0, edx, 0, perfcap_reporting , Performance capability reporting + 0x6, 0, edx, 1, encap_reporting , Energy efficiency capability reporting + 0x6, 0, edx, 11:8, feedback_sz , Feedback interface structure size, in 4K pages + 0x6, 0, edx, 31:16, this_lcpu_hwfdbk_idx , This logical CPU hardware feedback interface index # Leaf 7H # Extended CPU features enumeration - 7, 0, eax, 31:0, leaf7_n_subleaves , Number of cpuid 0x7 subleaves - 7, 0, ebx, 0, fsgsbase , FSBASE/GSBASE read/write support - 7, 0, ebx, 1, tsc_adjust , IA32_TSC_ADJUST MSR supported - 7, 0, ebx, 2, sgx , Intel SGX (Software Guard Extensions) - 7, 0, ebx, 3, bmi1 , Bit manipulation extensions group 1 - 7, 0, ebx, 4, hle , Hardware Lock Elision - 7, 0, ebx, 5, avx2 , AVX2 instruction set - 7, 0, ebx, 6, fdp_excptn_only , FPU Data Pointer updated only on x87 exceptions - 7, 0, ebx, 7, smep , Supervisor Mode Execution Protection - 7, 0, ebx, 8, bmi2 , Bit manipulation extensions group 2 - 7, 0, ebx, 9, erms , Enhanced REP MOVSB/STOSB - 7, 0, ebx, 10, invpcid , INVPCID instruction (Invalidate Processor Context ID) - 7, 0, ebx, 11, rtm , Intel restricted transactional memory - 7, 0, ebx, 12, cqm , Intel RDT-CMT / AMD Platform-QoS cache monitoring - 7, 0, ebx, 13, zero_fcs_fds , Deprecated FPU CS/DS (stored as zero) - 7, 0, ebx, 14, mpx , Intel memory protection extensions - 7, 0, ebx, 15, rdt_a , Intel RDT / AMD Platform-QoS Enforcemeent - 7, 0, ebx, 16, avx512f , AVX-512 foundation instructions - 7, 0, ebx, 17, avx512dq , AVX-512 double/quadword instructions - 7, 0, ebx, 18, rdseed , RDSEED instruction - 7, 0, ebx, 19, adx , ADCX/ADOX instructions - 7, 0, ebx, 20, smap , Supervisor mode access prevention - 7, 0, ebx, 21, avx512ifma , AVX-512 integer fused multiply add - 7, 0, ebx, 23, clflushopt , CLFLUSHOPT instruction - 7, 0, ebx, 24, 
clwb , CLWB instruction - 7, 0, ebx, 25, intel_pt , Intel processor trace - 7, 0, ebx, 26, avx512pf , AVX-512 prefetch instructions - 7, 0, ebx, 27, avx512er , AVX-512 exponent/reciprocal instrs - 7, 0, ebx, 28, avx512cd , AVX-512 conflict detection instrs - 7, 0, ebx, 29, sha_ni , SHA/SHA256 instructions - 7, 0, ebx, 30, avx512bw , AVX-512 BW (byte/word granular) instructions - 7, 0, ebx, 31, avx512vl , AVX-512 VL (128/256 vector length) extensions - 7, 0, ecx, 0, prefetchwt1 , PREFETCHWT1 (Intel Xeon Phi only) - 7, 0, ecx, 1, avx512vbmi , AVX-512 Vector byte manipulation instrs - 7, 0, ecx, 2, umip , User mode instruction protection - 7, 0, ecx, 3, pku , Protection keys for user-space - 7, 0, ecx, 4, ospke , OS protection keys enable - 7, 0, ecx, 5, waitpkg , WAITPKG instructions - 7, 0, ecx, 6, avx512_vbmi2 , AVX-512 vector byte manipulation instrs group 2 - 7, 0, ecx, 7, cet_ss , CET shadow stack features - 7, 0, ecx, 8, gfni , Galois field new instructions - 7, 0, ecx, 9, vaes , Vector AES instrs - 7, 0, ecx, 10, vpclmulqdq , VPCLMULQDQ 256-bit instruction support - 7, 0, ecx, 11, avx512_vnni , Vector neural network instructions - 7, 0, ecx, 12, avx512_bitalg , AVX-512 bit count/shiffle - 7, 0, ecx, 13, tme , Intel total memory encryption - 7, 0, ecx, 14, avx512_vpopcntdq , AVX-512: POPCNT for vectors of DW/QW - 7, 0, ecx, 16, la57 , 57-bit linear addreses (five-level paging) - 7, 0, ecx, 21:17, mawau_val_lm , BNDLDX/BNDSTX MAWAU value in 64-bit mode - 7, 0, ecx, 22, rdpid , RDPID instruction - 7, 0, ecx, 23, key_locker , Intel key locker support - 7, 0, ecx, 24, bus_lock_detect , OS bus-lock detection - 7, 0, ecx, 25, cldemote , CLDEMOTE instruction - 7, 0, ecx, 27, movdiri , MOVDIRI instruction - 7, 0, ecx, 28, movdir64b , MOVDIR64B instruction - 7, 0, ecx, 29, enqcmd , Enqueue stores supported (ENQCMD{,S}) - 7, 0, ecx, 30, sgx_lc , Intel SGX launch configuration - 7, 0, ecx, 31, pks , Protection keys for supervisor-mode pages - 7, 0, edx, 1, sgx_keys , Intel SGX attestation services - 7, 0, edx, 2, avx512_4vnniw , AVX-512 neural network instructions - 7, 0, edx, 3, avx512_4fmaps , AVX-512 multiply accumulation single precision - 7, 0, edx, 4, fsrm , Fast short REP MOV - 7, 0, edx, 5, uintr , CPU supports user interrupts - 7, 0, edx, 8, avx512_vp2intersect , VP2INTERSECT{D,Q} instructions - 7, 0, edx, 9, srdbs_ctrl , SRBDS mitigation MSR available - 7, 0, edx, 10, md_clear , VERW MD_CLEAR microcode support - 7, 0, edx, 11, rtm_always_abort , XBEGIN (RTM transaction) always aborts - 7, 0, edx, 13, tsx_force_abort , MSR TSX_FORCE_ABORT, RTM_ABORT bit, supported - 7, 0, edx, 14, serialize , SERIALIZE instruction - 7, 0, edx, 15, hybrid_cpu , The CPU is identified as a 'hybrid part' - 7, 0, edx, 16, tsxldtrk , TSX suspend/resume load address tracking - 7, 0, edx, 18, pconfig , PCONFIG instruction - 7, 0, edx, 19, arch_lbr , Intel architectural LBRs - 7, 0, edx, 20, ibt , CET indirect branch tracking - 7, 0, edx, 22, amx_bf16 , AMX-BF16: tile bfloat16 support - 7, 0, edx, 23, avx512_fp16 , AVX-512 FP16 instructions - 7, 0, edx, 24, amx_tile , AMX-TILE: tile architecture support - 7, 0, edx, 25, amx_int8 , AMX-INT8: tile 8-bit integer support - 7, 0, edx, 26, spec_ctrl , Speculation Control (IBRS/IBPB: indirect branch restrictions) - 7, 0, edx, 27, intel_stibp , Single thread indirect branch predictors - 7, 0, edx, 28, flush_l1d , FLUSH L1D cache: IA32_FLUSH_CMD MSR - 7, 0, edx, 29, arch_capabilities , Intel IA32_ARCH_CAPABILITIES MSR - 7, 0, edx, 30, core_capabilities , 
IA32_CORE_CAPABILITIES MSR - 7, 0, edx, 31, spec_ctrl_ssbd , Speculative store bypass disable - 7, 1, eax, 4, avx_vnni , AVX-VNNI instructions - 7, 1, eax, 5, avx512_bf16 , AVX-512 bFloat16 instructions - 7, 1, eax, 6, lass , Linear address space separation - 7, 1, eax, 7, cmpccxadd , CMPccXADD instructions - 7, 1, eax, 8, arch_perfmon_ext , ArchPerfmonExt: CPUID leaf 0x23 is supported - 7, 1, eax, 10, fzrm , Fast zero-length REP MOVSB - 7, 1, eax, 11, fsrs , Fast short REP STOSB - 7, 1, eax, 12, fsrc , Fast Short REP CMPSB/SCASB - 7, 1, eax, 17, fred , FRED: Flexible return and event delivery transitions - 7, 1, eax, 18, lkgs , LKGS: Load 'kernel' (userspace) GS - 7, 1, eax, 19, wrmsrns , WRMSRNS instr (WRMSR-non-serializing) - 7, 1, eax, 21, amx_fp16 , AMX-FP16: FP16 tile operations - 7, 1, eax, 22, hreset , History reset support - 7, 1, eax, 23, avx_ifma , Integer fused multiply add - 7, 1, eax, 26, lam , Linear address masking - 7, 1, eax, 27, rd_wr_msrlist , RDMSRLIST/WRMSRLIST instructions - 7, 1, ebx, 0, intel_ppin , Protected processor inventory number (PPIN{,_CTL} MSRs) - 7, 1, edx, 4, avx_vnni_int8 , AVX-VNNI-INT8 instructions - 7, 1, edx, 5, avx_ne_convert , AVX-NE-CONVERT instructions - 7, 1, edx, 8, amx_complex , AMX-COMPLEX instructions (starting from Granite Rapids) - 7, 1, edx, 14, prefetchit_0_1 , PREFETCHIT0/1 instructions - 7, 1, edx, 18, cet_sss , CET supervisor shadow stacks safe to use - 7, 2, edx, 0, intel_psfd , Intel predictive store forward disable - 7, 2, edx, 1, ipred_ctrl , MSR bits IA32_SPEC_CTRL.IPRED_DIS_{U,S} - 7, 2, edx, 2, rrsba_ctrl , MSR bits IA32_SPEC_CTRL.RRSBA_DIS_{U,S} - 7, 2, edx, 3, ddp_ctrl , MSR bit IA32_SPEC_CTRL.DDPD_U - 7, 2, edx, 4, bhi_ctrl , MSR bit IA32_SPEC_CTRL.BHI_DIS_S - 7, 2, edx, 5, mcdt_no , MCDT mitigation not needed - 7, 2, edx, 6, uclock_disable , UC-lock disable is supported + 0x7, 0, eax, 31:0, leaf7_n_subleaves , Number of leaf 0x7 subleaves + 0x7, 0, ebx, 0, fsgsbase , FSBASE/GSBASE read/write support + 0x7, 0, ebx, 1, tsc_adjust , IA32_TSC_ADJUST MSR supported + 0x7, 0, ebx, 2, sgx , Intel SGX (Software Guard Extensions) + 0x7, 0, ebx, 3, bmi1 , Bit manipulation extensions group 1 + 0x7, 0, ebx, 4, hle , Hardware Lock Elision + 0x7, 0, ebx, 5, avx2 , AVX2 instruction set + 0x7, 0, ebx, 6, fdp_excptn_only , FPU Data Pointer updated only on x87 exceptions + 0x7, 0, ebx, 7, smep , Supervisor Mode Execution Protection + 0x7, 0, ebx, 8, bmi2 , Bit manipulation extensions group 2 + 0x7, 0, ebx, 9, erms , Enhanced REP MOVSB/STOSB + 0x7, 0, ebx, 10, invpcid , INVPCID instruction (Invalidate Processor Context ID) + 0x7, 0, ebx, 11, rtm , Intel restricted transactional memory + 0x7, 0, ebx, 12, cqm , Intel RDT-CMT / AMD Platform-QoS cache monitoring + 0x7, 0, ebx, 13, zero_fcs_fds , Deprecated FPU CS/DS (stored as zero) + 0x7, 0, ebx, 14, mpx , Intel memory protection extensions + 0x7, 0, ebx, 15, rdt_a , Intel RDT / AMD Platform-QoS Enforcement + 0x7, 0, ebx, 16, avx512f , AVX-512 foundation instructions + 0x7, 0, ebx, 17, avx512dq , AVX-512 double/quadword instructions + 0x7, 0, ebx, 18, rdseed , RDSEED instruction + 0x7, 0, ebx, 19, adx , ADCX/ADOX instructions + 0x7, 0, ebx, 20, smap , Supervisor mode access prevention + 0x7, 0, ebx, 21, avx512ifma , AVX-512 integer fused multiply add + 0x7, 0, ebx, 23, clflushopt , CLFLUSHOPT instruction + 0x7, 0, ebx, 24, clwb , CLWB instruction + 0x7, 0, ebx, 25, intel_pt , Intel processor trace + 0x7, 0, ebx, 26, avx512pf , AVX-512 prefetch instructions + 0x7, 0, ebx, 27, avx512er , AVX-512 
exponent/reciprocal instructions + 0x7, 0, ebx, 28, avx512cd , AVX-512 conflict detection instructions + 0x7, 0, ebx, 29, sha_ni , SHA/SHA256 instructions + 0x7, 0, ebx, 30, avx512bw , AVX-512 byte/word instructions + 0x7, 0, ebx, 31, avx512vl , AVX-512 VL (128/256 vector length) extensions + 0x7, 0, ecx, 0, prefetchwt1 , PREFETCHWT1 (Intel Xeon Phi only) + 0x7, 0, ecx, 1, avx512vbmi , AVX-512 Vector byte manipulation instructions + 0x7, 0, ecx, 2, umip , User mode instruction protection + 0x7, 0, ecx, 3, pku , Protection keys for user-space + 0x7, 0, ecx, 4, ospke , OS protection keys enable + 0x7, 0, ecx, 5, waitpkg , WAITPKG instructions + 0x7, 0, ecx, 6, avx512_vbmi2 , AVX-512 vector byte manipulation instructions group 2 + 0x7, 0, ecx, 7, cet_ss , CET shadow stack features + 0x7, 0, ecx, 8, gfni , Galois field new instructions + 0x7, 0, ecx, 9, vaes , Vector AES instructions + 0x7, 0, ecx, 10, vpclmulqdq , VPCLMULQDQ 256-bit instruction support + 0x7, 0, ecx, 11, avx512_vnni , Vector neural network instructions + 0x7, 0, ecx, 12, avx512_bitalg , AVX-512 bitwise algorithms + 0x7, 0, ecx, 13, tme , Intel total memory encryption + 0x7, 0, ecx, 14, avx512_vpopcntdq , AVX-512: POPCNT for vectors of DWORD/QWORD + 0x7, 0, ecx, 16, la57 , 57-bit linear addresses (five-level paging) + 0x7, 0, ecx, 21:17, mawau_val_lm , BNDLDX/BNDSTX MAWAU value in 64-bit mode + 0x7, 0, ecx, 22, rdpid , RDPID instruction + 0x7, 0, ecx, 23, key_locker , Intel key locker support + 0x7, 0, ecx, 24, bus_lock_detect , OS bus-lock detection + 0x7, 0, ecx, 25, cldemote , CLDEMOTE instruction + 0x7, 0, ecx, 27, movdiri , MOVDIRI instruction + 0x7, 0, ecx, 28, movdir64b , MOVDIR64B instruction + 0x7, 0, ecx, 29, enqcmd , Enqueue stores supported (ENQCMD{,S}) + 0x7, 0, ecx, 30, sgx_lc , Intel SGX launch configuration + 0x7, 0, ecx, 31, pks , Protection keys for supervisor-mode pages + 0x7, 0, edx, 1, sgx_keys , Intel SGX attestation services + 0x7, 0, edx, 2, avx512_4vnniw , AVX-512 neural network instructions + 0x7, 0, edx, 3, avx512_4fmaps , AVX-512 multiply accumulation single precision + 0x7, 0, edx, 4, fsrm , Fast short REP MOV + 0x7, 0, edx, 5, uintr , CPU supports user interrupts + 0x7, 0, edx, 8, avx512_vp2intersect , VP2INTERSECT{D,Q} instructions + 0x7, 0, edx, 9, srdbs_ctrl , SRBDS mitigation MSR available + 0x7, 0, edx, 10, md_clear , VERW MD_CLEAR microcode support + 0x7, 0, edx, 11, rtm_always_abort , XBEGIN (RTM transaction) always aborts + 0x7, 0, edx, 13, tsx_force_abort , MSR TSX_FORCE_ABORT, RTM_ABORT bit, supported + 0x7, 0, edx, 14, serialize , SERIALIZE instruction + 0x7, 0, edx, 15, hybrid_cpu , The CPU is identified as a 'hybrid part' + 0x7, 0, edx, 16, tsxldtrk , TSX suspend/resume load address tracking + 0x7, 0, edx, 18, pconfig , PCONFIG instruction + 0x7, 0, edx, 19, arch_lbr , Intel architectural LBRs + 0x7, 0, edx, 20, ibt , CET indirect branch tracking + 0x7, 0, edx, 22, amx_bf16 , AMX-BF16: tile bfloat16 support + 0x7, 0, edx, 23, avx512_fp16 , AVX-512 FP16 instructions + 0x7, 0, edx, 24, amx_tile , AMX-TILE: tile architecture support + 0x7, 0, edx, 25, amx_int8 , AMX-INT8: tile 8-bit integer support + 0x7, 0, edx, 26, spec_ctrl , Speculation Control (IBRS/IBPB: indirect branch restrictions) + 0x7, 0, edx, 27, intel_stibp , Single thread indirect branch predictors + 0x7, 0, edx, 28, flush_l1d , FLUSH L1D cache: IA32_FLUSH_CMD MSR + 0x7, 0, edx, 29, arch_capabilities , Intel IA32_ARCH_CAPABILITIES MSR + 0x7, 0, edx, 30, core_capabilities , IA32_CORE_CAPABILITIES MSR + 0x7, 0, edx, 31, 
spec_ctrl_ssbd , Speculative store bypass disable + 0x7, 1, eax, 4, avx_vnni , AVX-VNNI instructions + 0x7, 1, eax, 5, avx512_bf16 , AVX-512 bfloat16 instructions + 0x7, 1, eax, 6, lass , Linear address space separation + 0x7, 1, eax, 7, cmpccxadd , CMPccXADD instructions + 0x7, 1, eax, 8, arch_perfmon_ext , ArchPerfmonExt: leaf 0x23 is supported + 0x7, 1, eax, 10, fzrm , Fast zero-length REP MOVSB + 0x7, 1, eax, 11, fsrs , Fast short REP STOSB + 0x7, 1, eax, 12, fsrc , Fast Short REP CMPSB/SCASB + 0x7, 1, eax, 17, fred , FRED: Flexible return and event delivery transitions + 0x7, 1, eax, 18, lkgs , LKGS: Load 'kernel' (userspace) GS + 0x7, 1, eax, 19, wrmsrns , WRMSRNS instruction (WRMSR-non-serializing) + 0x7, 1, eax, 20, nmi_src , NMI-source reporting with FRED event data + 0x7, 1, eax, 21, amx_fp16 , AMX-FP16: FP16 tile operations + 0x7, 1, eax, 22, hreset , History reset support + 0x7, 1, eax, 23, avx_ifma , Integer fused multiply add + 0x7, 1, eax, 26, lam , Linear address masking + 0x7, 1, eax, 27, rd_wr_msrlist , RDMSRLIST/WRMSRLIST instructions + 0x7, 1, ebx, 0, intel_ppin , Protected processor inventory number (PPIN{,_CTL} MSRs) + 0x7, 1, edx, 4, avx_vnni_int8 , AVX-VNNI-INT8 instructions + 0x7, 1, edx, 5, avx_ne_convert , AVX-NE-CONVERT instructions + 0x7, 1, edx, 8, amx_complex , AMX-COMPLEX instructions (starting from Granite Rapids) + 0x7, 1, edx, 14, prefetchit_0_1 , PREFETCHIT0/1 instructions + 0x7, 1, edx, 18, cet_sss , CET supervisor shadow stacks safe to use + 0x7, 2, edx, 0, intel_psfd , Intel predictive store forward disable + 0x7, 2, edx, 1, ipred_ctrl , MSR bits IA32_SPEC_CTRL.IPRED_DIS_{U,S} + 0x7, 2, edx, 2, rrsba_ctrl , MSR bits IA32_SPEC_CTRL.RRSBA_DIS_{U,S} + 0x7, 2, edx, 3, ddp_ctrl , MSR bit IA32_SPEC_CTRL.DDPD_U + 0x7, 2, edx, 4, bhi_ctrl , MSR bit IA32_SPEC_CTRL.BHI_DIS_S + 0x7, 2, edx, 5, mcdt_no , MCDT mitigation not needed + 0x7, 2, edx, 6, uclock_disable , UC-lock disable is supported # Leaf 9H # Intel DCA (Direct Cache Access) enumeration - 9, 0, eax, 0, dca_enabled_in_bios , DCA is enabled in BIOS + 0x9, 0, eax, 0, dca_enabled_in_bios , DCA is enabled in BIOS # Leaf AH # Intel PMU (Performance Monitoring Unit) enumeration @@ -310,7 +311,7 @@ 0xa, 0, eax, 7:0, pmu_version , Performance monitoring unit version ID 0xa, 0, eax, 15:8, pmu_n_gcounters , Number of general PMU counters per logical CPU 0xa, 0, eax, 23:16, pmu_gcounters_nbits , Bitwidth of PMU general counters - 0xa, 0, eax, 31:24, pmu_cpuid_ebx_bits , Length of cpuid leaf 0xa EBX bit vector + 0xa, 0, eax, 31:24, pmu_cpuid_ebx_bits , Length of leaf 0xa EBX bit vector 0xa, 0, ebx, 0, no_core_cycle_evt , Core cycle event not available 0xa, 0, ebx, 1, no_insn_retired_evt , Instruction retired event not available 0xa, 0, ebx, 2, no_refcycle_evt , Reference cycles event not available @@ -339,18 +340,18 @@ 0xd, 0, eax, 0, xcr0_x87 , XCR0.X87 (bit 0) supported 0xd, 0, eax, 1, xcr0_sse , XCR0.SEE (bit 1) supported 0xd, 0, eax, 2, xcr0_avx , XCR0.AVX (bit 2) supported - 0xd, 0, eax, 3, xcr0_mpx_bndregs , XCR0.BNDREGS (bit 3) supported (MPX BND0-BND3 regs) - 0xd, 0, eax, 4, xcr0_mpx_bndcsr , XCR0.BNDCSR (bit 4) supported (MPX BNDCFGU/BNDSTATUS regs) - 0xd, 0, eax, 5, xcr0_avx512_opmask , XCR0.OPMASK (bit 5) supported (AVX-512 k0-k7 regs) - 0xd, 0, eax, 6, xcr0_avx512_zmm_hi256 , XCR0.ZMM_Hi256 (bit 6) supported (AVX-512 ZMM0->ZMM7/15 regs) - 0xd, 0, eax, 7, xcr0_avx512_hi16_zmm , XCR0.HI16_ZMM (bit 7) supported (AVX-512 ZMM16->ZMM31 regs) - 0xd, 0, eax, 9, xcr0_pkru , XCR0.PKRU (bit 9) supported (XSAVE 
PKRU reg) - 0xd, 0, eax, 11, xcr0_cet_u , AMD XCR0.CET_U (bit 11) supported (CET supervisor state) - 0xd, 0, eax, 12, xcr0_cet_s , AMD XCR0.CET_S (bit 12) support (CET user state) + 0xd, 0, eax, 3, xcr0_mpx_bndregs , XCR0.BNDREGS (bit 3) supported (MPX BND0-BND3 registers) + 0xd, 0, eax, 4, xcr0_mpx_bndcsr , XCR0.BNDCSR (bit 4) supported (MPX BNDCFGU/BNDSTATUS registers) + 0xd, 0, eax, 5, xcr0_avx512_opmask , XCR0.OPMASK (bit 5) supported (AVX-512 k0-k7 registers) + 0xd, 0, eax, 6, xcr0_avx512_zmm_hi256 , XCR0.ZMM_Hi256 (bit 6) supported (AVX-512 ZMM0->ZMM7/15 registers) + 0xd, 0, eax, 7, xcr0_avx512_hi16_zmm , XCR0.HI16_ZMM (bit 7) supported (AVX-512 ZMM16->ZMM31 registers) + 0xd, 0, eax, 9, xcr0_pkru , XCR0.PKRU (bit 9) supported (XSAVE PKRU registers) + 0xd, 0, eax, 11, xcr0_cet_u , XCR0.CET_U (bit 11) supported (CET user state) + 0xd, 0, eax, 12, xcr0_cet_s , XCR0.CET_S (bit 12) supported (CET supervisor state) 0xd, 0, eax, 17, xcr0_tileconfig , XCR0.TILECONFIG (bit 17) supported (AMX can manage TILECONFIG) 0xd, 0, eax, 18, xcr0_tiledata , XCR0.TILEDATA (bit 18) supported (AMX can manage TILEDATA) - 0xd, 0, ebx, 31:0, xsave_sz_xcr0_enabled , XSAVE/XRSTR area byte size, for XCR0 enabled features - 0xd, 0, ecx, 31:0, xsave_sz_max , XSAVE/XRSTR area max byte size, all CPU features + 0xd, 0, ebx, 31:0, xsave_sz_xcr0_enabled , XSAVE/XRSTOR area byte size, for XCR0 enabled features + 0xd, 0, ecx, 31:0, xsave_sz_max , XSAVE/XRSTOR area max byte size, all CPU features 0xd, 0, edx, 30, xcr0_lwp , AMD XCR0.LWP (bit 62) supported (Light-weight Profiling) 0xd, 1, eax, 0, xsaveopt , XSAVEOPT instruction 0xd, 1, eax, 1, xsavec , XSAVEC instruction @@ -369,7 +370,7 @@ 0xd, 63:2, eax, 31:0, xsave_sz , Size of save area for subleaf-N feature, in bytes 0xd, 63:2, ebx, 31:0, xsave_offset , Offset of save area for subleaf-N feature, in bytes 0xd, 63:2, ecx, 0, is_xss_bit , Subleaf N describes an XSS bit, otherwise XCR0 bit - 0xd, 63:2, ecx, 1, compacted_xsave_64byte_aligned, When compacted, subleaf-N feature xsave area is 64-byte aligned + 0xd, 63:2, ecx, 1, compacted_xsave_64byte_aligned, When compacted, subleaf-N feature XSAVE area is 64-byte aligned # Leaf FH # Intel RDT / AMD PQoS resource monitoring @@ -426,17 +427,17 @@ 0x12, 1, ecx, 0, xfrm_x87 , Enclave XFRM.X87 (bit 0) supported 0x12, 1, ecx, 1, xfrm_sse , Enclave XFRM.SEE (bit 1) supported 0x12, 1, ecx, 2, xfrm_avx , Enclave XFRM.AVX (bit 2) supported - 0x12, 1, ecx, 3, xfrm_mpx_bndregs , Enclave XFRM.BNDREGS (bit 3) supported (MPX BND0-BND3 regs) - 0x12, 1, ecx, 4, xfrm_mpx_bndcsr , Enclave XFRM.BNDCSR (bit 4) supported (MPX BNDCFGU/BNDSTATUS regs) - 0x12, 1, ecx, 5, xfrm_avx512_opmask , Enclave XFRM.OPMASK (bit 5) supported (AVX-512 k0-k7 regs) - 0x12, 1, ecx, 6, xfrm_avx512_zmm_hi256 , Enclave XFRM.ZMM_Hi256 (bit 6) supported (AVX-512 ZMM0->ZMM7/15 regs) - 0x12, 1, ecx, 7, xfrm_avx512_hi16_zmm , Enclave XFRM.HI16_ZMM (bit 7) supported (AVX-512 ZMM16->ZMM31 regs) - 0x12, 1, ecx, 9, xfrm_pkru , Enclave XFRM.PKRU (bit 9) supported (XSAVE PKRU reg) + 0x12, 1, ecx, 3, xfrm_mpx_bndregs , Enclave XFRM.BNDREGS (bit 3) supported (MPX BND0-BND3 registers) + 0x12, 1, ecx, 4, xfrm_mpx_bndcsr , Enclave XFRM.BNDCSR (bit 4) supported (MPX BNDCFGU/BNDSTATUS registers) + 0x12, 1, ecx, 5, xfrm_avx512_opmask , Enclave XFRM.OPMASK (bit 5) supported (AVX-512 k0-k7 registers) + 0x12, 1, ecx, 6, xfrm_avx512_zmm_hi256 , Enclave XFRM.ZMM_Hi256 (bit 6) supported (AVX-512 ZMM0->ZMM7/15 registers) + 0x12, 1, ecx, 7, xfrm_avx512_hi16_zmm , Enclave XFRM.HI16_ZMM (bit 7) 
supported (AVX-512 ZMM16->ZMM31 registers) + 0x12, 1, ecx, 9, xfrm_pkru , Enclave XFRM.PKRU (bit 9) supported (XSAVE PKRU registers) 0x12, 1, ecx, 17, xfrm_tileconfig , Enclave XFRM.TILECONFIG (bit 17) supported (AMX can manage TILECONFIG) 0x12, 1, ecx, 18, xfrm_tiledata , Enclave XFRM.TILEDATA (bit 18) supported (AMX can manage TILEDATA) 0x12, 31:2, eax, 3:0, subleaf_type , Subleaf type (dictates output layout) - 0x12, 31:2, eax, 31:12, epc_sec_base_addr_0 , EPC section base addr, bits[12:31] - 0x12, 31:2, ebx, 19:0, epc_sec_base_addr_1 , EPC section base addr, bits[32:51] + 0x12, 31:2, eax, 31:12, epc_sec_base_addr_0 , EPC section base address, bits[12:31] + 0x12, 31:2, ebx, 19:0, epc_sec_base_addr_1 , EPC section base address, bits[32:51] 0x12, 31:2, ecx, 3:0, epc_sec_type , EPC section type / property encoding 0x12, 31:2, ecx, 31:12, epc_sec_size_0 , EPC section size, bits[12:31] 0x12, 31:2, edx, 19:0, epc_sec_size_1 , EPC section size, bits[32:51] @@ -444,7 +445,7 @@ # Leaf 14H # Intel Processor Trace enumeration - 0x14, 0, eax, 31:0, pt_max_subleaf , Max cpuid 0x14 subleaf + 0x14, 0, eax, 31:0, pt_max_subleaf , Maximum leaf 0x14 subleaf 0x14, 0, ebx, 0, cr3_filtering , IA32_RTIT_CR3_MATCH is accessible 0x14, 0, ebx, 1, psb_cyc , Configurable PSB and cycle-accurate mode 0x14, 0, ebx, 2, ip_filtering , IP/TraceStop filtering; Warm-reset PT MSRs preservation @@ -472,7 +473,7 @@ 0x15, 0, ecx, 31:0, cpu_crystal_hz , Core crystal clock nominal frequency, in Hz # Leaf 16H -# Intel processor fequency enumeration +# Intel processor frequency enumeration 0x16, 0, eax, 15:0, cpu_base_mhz , Processor base frequency, in MHz 0x16, 0, ebx, 15:0, cpu_max_mhz , Processor max frequency, in MHz @@ -481,9 +482,9 @@ # Leaf 17H # Intel SoC vendor attributes enumeration - 0x17, 0, eax, 31:0, soc_max_subleaf , Max cpuid leaf 0x17 subleaf + 0x17, 0, eax, 31:0, soc_max_subleaf , Maximum leaf 0x17 subleaf 0x17, 0, ebx, 15:0, soc_vendor_id , SoC vendor ID - 0x17, 0, ebx, 16, is_vendor_scheme , Assigned by industry enumaeratoion scheme (not Intel) + 0x17, 0, ebx, 16, is_vendor_scheme , Assigned by industry enumeration scheme (not Intel) 0x17, 0, ecx, 31:0, soc_proj_id , SoC project ID, assigned by vendor 0x17, 0, edx, 31:0, soc_stepping_id , Soc project stepping ID, assigned by vendor 0x17, 3:1, eax, 31:0, vendor_brand_a , Vendor Brand ID string, bytes subleaf_nr * (0 -> 3) @@ -494,18 +495,18 @@ # Leaf 18H # Intel determenestic address translation (TLB) parameters - 0x18, 31:0, eax, 31:0, tlb_max_subleaf , Max cpuid 0x18 subleaf + 0x18, 31:0, eax, 31:0, tlb_max_subleaf , Maximum leaf 0x18 subleaf 0x18, 31:0, ebx, 0, tlb_4k_page , TLB 4KB-page entries supported 0x18, 31:0, ebx, 1, tlb_2m_page , TLB 2MB-page entries supported 0x18, 31:0, ebx, 2, tlb_4m_page , TLB 4MB-page entries supported 0x18, 31:0, ebx, 3, tlb_1g_page , TLB 1GB-page entries supported - 0x18, 31:0, ebx, 10:8, hard_partitioning , (Hard/Soft) partitioning between logical CPUs sharing this struct + 0x18, 31:0, ebx, 10:8, hard_partitioning , (Hard/Soft) partitioning between logical CPUs sharing this structure 0x18, 31:0, ebx, 31:16, n_way_associative , Ways of associativity 0x18, 31:0, ecx, 31:0, n_sets , Number of sets 0x18, 31:0, edx, 4:0, tlb_type , Translation cache type (TLB type) 0x18, 31:0, edx, 7:5, tlb_cache_level , Translation cache level (1-based) 0x18, 31:0, edx, 8, is_fully_associative , Fully-associative structure - 0x18, 31:0, edx, 25:14, tlb_max_addressible_ids, Max num of addressible IDs for logical CPUs sharing this TLB - 1 + 0x18, 
31:0, edx, 25:14, tlb_max_addressible_ids, Max number of addressable IDs for logical CPUs sharing this TLB - 1 # Leaf 19H # Intel Key Locker enumeration @@ -568,7 +569,7 @@ # Intel AMX, TMUL (Tile-matrix MULtiply) accelerator unit enumeration 0x1e, 0, ebx, 7:0, tmul_maxk , TMUL unit maximum height, K (rows or columns) - 0x1e, 0, ebx, 23:8, tmul_maxn , TMUL unit maxiumum SIMD dimension, N (column bytes) + 0x1e, 0, ebx, 23:8, tmul_maxn , TMUL unit maximum SIMD dimension, N (column bytes) # Leaf 1FH # Intel extended topology enumeration v2 @@ -623,9 +624,9 @@ 0x40000000, 0, edx, 31:0, hypervisor_id_2 , Hypervisor ID string bytes 8 - 11 # Leaf 80000000H -# Maximum extended leaf number + CPU vendor string (AMD) +# Maximum extended leaf number + AMD/Transmeta CPU vendor string -0x80000000, 0, eax, 31:0, max_ext_leaf , Maximum extended cpuid leaf supported +0x80000000, 0, eax, 31:0, max_ext_leaf , Maximum extended CPUID leaf supported 0x80000000, 0, ebx, 31:0, cpu_vendorid_0 , Vendor ID string bytes 0 - 3 0x80000000, 0, ecx, 31:0, cpu_vendorid_2 , Vendor ID string bytes 8 - 11 0x80000000, 0, edx, 31:0, cpu_vendorid_1 , Vendor ID string bytes 4 - 7 @@ -636,6 +637,7 @@ 0x80000001, 0, eax, 3:0, e_stepping_id , Stepping ID 0x80000001, 0, eax, 7:4, e_base_model , Base processor model 0x80000001, 0, eax, 11:8, e_base_family , Base processor family +0x80000001, 0, eax, 13:12, e_base_type , Base processor type (Transmeta) 0x80000001, 0, eax, 19:16, e_ext_model , Extended processor model 0x80000001, 0, eax, 27:20, e_ext_family , Extended processor family 0x80000001, 0, ebx, 15:0, brand_id , Brand ID @@ -659,7 +661,7 @@ 0x80000001, 0, ecx, 17, tce , Translation cache extension 0x80000001, 0, ecx, 19, nodeid_msr , NodeId MSR (0xc001100c) 0x80000001, 0, ecx, 21, tbm , Trailing bit manipulations -0x80000001, 0, ecx, 22, topoext , Topology Extensions (cpuid leaf 0x8000001d) +0x80000001, 0, ecx, 22, topoext , Topology Extensions (leaf 0x8000001d) 0x80000001, 0, ecx, 23, perfctr_core , Core performance counter extensions 0x80000001, 0, ecx, 24, perfctr_nb , NB/DF performance counter extensions 0x80000001, 0, ecx, 26, bpext , Data access breakpoint extension @@ -687,6 +689,7 @@ 0x80000001, 0, edx, 19, mp , Out-of-spec AMD Multiprocessing bit 0x80000001, 0, edx, 20, nx , No-execute page protection 0x80000001, 0, edx, 22, mmxext , AMD MMX extensions +0x80000001, 0, edx, 23, e_mmx , MMX instructions 0x80000001, 0, edx, 24, e_fxsr , FXSAVE and FXRSTOR instructions 0x80000001, 0, edx, 25, fxsr_opt , FXSAVE and FXRSTOR optimizations 0x80000001, 0, edx, 26, pdpe1gb , 1-GB large page support @@ -720,11 +723,11 @@ 0x80000004, 0, edx, 31:0, cpu_brandid_11 , CPU brand ID string, bytes 44 - 47 # Leaf 80000005H -# AMD L1 cache and L1 TLB enumeration +# AMD/Transmeta L1 cache and L1 TLB enumeration -0x80000005, 0, eax, 7:0, l1_itlb_2m_4m_nentries , L1 ITLB #entires, 2M and 4M pages +0x80000005, 0, eax, 7:0, l1_itlb_2m_4m_nentries , L1 ITLB #entries, 2M and 4M pages 0x80000005, 0, eax, 15:8, l1_itlb_2m_4m_assoc , L1 ITLB associativity, 2M and 4M pages -0x80000005, 0, eax, 23:16, l1_dtlb_2m_4m_nentries , L1 DTLB #entires, 2M and 4M pages +0x80000005, 0, eax, 23:16, l1_dtlb_2m_4m_nentries , L1 DTLB #entries, 2M and 4M pages 0x80000005, 0, eax, 31:24, l1_dtlb_2m_4m_assoc , L1 DTLB associativity, 2M and 4M pages 0x80000005, 0, ebx, 7:0, l1_itlb_4k_nentries , L1 ITLB #entries, 4K pages 0x80000005, 0, ebx, 15:8, l1_itlb_4k_assoc , L1 ITLB associativity, 4K pages @@ -763,11 +766,11 @@ # CPU power management (mostly AMD) and AMD RAS 
enumeration 0x80000007, 0, ebx, 0, overflow_recov , MCA overflow conditions not fatal -0x80000007, 0, ebx, 1, succor , Software containment of UnCORRectable errors +0x80000007, 0, ebx, 1, succor , Software containment of uncorrectable errors 0x80000007, 0, ebx, 2, hw_assert , Hardware assert MSRs 0x80000007, 0, ebx, 3, smca , Scalable MCA (MCAX MSRs) 0x80000007, 0, ecx, 31:0, cpu_pwr_sample_ratio , CPU power sample time ratio -0x80000007, 0, edx, 0, digital_temp , Digital temprature sensor +0x80000007, 0, edx, 0, digital_temp , Digital temperature sensor 0x80000007, 0, edx, 1, powernow_freq_id , PowerNOW! frequency scaling 0x80000007, 0, edx, 2, powernow_volt_id , PowerNOW! voltage scaling 0x80000007, 0, edx, 3, thermal_trip , THERMTRIP (Thermal Trip) @@ -810,7 +813,7 @@ 0x80000008, 0, ebx, 23, amd_ppin , Protected Processor Inventory Number 0x80000008, 0, ebx, 24, amd_ssbd , Speculative Store Bypass Disable 0x80000008, 0, ebx, 25, virt_ssbd , virtualized SSBD (Speculative Store Bypass Disable) -0x80000008, 0, ebx, 26, amd_ssb_no , SSBD not needed (fixed in HW) +0x80000008, 0, ebx, 26, amd_ssb_no , SSBD is not needed (fixed in hardware) 0x80000008, 0, ebx, 27, cppc , Collaborative Processor Performance Control 0x80000008, 0, ebx, 28, amd_psfd , Predictive Store Forward Disable 0x80000008, 0, ebx, 29, btc_no , CPU not affected by Branch Type Confusion @@ -838,7 +841,7 @@ 0x8000000a, 0, edx, 10, pausefilter , Pause intercept filter 0x8000000a, 0, edx, 12, pfthreshold , Pause filter threshold 0x8000000a, 0, edx, 13, avic , Advanced virtual interrupt controller -0x8000000a, 0, edx, 15, v_vmsave_vmload , Virtual VMSAVE/VMLOAD (nested virt) +0x8000000a, 0, edx, 15, v_vmsave_vmload , Virtual VMSAVE/VMLOAD (nested virtualization) 0x8000000a, 0, edx, 16, vgif , Virtualize the Global Interrupt Flag 0x8000000a, 0, edx, 17, gmet , Guest mode execution trap 0x8000000a, 0, edx, 18, x2avic , Virtual x2APIC @@ -850,7 +853,7 @@ 0x8000000a, 0, edx, 25, vnmi , NMI virtualization 0x8000000a, 0, edx, 26, ibs_virt , IBS Virtualization 0x8000000a, 0, edx, 27, ext_lvt_off_chg , Extended LVT offset fault change -0x8000000a, 0, edx, 28, svme_addr_chk , Guest SVME addr check +0x8000000a, 0, edx, 28, svme_addr_chk , Guest SVME address check # Leaf 80000019H # AMD TLB 1G-pages enumeration @@ -891,20 +894,20 @@ # AMD LWP (Lightweight Profiling) 0x8000001c, 0, eax, 0, os_lwp_avail , LWP is available to application programs (supported by OS) -0x8000001c, 0, eax, 1, os_lpwval , LWPVAL instruction (EventId=1) is supported by OS -0x8000001c, 0, eax, 2, os_lwp_ire , Instructions Retired Event (EventId=2) is supported by OS -0x8000001c, 0, eax, 3, os_lwp_bre , Branch Retired Event (EventId=3) is supported by OS -0x8000001c, 0, eax, 4, os_lwp_dme , DCache Miss Event (EventId=4) is supported by OS -0x8000001c, 0, eax, 5, os_lwp_cnh , CPU Clocks Not Halted event (EventId=5) is supported by OS -0x8000001c, 0, eax, 6, os_lwp_rnh , CPU Reference clocks Not Halted event (EventId=6) is supported by OS +0x8000001c, 0, eax, 1, os_lpwval , LWPVAL instruction is supported by OS +0x8000001c, 0, eax, 2, os_lwp_ire , Instructions Retired Event is supported by OS +0x8000001c, 0, eax, 3, os_lwp_bre , Branch Retired Event is supported by OS +0x8000001c, 0, eax, 4, os_lwp_dme , Dcache Miss Event is supported by OS +0x8000001c, 0, eax, 5, os_lwp_cnh , CPU Clocks Not Halted event is supported by OS +0x8000001c, 0, eax, 6, os_lwp_rnh , CPU Reference clocks Not Halted event is supported by OS 0x8000001c, 0, eax, 29, os_lwp_cont , LWP sampling in 
continuous mode is supported by OS 0x8000001c, 0, eax, 30, os_lwp_ptsc , Performance Time Stamp Counter in event records is supported by OS 0x8000001c, 0, eax, 31, os_lwp_int , Interrupt on threshold overflow is supported by OS 0x8000001c, 0, ebx, 7:0, lwp_lwpcb_sz , LWP Control Block size, in quadwords 0x8000001c, 0, ebx, 15:8, lwp_event_sz , LWP event record size, in bytes -0x8000001c, 0, ebx, 23:16, lwp_max_events , LWP max supported EventId value (EventID 255 not included) +0x8000001c, 0, ebx, 23:16, lwp_max_events , LWP max supported EventID value (EventID 255 not included) 0x8000001c, 0, ebx, 31:24, lwp_event_offset , LWP events area offset in the LWP Control Block -0x8000001c, 0, ecx, 4:0, lwp_latency_max , Num of bits in cache latency counters (10 to 31) +0x8000001c, 0, ecx, 4:0, lwp_latency_max , Number of bits in cache latency counters (10 to 31) 0x8000001c, 0, ecx, 5, lwp_data_adddr , Cache miss events report the data address of the reference 0x8000001c, 0, ecx, 8:6, lwp_latency_rnd , Amount by which cache latency is rounded 0x8000001c, 0, ecx, 15:9, lwp_version , LWP implementation version @@ -913,16 +916,16 @@ 0x8000001c, 0, ecx, 29, lwp_ip_filtering , IP filtering (IPI, IPF, BaseIP, and LimitIP @ LWPCP) supported 0x8000001c, 0, ecx, 30, lwp_cache_levels , Cache-related events can be filtered by cache level 0x8000001c, 0, ecx, 31, lwp_cache_latency , Cache-related events can be filtered by latency -0x8000001c, 0, edx, 0, hw_lwp_avail , LWP is available in Hardware -0x8000001c, 0, edx, 1, hw_lpwval , LWPVAL instruction (EventId=1) is available in HW -0x8000001c, 0, edx, 2, hw_lwp_ire , Instructions Retired Event (EventId=2) is available in HW -0x8000001c, 0, edx, 3, hw_lwp_bre , Branch Retired Event (EventId=3) is available in HW -0x8000001c, 0, edx, 4, hw_lwp_dme , DCache Miss Event (EventId=4) is available in HW -0x8000001c, 0, edx, 5, hw_lwp_cnh , CPU Clocks Not Halted event (EventId=5) is available in HW -0x8000001c, 0, edx, 6, hw_lwp_rnh , CPU Reference clocks Not Halted event (EventId=6) is available in HW -0x8000001c, 0, edx, 29, hw_lwp_cont , LWP sampling in continuous mode is available in HW -0x8000001c, 0, edx, 30, hw_lwp_ptsc , Performance Time Stamp Counter in event records is available in HW -0x8000001c, 0, edx, 31, hw_lwp_int , Interrupt on threshold overflow is available in HW +0x8000001c, 0, edx, 0, hw_lwp_avail , LWP is available in hardware +0x8000001c, 0, edx, 1, hw_lpwval , LWPVAL instruction is available in hardware +0x8000001c, 0, edx, 2, hw_lwp_ire , Instructions Retired Event is available in hardware +0x8000001c, 0, edx, 3, hw_lwp_bre , Branch Retired Event is available in hardware +0x8000001c, 0, edx, 4, hw_lwp_dme , Dcache Miss Event is available in hardware +0x8000001c, 0, edx, 5, hw_lwp_cnh , Clocks Not Halted event is available in hardware +0x8000001c, 0, edx, 6, hw_lwp_rnh , Reference clocks Not Halted event is available in hardware +0x8000001c, 0, edx, 29, hw_lwp_cont , LWP sampling in continuous mode is available in hardware +0x8000001c, 0, edx, 30, hw_lwp_ptsc , Performance Time Stamp Counter in event records is available in hardware +0x8000001c, 0, edx, 31, hw_lwp_int , Interrupt on threshold overflow is available in hardware # Leaf 8000001DH # AMD deterministic cache parameters @@ -958,10 +961,10 @@ 0x8000001f, 0, eax, 4, sev_nested_paging , SEV secure nested paging supported 0x8000001f, 0, eax, 5, vm_permission_levels , VMPL supported 0x8000001f, 0, eax, 6, rpmquery , RPMQUERY instruction supported -0x8000001f, 0, eax, 7, vmpl_sss , VMPL 
supervisor shadwo stack supported +0x8000001f, 0, eax, 7, vmpl_sss , VMPL supervisor shadow stack supported 0x8000001f, 0, eax, 8, secure_tsc , Secure TSC supported 0x8000001f, 0, eax, 9, v_tsc_aux , Hardware virtualizes TSC_AUX -0x8000001f, 0, eax, 10, sme_coherent , HW enforces cache coherency across encryption domains +0x8000001f, 0, eax, 10, sme_coherent , Cache coherency is enforced across encryption domains 0x8000001f, 0, eax, 11, req_64bit_hypervisor , SEV guest mandates 64-bit hypervisor 0x8000001f, 0, eax, 12, restricted_injection , Restricted Injection supported 0x8000001f, 0, eax, 13, alternate_injection , Alternate Injection supported @@ -973,13 +976,13 @@ 0x8000001f, 0, eax, 19, virt_ibs , IBS state virtualization is supported for SEV-ES guests 0x8000001f, 0, eax, 24, vmsa_reg_protection , VMSA register protection is supported 0x8000001f, 0, eax, 25, smt_protection , SMT protection is supported -0x8000001f, 0, eax, 28, svsm_page_msr , SVSM communication page MSR (0xc001f000h) is supported +0x8000001f, 0, eax, 28, svsm_page_msr , SVSM communication page MSR (0xc001f000) is supported 0x8000001f, 0, eax, 29, nested_virt_snp_msr , VIRT_RMPUPDATE/VIRT_PSMASH MSRs are supported 0x8000001f, 0, ebx, 5:0, pte_cbit_pos , PTE bit number used to enable memory encryption 0x8000001f, 0, ebx, 11:6, phys_addr_reduction_nbits, Reduction of phys address space when encryption is enabled, in bits 0x8000001f, 0, ebx, 15:12, vmpl_count , Number of VM permission levels (VMPL) supported 0x8000001f, 0, ecx, 31:0, enc_guests_max , Max supported number of simultaneous encrypted guests -0x8000001f, 0, edx, 31:0, min_sev_asid_no_sev_es , Mininum ASID for SEV-enabled SEV-ES-disabled guest +0x8000001f, 0, edx, 31:0, min_sev_asid_no_sev_es , Minimum ASID for SEV-enabled SEV-ES-disabled guest # Leaf 80000020H # AMD Platform QoS extended feature IDs @@ -988,6 +991,8 @@ 0x80000020, 0, ebx, 2, smba , Slow Memory Bandwidth Allocation support 0x80000020, 0, ebx, 3, bmec , Bandwidth Monitoring Event Configuration support 0x80000020, 0, ebx, 4, l3rr , L3 Range Reservation support +0x80000020, 0, ebx, 5, abmc , Assignable Bandwidth Monitoring Counters +0x80000020, 0, ebx, 6, sdciae , Smart Data Cache Injection (SDCI) Allocation Enforcement 0x80000020, 1, eax, 31:0, mba_limit_len , MBA enforcement limit size 0x80000020, 1, edx, 31:0, mba_cos_max , MBA max Class of Service number (zero-based) 0x80000020, 2, eax, 31:0, smba_limit_len , SMBA enforcement limit size @@ -1007,17 +1012,26 @@ 0x80000021, 0, eax, 0, no_nested_data_bp , No nested data breakpoints 0x80000021, 0, eax, 1, fsgs_non_serializing , WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing 0x80000021, 0, eax, 2, lfence_rdtsc , LFENCE always serializing / synchronizes RDTSC -0x80000021, 0, eax, 3, smm_page_cfg_lock , SMM paging configuration lock is supported +0x80000021, 0, eax, 3, smm_page_cfg_lock , SMM paging configuration lock 0x80000021, 0, eax, 6, null_sel_clr_base , Null selector clears base -0x80000021, 0, eax, 7, upper_addr_ignore , EFER MSR Upper Address Ignore Enable bit supported -0x80000021, 0, eax, 8, autoibrs , EFER MSR Automatic IBRS enable bit supported -0x80000021, 0, eax, 9, no_smm_ctl_msr , SMM_CTL MSR (0xc0010116) is not present -0x80000021, 0, eax, 10, fsrs_supported , Fast Short Rep Stosb (FSRS) is supported -0x80000021, 0, eax, 11, fsrc_supported , Fast Short Repe Cmpsb (FSRC) is supported -0x80000021, 0, eax, 13, prefetch_ctl_msr , Prefetch control MSR is supported +0x80000021, 0, eax, 7, upper_addr_ignore , EFER MSR Upper Address 
Ignore +0x80000021, 0, eax, 8, autoibrs , EFER MSR Automatic IBRS +0x80000021, 0, eax, 9, no_smm_ctl_msr , SMM_CTL MSR (0xc0010116) is not available +0x80000021, 0, eax, 10, fsrs , Fast Short Rep STOSB +0x80000021, 0, eax, 11, fsrc , Fast Short Rep CMPSB +0x80000021, 0, eax, 13, prefetch_ctl_msr , Prefetch control MSR is available +0x80000021, 0, eax, 16, opcode_reclaim , Reserves opcode space 0x80000021, 0, eax, 17, user_cpuid_disable , #GP when executing CPUID at CPL > 0 is supported -0x80000021, 0, eax, 18, epsf_supported , Enhanced Predictive Store Forwarding (EPSF) is supported -0x80000021, 0, ebx, 11:0, microcode_patch_size , Size of microcode patch, in 16-byte units +0x80000021, 0, eax, 18, epsf , Enhanced Predictive Store Forwarding +0x80000021, 0, eax, 22, wl_feedback , Workload-based heuristic feedback to OS +0x80000021, 0, eax, 24, eraps , Enhanced Return Address Predictor Security +0x80000021, 0, eax, 27, sbpb , Selective Branch Predictor Barrier +0x80000021, 0, eax, 28, ibpb_brtype , Branch predictions flushed from CPU branch predictor +0x80000021, 0, eax, 29, srso_no , CPU is not subject to the SRSO vulnerability +0x80000021, 0, eax, 30, srso_uk_no , CPU is not vulnerable to SRSO at user-kernel boundary +0x80000021, 0, eax, 31, srso_msr_fix , Software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO +0x80000021, 0, ebx, 15:0, microcode_patch_size , Size of microcode patch, in 16-byte units +0x80000021, 0, ebx, 23:16, rap_size , Return Address Predictor size # Leaf 80000022H # AMD Performance Monitoring v2 enumeration @@ -1025,7 +1039,7 @@ 0x80000022, 0, eax, 0, perfmon_v2 , Performance monitoring v2 supported 0x80000022, 0, eax, 1, lbr_v2 , Last Branch Record v2 extensions (LBR Stack) 0x80000022, 0, eax, 2, lbr_pmc_freeze , Freezing core performance counters / LBR Stack supported -0x80000022, 0, ebx, 3:0, n_pmc_core , Number of core perfomance counters +0x80000022, 0, ebx, 3:0, n_pmc_core , Number of core performance counters 0x80000022, 0, ebx, 9:4, lbr_v2_stack_size , Number of available LBR stack entries 0x80000022, 0, ebx, 15:10, n_pmc_northbridge , Number of available northbridge (data fabric) performance counters 0x80000022, 0, ebx, 21:16, n_pmc_umc , Number of available UMC performance counters @@ -1035,7 +1049,7 @@ # AMD Secure Multi-key Encryption enumeration 0x80000023, 0, eax, 0, mem_hmk_mode , MEM-HMK encryption mode is supported -0x80000023, 0, ebx, 15:0, mem_hmk_avail_keys , MEM-HMK mode: total num of available encryption keys +0x80000023, 0, ebx, 15:0, mem_hmk_avail_keys , MEM-HMK mode: total number of available encryption keys # Leaf 80000026H # AMD extended topology enumeration v2 @@ -1051,3 +1065,108 @@ 0x80000026, 3:0, ecx, 7:0, domain_level , This domain level (subleaf ID) 0x80000026, 3:0, ecx, 15:8, domain_type , This domain type 0x80000026, 3:0, edx, 31:0, x2apic_id , x2APIC ID of current logical CPU + +# Leaf 80860000H +# Maximum Transmeta leaf number + CPU vendor ID string + +0x80860000, 0, eax, 31:0, max_tra_leaf , Maximum supported Transmeta leaf number +0x80860000, 0, ebx, 31:0, cpu_vendorid_0 , Transmeta Vendor ID string bytes 0 - 3 +0x80860000, 0, ecx, 31:0, cpu_vendorid_2 , Transmeta Vendor ID string bytes 8 - 11 +0x80860000, 0, edx, 31:0, cpu_vendorid_1 , Transmeta Vendor ID string bytes 4 - 7 + +# Leaf 80860001H +# Transmeta extended CPU information + +0x80860001, 0, eax, 3:0, stepping , Stepping ID +0x80860001, 0, eax, 7:4, base_model , Base CPU model ID +0x80860001, 0, eax, 11:8, base_family_id , Base CPU family ID +0x80860001, 0, eax, 
13:12, cpu_type , CPU type +0x80860001, 0, ebx, 7:0, cpu_rev_mask_minor , CPU revision ID, mask minor +0x80860001, 0, ebx, 15:8, cpu_rev_mask_major , CPU revision ID, mask major +0x80860001, 0, ebx, 23:16, cpu_rev_minor , CPU revision ID, minor +0x80860001, 0, ebx, 31:24, cpu_rev_major , CPU revision ID, major +0x80860001, 0, ecx, 31:0, cpu_base_mhz , CPU nominal frequency, in MHz +0x80860001, 0, edx, 0, recovery , Recovery CMS is active (after bad flush) +0x80860001, 0, edx, 1, longrun , LongRun power management capabilities +0x80860001, 0, edx, 3, lrti , LongRun Table Interface + +# Leaf 80860002H +# Transmeta Code Morphing Software (CMS) enumeration + +0x80860002, 0, eax, 31:0, cpu_rev_id , CPU revision ID +0x80860002, 0, ebx, 7:0, cms_rev_mask_2 , CMS revision ID, mask component 2 +0x80860002, 0, ebx, 15:8, cms_rev_mask_1 , CMS revision ID, mask component 1 +0x80860002, 0, ebx, 23:16, cms_rev_minor , CMS revision ID, minor +0x80860002, 0, ebx, 31:24, cms_rev_major , CMS revision ID, major +0x80860002, 0, ecx, 31:0, cms_rev_mask_3 , CMS revision ID, mask component 3 + +# Leaf 80860003H +# Transmeta CPU information string, bytes 0 - 15 + +0x80860003, 0, eax, 31:0, cpu_info_0 , CPU info string bytes 0 - 3 +0x80860003, 0, ebx, 31:0, cpu_info_1 , CPU info string bytes 4 - 7 +0x80860003, 0, ecx, 31:0, cpu_info_2 , CPU info string bytes 8 - 11 +0x80860003, 0, edx, 31:0, cpu_info_3 , CPU info string bytes 12 - 15 + +# Leaf 80860004H +# Transmeta CPU information string, bytes 16 - 31 + +0x80860004, 0, eax, 31:0, cpu_info_4 , CPU info string bytes 16 - 19 +0x80860004, 0, ebx, 31:0, cpu_info_5 , CPU info string bytes 20 - 23 +0x80860004, 0, ecx, 31:0, cpu_info_6 , CPU info string bytes 24 - 27 +0x80860004, 0, edx, 31:0, cpu_info_7 , CPU info string bytes 28 - 31 + +# Leaf 80860005H +# Transmeta CPU information string, bytes 32 - 47 + +0x80860005, 0, eax, 31:0, cpu_info_8 , CPU info string bytes 32 - 35 +0x80860005, 0, ebx, 31:0, cpu_info_9 , CPU info string bytes 36 - 39 +0x80860005, 0, ecx, 31:0, cpu_info_10 , CPU info string bytes 40 - 43 +0x80860005, 0, edx, 31:0, cpu_info_11 , CPU info string bytes 44 - 47 + +# Leaf 80860006H +# Transmeta CPU information string, bytes 48 - 63 + +0x80860006, 0, eax, 31:0, cpu_info_12 , CPU info string bytes 48 - 51 +0x80860006, 0, ebx, 31:0, cpu_info_13 , CPU info string bytes 52 - 55 +0x80860006, 0, ecx, 31:0, cpu_info_14 , CPU info string bytes 56 - 59 +0x80860006, 0, edx, 31:0, cpu_info_15 , CPU info string bytes 60 - 63 + +# Leaf 80860007H +# Transmeta live CPU information + +0x80860007, 0, eax, 31:0, cpu_cur_mhz , Current CPU frequency, in MHz +0x80860007, 0, ebx, 31:0, cpu_cur_voltage , Current CPU voltage, in millivolts +0x80860007, 0, ecx, 31:0, cpu_cur_perf_pctg , Current CPU performance percentage, 0 - 100 +0x80860007, 0, edx, 31:0, cpu_cur_gate_delay , Current CPU gate delay, in femtoseconds + +# Leaf C0000000H +# Maximum Centaur/Zhaoxin leaf number + +0xc0000000, 0, eax, 31:0, max_cntr_leaf , Maximum Centaur/Zhaoxin leaf number + +# Leaf C0000001H +# Centaur/Zhaoxin extended CPU features + +0xc0000001, 0, edx, 0, ccs_sm2 , CCS SM2 instructions +0xc0000001, 0, edx, 1, ccs_sm2_en , CCS SM2 enabled +0xc0000001, 0, edx, 2, xstore , Random Number Generator +0xc0000001, 0, edx, 3, xstore_en , RNG enabled +0xc0000001, 0, edx, 4, ccs_sm3_sm4 , CCS SM3 and SM4 instructions +0xc0000001, 0, edx, 5, ccs_sm3_sm4_en , CCS SM3/SM4 enabled +0xc0000001, 0, edx, 6, ace , Advanced Cryptography Engine +0xc0000001, 0, edx, 7, ace_en , ACE enabled +0xc0000001, 0, edx, 
8, ace2 , Advanced Cryptography Engine v2 +0xc0000001, 0, edx, 9, ace2_en , ACE v2 enabled +0xc0000001, 0, edx, 10, phe , PadLock Hash Engine +0xc0000001, 0, edx, 11, phe_en , PHE enabled +0xc0000001, 0, edx, 12, pmm , PadLock Montgomery Multiplier +0xc0000001, 0, edx, 13, pmm_en , PMM enabled +0xc0000001, 0, edx, 16, parallax , Parallax auto adjust processor voltage +0xc0000001, 0, edx, 17, parallax_en , Parallax enabled +0xc0000001, 0, edx, 20, tm3 , Thermal Monitor v3 +0xc0000001, 0, edx, 21, tm3_en , TM v3 enabled +0xc0000001, 0, edx, 25, phe2 , PadLock Hash Engine v2 (SHA384/SHA512) +0xc0000001, 0, edx, 26, phe2_en , PHE v2 enabled +0xc0000001, 0, edx, 27, rsa , RSA instructions (XMODEXP/MONTMUL2) +0xc0000001, 0, edx, 28, rsa_en , RSA instructions enabled diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c index 1b25c0a95d3f..7dc6b9235d02 100644 --- a/tools/arch/x86/kcpuid/kcpuid.c +++ b/tools/arch/x86/kcpuid/kcpuid.c @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE -#include <stdio.h> +#include <cpuid.h> +#include <err.h> +#include <getopt.h> #include <stdbool.h> +#include <stdio.h> #include <stdlib.h> #include <string.h> -#include <getopt.h> #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define min(a, b) (((a) < (b)) ? (a) : (b)) +#define __noreturn __attribute__((__noreturn__)) typedef unsigned int u32; typedef unsigned long long u64; @@ -49,7 +52,7 @@ static const char * const reg_names[] = { struct subleaf { u32 index; u32 sub; - u32 eax, ebx, ecx, edx; + u32 output[NR_REGS]; struct reg_desc info[NR_REGS]; }; @@ -63,21 +66,64 @@ struct cpuid_func { int nr; }; +enum range_index { + RANGE_STD = 0, /* Standard */ + RANGE_EXT = 0x80000000, /* Extended */ + RANGE_TSM = 0x80860000, /* Transmeta */ + RANGE_CTR = 0xc0000000, /* Centaur/Zhaoxin */ +}; + +#define CPUID_INDEX_MASK 0xffff0000 +#define CPUID_FUNCTION_MASK (~CPUID_INDEX_MASK) + struct cpuid_range { /* array of main leafs */ struct cpuid_func *funcs; /* number of valid leafs */ int nr; - bool is_ext; + enum range_index index; }; -/* - * basic: basic functions range: [0... ] - * ext: extended functions range: [0x80000000... 
] - */ -struct cpuid_range *leafs_basic, *leafs_ext; +static struct cpuid_range ranges[] = { + { .index = RANGE_STD, }, + { .index = RANGE_EXT, }, + { .index = RANGE_TSM, }, + { .index = RANGE_CTR, }, +}; + +static char *range_to_str(struct cpuid_range *range) +{ + switch (range->index) { + case RANGE_STD: return "Standard"; + case RANGE_EXT: return "Extended"; + case RANGE_TSM: return "Transmeta"; + case RANGE_CTR: return "Centaur"; + default: return NULL; + } +} + +#define __for_each_cpuid_range(range, __condition) \ + for (unsigned int i = 0; \ + i < ARRAY_SIZE(ranges) && ((range) = &ranges[i]) && (__condition); \ + i++) + +#define for_each_valid_cpuid_range(range) __for_each_cpuid_range(range, (range)->nr != 0) +#define for_each_cpuid_range(range) __for_each_cpuid_range(range, true) + +struct cpuid_range *index_to_cpuid_range(u32 index) +{ + u32 func_idx = index & CPUID_FUNCTION_MASK; + u32 range_idx = index & CPUID_INDEX_MASK; + struct cpuid_range *range; + + for_each_valid_cpuid_range(range) { + if (range->index == range_idx && (u32)range->nr > func_idx) + return range; + } + + return NULL; +} -static bool is_amd; static bool show_details; static bool show_raw; static bool show_flags_only = true; @@ -85,16 +131,16 @@ static u32 user_index = 0xFFFFFFFF; static u32 user_sub = 0xFFFFFFFF; static int flines; -static inline void cpuid(u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) -{ - /* ecx is often an input as well as an output. */ - asm volatile("cpuid" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (*eax), "2" (*ecx)); -} +/* + * Force using <cpuid.h> __cpuid_count() instead of __cpuid(). The + * latter leaves ECX uninitialized, which can break CPUID queries. + */ + +#define cpuid(leaf, a, b, c, d) \ + __cpuid_count(leaf, 0, a, b, c, d) + +#define cpuid_count(leaf, subleaf, a, b, c, d) \ + __cpuid_count(leaf, subleaf, a, b, c, d) static inline bool has_subleafs(u32 f) { @@ -117,11 +163,11 @@ static void leaf_print_raw(struct subleaf *leaf) if (leaf->sub == 0) printf("0x%08x: subleafs:\n", leaf->index); - printf(" %2d: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n", - leaf->sub, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx); + printf(" %2d: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n", leaf->sub, + leaf->output[0], leaf->output[1], leaf->output[2], leaf->output[3]); } else { - printf("0x%08x: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n", - leaf->index, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx); + printf("0x%08x: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n", leaf->index, + leaf->output[0], leaf->output[1], leaf->output[2], leaf->output[3]); } } @@ -140,19 +186,19 @@ static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf, * Cut off vendor-prefix from CPUID function as we're using it as an * index into ->funcs. 
*/ - func = &range->funcs[f & 0xffff]; + func = &range->funcs[f & CPUID_FUNCTION_MASK]; if (!func->leafs) { func->leafs = malloc(sizeof(struct subleaf)); if (!func->leafs) - perror("malloc func leaf"); + err(EXIT_FAILURE, NULL); func->nr = 1; } else { s = func->nr; func->leafs = realloc(func->leafs, (s + 1) * sizeof(*leaf)); if (!func->leafs) - perror("realloc f->leafs"); + err(EXIT_FAILURE, NULL); func->nr++; } @@ -161,84 +207,73 @@ static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf, leaf->index = f; leaf->sub = subleaf; - leaf->eax = a; - leaf->ebx = b; - leaf->ecx = c; - leaf->edx = d; + leaf->output[R_EAX] = a; + leaf->output[R_EBX] = b; + leaf->output[R_ECX] = c; + leaf->output[R_EDX] = d; return false; } static void raw_dump_range(struct cpuid_range *range) { - u32 f; - int i; - - printf("%s Leafs :\n", range->is_ext ? "Extended" : "Basic"); + printf("%s Leafs :\n", range_to_str(range)); printf("================\n"); - for (f = 0; (int)f < range->nr; f++) { + for (u32 f = 0; (int)f < range->nr; f++) { struct cpuid_func *func = &range->funcs[f]; - u32 index = f; - - if (range->is_ext) - index += 0x80000000; /* Skip leaf without valid items */ if (!func->nr) continue; /* First item is the main leaf, followed by all subleafs */ - for (i = 0; i < func->nr; i++) + for (int i = 0; i < func->nr; i++) leaf_print_raw(&func->leafs[i]); } } #define MAX_SUBLEAF_NUM 64 -struct cpuid_range *setup_cpuid_range(u32 input_eax) +#define MAX_RANGE_INDEX_OFFSET 0xff +void setup_cpuid_range(struct cpuid_range *range) { - u32 max_func, idx_func, subleaf, max_subleaf; - u32 eax, ebx, ecx, edx, f = input_eax; - struct cpuid_range *range; - bool allzero; - - eax = input_eax; - ebx = ecx = edx = 0; + u32 max_func, range_funcs_sz; + u32 eax, ebx, ecx, edx; - cpuid(&eax, &ebx, &ecx, &edx); - max_func = eax; - idx_func = (max_func & 0xffff) + 1; + cpuid(range->index, max_func, ebx, ecx, edx); - range = malloc(sizeof(struct cpuid_range)); - if (!range) - perror("malloc range"); + /* + * If the CPUID range's maximum function value is garbage, then it + * is not recognized by this CPU. Set the range's number of valid + * leaves to zero so that for_each_valid_cpu_range() can ignore it. 
+ */ + if (max_func < range->index || max_func > (range->index + MAX_RANGE_INDEX_OFFSET)) { + range->nr = 0; + return; + } - if (input_eax & 0x80000000) - range->is_ext = true; - else - range->is_ext = false; + range->nr = (max_func & CPUID_FUNCTION_MASK) + 1; + range_funcs_sz = range->nr * sizeof(struct cpuid_func); - range->funcs = malloc(sizeof(struct cpuid_func) * idx_func); + range->funcs = malloc(range_funcs_sz); if (!range->funcs) - perror("malloc range->funcs"); + err(EXIT_FAILURE, NULL); - range->nr = idx_func; - memset(range->funcs, 0, sizeof(struct cpuid_func) * idx_func); + memset(range->funcs, 0, range_funcs_sz); - for (; f <= max_func; f++) { - eax = f; - subleaf = ecx = 0; + for (u32 f = range->index; f <= max_func; f++) { + u32 max_subleaf = MAX_SUBLEAF_NUM; + bool allzero; - cpuid(&eax, &ebx, &ecx, &edx); - allzero = cpuid_store(range, f, subleaf, eax, ebx, ecx, edx); + cpuid(f, eax, ebx, ecx, edx); + + allzero = cpuid_store(range, f, 0, eax, ebx, ecx, edx); if (allzero) continue; if (!has_subleafs(f)) continue; - max_subleaf = MAX_SUBLEAF_NUM; - /* * Some can provide the exact number of subleafs, * others have to be tried (0xf) @@ -256,20 +291,15 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax) if (f == 0x80000026) max_subleaf = 5; - for (subleaf = 1; subleaf < max_subleaf; subleaf++) { - eax = f; - ecx = subleaf; + for (u32 subleaf = 1; subleaf < max_subleaf; subleaf++) { + cpuid_count(f, subleaf, eax, ebx, ecx, edx); - cpuid(&eax, &ebx, &ecx, &edx); - allzero = cpuid_store(range, f, subleaf, - eax, ebx, ecx, edx); + allzero = cpuid_store(range, f, subleaf, eax, ebx, ecx, edx); if (allzero) continue; } } - - return range; } /* @@ -280,15 +310,13 @@ struct cpuid_range *setup_cpuid_range(u32 input_eax) * 0, 0, EAX, 31:0, max_basic_leafs, Max input value for supported subleafs * 1, 0, ECX, 0, sse3, Streaming SIMD Extensions 3(SSE3) */ -static int parse_line(char *line) +static void parse_line(char *line) { char *str; - int i; struct cpuid_range *range; struct cpuid_func *func; struct subleaf *leaf; u32 index; - u32 sub; char buffer[512]; char *buf; /* @@ -310,12 +338,12 @@ static int parse_line(char *line) /* Skip comments and NULL line */ if (line[0] == '#' || line[0] == '\n') - return 0; + return; strncpy(buffer, line, 511); buffer[511] = 0; str = buffer; - for (i = 0; i < 5; i++) { + for (int i = 0; i < 5; i++) { tokens[i] = strtok(str, ","); if (!tokens[i]) goto err_exit; @@ -328,21 +356,19 @@ static int parse_line(char *line) /* index/main-leaf */ index = strtoull(tokens[0], NULL, 0); - if (index & 0x80000000) - range = leafs_ext; - else - range = leafs_basic; - - index &= 0x7FFFFFFF; - /* Skip line parsing for non-existing indexes */ - if ((int)index >= range->nr) - return -1; + /* + * Skip line parsing if the index is not covered by known-valid + * CPUID ranges on this CPU. 
+ */ + range = index_to_cpuid_range(index); + if (!range) + return; + /* Skip line parsing if the index CPUID output is all zero */ + index &= CPUID_FUNCTION_MASK; func = &range->funcs[index]; - - /* Return if the index has no valid item on this platform */ if (!func->nr) - return 0; + return; /* subleaf */ buf = tokens[1]; @@ -355,11 +381,11 @@ static int parse_line(char *line) subleaf_start = strtoul(start, NULL, 0); subleaf_end = min(subleaf_end, (u32)(func->nr - 1)); if (subleaf_start > subleaf_end) - return 0; + return; } else { subleaf_start = subleaf_end; if (subleaf_start > (u32)(func->nr - 1)) - return 0; + return; } /* register */ @@ -382,7 +408,7 @@ static int parse_line(char *line) bit_end = strtoul(end, NULL, 0); bit_start = (start) ? strtoul(start, NULL, 0) : bit_end; - for (sub = subleaf_start; sub <= subleaf_end; sub++) { + for (u32 sub = subleaf_start; sub <= subleaf_end; sub++) { leaf = &func->leafs[sub]; reg = &leaf->info[reg_index]; bdesc = ®->descs[reg->nr++]; @@ -392,12 +418,11 @@ static int parse_line(char *line) strcpy(bdesc->simp, strtok(tokens[4], " \t")); strcpy(bdesc->detail, tokens[5]); } - return 0; + return; err_exit: - printf("Warning: wrong line format:\n"); - printf("\tline[%d]: %s\n", flines, line); - return -1; + warnx("Wrong line format:\n" + "\tline[%d]: %s", flines, line); } /* Parse csv file, and construct the array of all leafs and subleafs */ @@ -418,10 +443,8 @@ static void parse_text(void) file = fopen("./cpuid.csv", "r"); } - if (!file) { - printf("Fail to open '%s'\n", filename); - return; - } + if (!file) + err(EXIT_FAILURE, "%s", filename); while (1) { ret = getline(&line, &len, file); @@ -436,21 +459,13 @@ static void parse_text(void) fclose(file); } - -/* Decode every eax/ebx/ecx/edx */ -static void decode_bits(u32 value, struct reg_desc *rdesc, enum cpuid_reg reg) +static void show_reg(const struct reg_desc *rdesc, u32 value) { - struct bits_desc *bdesc; - int start, end, i; + const struct bits_desc *bdesc; + int start, end; u32 mask; - if (!rdesc->nr) { - if (show_details) - printf("\t %s: 0x%08x\n", reg_names[reg], value); - return; - } - - for (i = 0; i < rdesc->nr; i++) { + for (int i = 0; i < rdesc->nr; i++) { bdesc = &rdesc->descs[i]; start = bdesc->start; @@ -480,23 +495,21 @@ static void decode_bits(u32 value, struct reg_desc *rdesc, enum cpuid_reg reg) } } -static void show_leaf(struct subleaf *leaf) +static void show_reg_header(bool has_entries, u32 leaf, u32 subleaf, const char *reg_name) { - if (!leaf) - return; + if (show_details && has_entries) + printf("CPUID_0x%x_%s[0x%x]:\n", leaf, reg_name, subleaf); +} - if (show_raw) { +static void show_leaf(struct subleaf *leaf) +{ + if (show_raw) leaf_print_raw(leaf); - } else { - if (show_details) - printf("CPUID_0x%x_ECX[0x%x]:\n", - leaf->index, leaf->sub); - } - decode_bits(leaf->eax, &leaf->info[R_EAX], R_EAX); - decode_bits(leaf->ebx, &leaf->info[R_EBX], R_EBX); - decode_bits(leaf->ecx, &leaf->info[R_ECX], R_ECX); - decode_bits(leaf->edx, &leaf->info[R_EDX], R_EDX); + for (int i = R_EAX; i < NR_REGS; i++) { + show_reg_header((leaf->info[i].nr > 0), leaf->index, leaf->sub, reg_names[i]); + show_reg(&leaf->info[i], leaf->output[i]); + } if (!show_raw && show_details) printf("\n"); @@ -504,46 +517,37 @@ static void show_leaf(struct subleaf *leaf) static void show_func(struct cpuid_func *func) { - int i; - - if (!func) - return; - - for (i = 0; i < func->nr; i++) + for (int i = 0; i < func->nr; i++) show_leaf(&func->leafs[i]); } static void show_range(struct cpuid_range *range) { - 
int i; - - for (i = 0; i < range->nr; i++) + for (int i = 0; i < range->nr; i++) show_func(&range->funcs[i]); } static inline struct cpuid_func *index_to_func(u32 index) { + u32 func_idx = index & CPUID_FUNCTION_MASK; struct cpuid_range *range; - u32 func_idx; - - range = (index & 0x80000000) ? leafs_ext : leafs_basic; - func_idx = index & 0xffff; - if ((func_idx + 1) > (u32)range->nr) { - printf("ERR: invalid input index (0x%x)\n", index); + range = index_to_cpuid_range(index); + if (!range) return NULL; - } + return &range->funcs[func_idx]; } static void show_info(void) { + struct cpuid_range *range; struct cpuid_func *func; if (show_raw) { /* Show all of the raw output of 'cpuid' instr */ - raw_dump_range(leafs_basic); - raw_dump_range(leafs_ext); + for_each_valid_cpuid_range(range) + raw_dump_range(range); return; } @@ -551,18 +555,19 @@ static void show_info(void) /* Only show specific leaf/subleaf info */ func = index_to_func(user_index); if (!func) - return; + errx(EXIT_FAILURE, "Invalid input leaf (0x%x)", user_index); /* Dump the raw data also */ show_raw = true; if (user_sub != 0xFFFFFFFF) { - if (user_sub + 1 <= (u32)func->nr) { - show_leaf(&func->leafs[user_sub]); - return; + if (user_sub + 1 > (u32)func->nr) { + errx(EXIT_FAILURE, "Leaf 0x%x has no valid subleaf = 0x%x", + user_index, user_sub); } - printf("ERR: invalid input subleaf (0x%x)\n", user_sub); + show_leaf(&func->leafs[user_sub]); + return; } show_func(func); @@ -570,38 +575,21 @@ static void show_info(void) } printf("CPU features:\n=============\n\n"); - show_range(leafs_basic); - show_range(leafs_ext); + for_each_valid_cpuid_range(range) + show_range(range); } -static void setup_platform_cpuid(void) +static void __noreturn usage(int exit_code) { - u32 eax, ebx, ecx, edx; - - /* Check vendor */ - eax = ebx = ecx = edx = 0; - cpuid(&eax, &ebx, &ecx, &edx); - - /* "htuA" */ - if (ebx == 0x68747541) - is_amd = true; - - /* Setup leafs for the basic and extended range */ - leafs_basic = setup_cpuid_range(0x0); - leafs_ext = setup_cpuid_range(0x80000000); -} - -static void usage(void) -{ - printf("kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n" - "\t-a|--all Show both bit flags and complex bit fields info\n" - "\t-b|--bitflags Show boolean flags only\n" - "\t-d|--detail Show details of the flag/fields (default)\n" - "\t-f|--flags Specify the cpuid csv file\n" - "\t-h|--help Show usage info\n" - "\t-l|--leaf=index Specify the leaf you want to check\n" - "\t-r|--raw Show raw cpuid data\n" - "\t-s|--subleaf=sub Specify the subleaf you want to check\n" + errx(exit_code, "kcpuid [-abdfhr] [-l leaf] [-s subleaf]\n" + "\t-a|--all Show both bit flags and complex bit fields info\n" + "\t-b|--bitflags Show boolean flags only\n" + "\t-d|--detail Show details of the flag/fields (default)\n" + "\t-f|--flags Specify the CPUID CSV file\n" + "\t-h|--help Show usage info\n" + "\t-l|--leaf=index Specify the leaf you want to check\n" + "\t-r|--raw Show raw CPUID data\n" + "\t-s|--subleaf=sub Specify the subleaf you want to check" ); } @@ -617,7 +605,7 @@ static struct option opts[] = { { NULL, 0, NULL, 0 } }; -static int parse_options(int argc, char *argv[]) +static void parse_options(int argc, char *argv[]) { int c; @@ -637,9 +625,7 @@ static int parse_options(int argc, char *argv[]) user_csv = optarg; break; case 'h': - usage(); - exit(1); - break; + usage(EXIT_SUCCESS); case 'l': /* main leaf */ user_index = strtoul(optarg, NULL, 0); @@ -652,11 +638,8 @@ static int parse_options(int argc, char *argv[]) user_sub = strtoul(optarg, NULL, 0); 
break; default: - printf("%s: Invalid option '%c'\n", argv[0], optopt); - return -1; - } - - return 0; + usage(EXIT_FAILURE); + } } /* @@ -669,11 +652,13 @@ static int parse_options(int argc, char *argv[]) */ int main(int argc, char *argv[]) { - if (parse_options(argc, argv)) - return -1; + struct cpuid_range *range; + + parse_options(argc, argv); /* Setup the cpuid leafs of current platform */ - setup_platform_cpuid(); + for_each_cpuid_range(range) + setup_cpuid_range(range); /* Read and parse the 'cpuid.csv' */ parse_text(); diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c index e91d4c4e1c16..bce69c6bfa69 100644 --- a/tools/arch/x86/lib/insn.c +++ b/tools/arch/x86/lib/insn.c @@ -324,6 +324,11 @@ int insn_get_opcode(struct insn *insn) } insn->attr = inat_get_opcode_attribute(op); + if (insn->x86_64 && inat_is_invalid64(insn->attr)) { + /* This instruction is invalid, like UD2. Stop decoding. */ + insn->attr &= INAT_INV64; + } + while (inat_is_escape(insn->attr)) { /* Get escaped opcode */ op = get_next(insn_byte_t, insn); @@ -337,6 +342,7 @@ int insn_get_opcode(struct insn *insn) insn->attr = 0; return -EINVAL; } + end: opcode->got = 1; return 0; @@ -658,7 +664,6 @@ int insn_get_immediate(struct insn *insn) } if (!inat_has_immediate(insn->attr)) - /* no immediates */ goto done; switch (inat_immediate_size(insn->attr)) { diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt index f5dd84eb55dc..262f7ca1fb95 100644 --- a/tools/arch/x86/lib/x86-opcode-map.txt +++ b/tools/arch/x86/lib/x86-opcode-map.txt @@ -35,7 +35,7 @@ # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) # - (66&F2): Both 0x66 and 0xF2 prefixes are specified. # -# REX2 Prefix +# REX2 Prefix Superscripts # - (!REX2): REX2 is not allowed # - (REX2): REX2 variant e.g. JMPABS @@ -147,7 +147,7 @@ AVXcode: # 0x60 - 0x6f 60: PUSHA/PUSHAD (i64) 61: POPA/POPAD (i64) -62: BOUND Gv,Ma (i64) | EVEX (Prefix) +62: BOUND Gv,Ma (i64) | EVEX (Prefix),(o64) 63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64) 64: SEG=FS (Prefix) 65: SEG=GS (Prefix) @@ -253,8 +253,8 @@ c0: Grp2 Eb,Ib (1A) c1: Grp2 Ev,Ib (1A) c2: RETN Iw (f64) c3: RETN -c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) -c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) +c4: LES Gz,Mp (i64) | VEX+2byte (Prefix),(o64) +c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix),(o64) c6: Grp11A Eb,Ib (1A) c7: Grp11B Ev,Iz (1A) c8: ENTER Iw,Ib @@ -286,10 +286,10 @@ df: ESC # Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix # in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation # to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD. -e0: LOOPNE/LOOPNZ Jb (f64) (!REX2) -e1: LOOPE/LOOPZ Jb (f64) (!REX2) -e2: LOOP Jb (f64) (!REX2) -e3: JrCXZ Jb (f64) (!REX2) +e0: LOOPNE/LOOPNZ Jb (f64),(!REX2) +e1: LOOPE/LOOPZ Jb (f64),(!REX2) +e2: LOOP Jb (f64),(!REX2) +e3: JrCXZ Jb (f64),(!REX2) e4: IN AL,Ib (!REX2) e5: IN eAX,Ib (!REX2) e6: OUT Ib,AL (!REX2) @@ -298,10 +298,10 @@ e7: OUT Ib,eAX (!REX2) # in "near" jumps and calls is 16-bit. For CALL, # push of return address is 16-bit wide, RSP is decremented by 2 # but is not truncated to 16 bits, unlike RIP. 
-e8: CALL Jz (f64) (!REX2) -e9: JMP-near Jz (f64) (!REX2) -ea: JMP-far Ap (i64) (!REX2) -eb: JMP-short Jb (f64) (!REX2) +e8: CALL Jz (f64),(!REX2) +e9: JMP-near Jz (f64),(!REX2) +ea: JMP-far Ap (i64),(!REX2) +eb: JMP-short Jb (f64),(!REX2) ec: IN AL,DX (!REX2) ed: IN eAX,DX (!REX2) ee: OUT DX,AL (!REX2) @@ -478,22 +478,22 @@ AVXcode: 1 7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev) # 0x0f 0x80-0x8f # Note: "forced64" is Intel CPU behavior (see comment about CALL insn). -80: JO Jz (f64) (!REX2) -81: JNO Jz (f64) (!REX2) -82: JB/JC/JNAE Jz (f64) (!REX2) -83: JAE/JNB/JNC Jz (f64) (!REX2) -84: JE/JZ Jz (f64) (!REX2) -85: JNE/JNZ Jz (f64) (!REX2) -86: JBE/JNA Jz (f64) (!REX2) -87: JA/JNBE Jz (f64) (!REX2) -88: JS Jz (f64) (!REX2) -89: JNS Jz (f64) (!REX2) -8a: JP/JPE Jz (f64) (!REX2) -8b: JNP/JPO Jz (f64) (!REX2) -8c: JL/JNGE Jz (f64) (!REX2) -8d: JNL/JGE Jz (f64) (!REX2) -8e: JLE/JNG Jz (f64) (!REX2) -8f: JNLE/JG Jz (f64) (!REX2) +80: JO Jz (f64),(!REX2) +81: JNO Jz (f64),(!REX2) +82: JB/JC/JNAE Jz (f64),(!REX2) +83: JAE/JNB/JNC Jz (f64),(!REX2) +84: JE/JZ Jz (f64),(!REX2) +85: JNE/JNZ Jz (f64),(!REX2) +86: JBE/JNA Jz (f64),(!REX2) +87: JA/JNBE Jz (f64),(!REX2) +88: JS Jz (f64),(!REX2) +89: JNS Jz (f64),(!REX2) +8a: JP/JPE Jz (f64),(!REX2) +8b: JNP/JPO Jz (f64),(!REX2) +8c: JL/JNGE Jz (f64),(!REX2) +8d: JNL/JGE Jz (f64),(!REX2) +8e: JLE/JNG Jz (f64),(!REX2) +8f: JNLE/JG Jz (f64),(!REX2) # 0x0f 0x90-0x9f 90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66) 91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66) diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk index 5770c8097f32..2c19d7fc8a85 100644 --- a/tools/arch/x86/tools/gen-insn-attr-x86.awk +++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk @@ -64,6 +64,8 @@ BEGIN { modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])" force64_expr = "\\([df]64\\)" + invalid64_expr = "\\(i64\\)" + only64_expr = "\\(o64\\)" rex_expr = "^((REX(\\.[XRWB]+)+)|(REX$))" rex2_expr = "\\(REX2\\)" no_rex2_expr = "\\(!REX2\\)" @@ -319,6 +321,11 @@ function convert_operands(count,opnd, i,j,imm,mod) if (match(ext, force64_expr)) flags = add_flags(flags, "INAT_FORCE64") + # check invalid in 64-bit (and no only64) + if (match(ext, invalid64_expr) && + !match($0, only64_expr)) + flags = add_flags(flags, "INAT_INV64") + # check REX2 not allowed if (match(ext, no_rex2_expr)) flags = add_flags(flags, "INAT_NO_REX2") diff --git a/tools/include/uapi/linux/fanotify.h b/tools/include/uapi/linux/fanotify.h new file mode 100644 index 000000000000..e710967c7c26 --- /dev/null +++ b/tools/include/uapi/linux/fanotify.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_FANOTIFY_H +#define _UAPI_LINUX_FANOTIFY_H + +#include <linux/types.h> + +/* the following events that user-space can register for */ +#define FAN_ACCESS 0x00000001 /* File was accessed */ +#define FAN_MODIFY 0x00000002 /* File was modified */ +#define FAN_ATTRIB 0x00000004 /* Metadata changed */ +#define FAN_CLOSE_WRITE 0x00000008 /* Writable file closed */ +#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */ +#define FAN_OPEN 0x00000020 /* File was opened */ +#define FAN_MOVED_FROM 0x00000040 /* File was moved from X */ +#define FAN_MOVED_TO 0x00000080 /* File was moved to Y */ +#define FAN_CREATE 0x00000100 /* Subfile was created */ +#define FAN_DELETE 0x00000200 /* Subfile was deleted */ +#define 
FAN_DELETE_SELF 0x00000400 /* Self was deleted */ +#define FAN_MOVE_SELF 0x00000800 /* Self was moved */ +#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */ + +#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ +#define FAN_FS_ERROR 0x00008000 /* Filesystem error */ + +#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */ +#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ +#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */ +/* #define FAN_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */ + +#define FAN_PRE_ACCESS 0x00100000 /* Pre-content access hook */ +#define FAN_MNT_ATTACH 0x01000000 /* Mount was attached */ +#define FAN_MNT_DETACH 0x02000000 /* Mount was detached */ + +#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */ + +#define FAN_RENAME 0x10000000 /* File was renamed */ + +#define FAN_ONDIR 0x40000000 /* Event occurred against dir */ + +/* helper events */ +#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */ +#define FAN_MOVE (FAN_MOVED_FROM | FAN_MOVED_TO) /* moves */ + +/* flags used for fanotify_init() */ +#define FAN_CLOEXEC 0x00000001 +#define FAN_NONBLOCK 0x00000002 + +/* These are NOT bitwise flags. Both bits are used together. */ +#define FAN_CLASS_NOTIF 0x00000000 +#define FAN_CLASS_CONTENT 0x00000004 +#define FAN_CLASS_PRE_CONTENT 0x00000008 + +/* Deprecated - do not use this in programs and do not add new flags here! */ +#define FAN_ALL_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ + FAN_CLASS_PRE_CONTENT) + +#define FAN_UNLIMITED_QUEUE 0x00000010 +#define FAN_UNLIMITED_MARKS 0x00000020 +#define FAN_ENABLE_AUDIT 0x00000040 + +/* Flags to determine fanotify event format */ +#define FAN_REPORT_PIDFD 0x00000080 /* Report pidfd for event->pid */ +#define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */ +#define FAN_REPORT_FID 0x00000200 /* Report unique file id */ +#define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */ +#define FAN_REPORT_NAME 0x00000800 /* Report events with name */ +#define FAN_REPORT_TARGET_FID 0x00001000 /* Report dirent target id */ +#define FAN_REPORT_FD_ERROR 0x00002000 /* event->fd can report error */ +#define FAN_REPORT_MNT 0x00004000 /* Report mount events */ + +/* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */ +#define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME) +/* Convenience macro - FAN_REPORT_TARGET_FID requires all other FID flags */ +#define FAN_REPORT_DFID_NAME_TARGET (FAN_REPORT_DFID_NAME | \ + FAN_REPORT_FID | FAN_REPORT_TARGET_FID) + +/* Deprecated - do not use this in programs and do not add new flags here! */ +#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \ + FAN_ALL_CLASS_BITS | FAN_UNLIMITED_QUEUE |\ + FAN_UNLIMITED_MARKS) + +/* flags used for fanotify_modify_mark() */ +#define FAN_MARK_ADD 0x00000001 +#define FAN_MARK_REMOVE 0x00000002 +#define FAN_MARK_DONT_FOLLOW 0x00000004 +#define FAN_MARK_ONLYDIR 0x00000008 +/* FAN_MARK_MOUNT is 0x00000010 */ +#define FAN_MARK_IGNORED_MASK 0x00000020 +#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040 +#define FAN_MARK_FLUSH 0x00000080 +/* FAN_MARK_FILESYSTEM is 0x00000100 */ +#define FAN_MARK_EVICTABLE 0x00000200 +/* This bit is mutually exclusive with FAN_MARK_IGNORED_MASK bit */ +#define FAN_MARK_IGNORE 0x00000400 + +/* These are NOT bitwise flags. Both bits can be used togther. 
*/ +#define FAN_MARK_INODE 0x00000000 +#define FAN_MARK_MOUNT 0x00000010 +#define FAN_MARK_FILESYSTEM 0x00000100 +#define FAN_MARK_MNTNS 0x00000110 + +/* + * Convenience macro - FAN_MARK_IGNORE requires FAN_MARK_IGNORED_SURV_MODIFY + * for non-inode mark types. + */ +#define FAN_MARK_IGNORE_SURV (FAN_MARK_IGNORE | FAN_MARK_IGNORED_SURV_MODIFY) + +/* Deprecated - do not use this in programs and do not add new flags here! */ +#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\ + FAN_MARK_REMOVE |\ + FAN_MARK_DONT_FOLLOW |\ + FAN_MARK_ONLYDIR |\ + FAN_MARK_MOUNT |\ + FAN_MARK_IGNORED_MASK |\ + FAN_MARK_IGNORED_SURV_MODIFY |\ + FAN_MARK_FLUSH) + +/* Deprecated - do not use this in programs and do not add new flags here! */ +#define FAN_ALL_EVENTS (FAN_ACCESS |\ + FAN_MODIFY |\ + FAN_CLOSE |\ + FAN_OPEN) + +/* + * All events which require a permission response from userspace + */ +/* Deprecated - do not use this in programs and do not add new flags here! */ +#define FAN_ALL_PERM_EVENTS (FAN_OPEN_PERM |\ + FAN_ACCESS_PERM) + +/* Deprecated - do not use this in programs and do not add new flags here! */ +#define FAN_ALL_OUTGOING_EVENTS (FAN_ALL_EVENTS |\ + FAN_ALL_PERM_EVENTS |\ + FAN_Q_OVERFLOW) + +#define FANOTIFY_METADATA_VERSION 3 + +struct fanotify_event_metadata { + __u32 event_len; + __u8 vers; + __u8 reserved; + __u16 metadata_len; + __aligned_u64 mask; + __s32 fd; + __s32 pid; +}; + +#define FAN_EVENT_INFO_TYPE_FID 1 +#define FAN_EVENT_INFO_TYPE_DFID_NAME 2 +#define FAN_EVENT_INFO_TYPE_DFID 3 +#define FAN_EVENT_INFO_TYPE_PIDFD 4 +#define FAN_EVENT_INFO_TYPE_ERROR 5 +#define FAN_EVENT_INFO_TYPE_RANGE 6 +#define FAN_EVENT_INFO_TYPE_MNT 7 + +/* Special info types for FAN_RENAME */ +#define FAN_EVENT_INFO_TYPE_OLD_DFID_NAME 10 +/* Reserved for FAN_EVENT_INFO_TYPE_OLD_DFID 11 */ +#define FAN_EVENT_INFO_TYPE_NEW_DFID_NAME 12 +/* Reserved for FAN_EVENT_INFO_TYPE_NEW_DFID 13 */ + +/* Variable length info record following event metadata */ +struct fanotify_event_info_header { + __u8 info_type; + __u8 pad; + __u16 len; +}; + +/* + * Unique file identifier info record. + * This structure is used for records of types FAN_EVENT_INFO_TYPE_FID, + * FAN_EVENT_INFO_TYPE_DFID and FAN_EVENT_INFO_TYPE_DFID_NAME. + * For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null terminated + * name immediately after the file handle. + */ +struct fanotify_event_info_fid { + struct fanotify_event_info_header hdr; + __kernel_fsid_t fsid; + /* + * Following is an opaque struct file_handle that can be passed as + * an argument to open_by_handle_at(2). + */ + unsigned char handle[]; +}; + +/* + * This structure is used for info records of type FAN_EVENT_INFO_TYPE_PIDFD. + * It holds a pidfd for the pid that was responsible for generating an event. + */ +struct fanotify_event_info_pidfd { + struct fanotify_event_info_header hdr; + __s32 pidfd; +}; + +struct fanotify_event_info_error { + struct fanotify_event_info_header hdr; + __s32 error; + __u32 error_count; +}; + +struct fanotify_event_info_range { + struct fanotify_event_info_header hdr; + __u32 pad; + __u64 offset; + __u64 count; +}; + +struct fanotify_event_info_mnt { + struct fanotify_event_info_header hdr; + __u64 mnt_id; +}; + +/* + * User space may need to record additional information about its decision. + * The extra information type records what kind of information is included. + * The default is none. We also define an extra information buffer whose + * size is determined by the extra information type. 
+ * + * If the information type is Audit Rule, then the information following + * is the rule number that triggered the user space decision that + * requires auditing. + */ + +#define FAN_RESPONSE_INFO_NONE 0 +#define FAN_RESPONSE_INFO_AUDIT_RULE 1 + +struct fanotify_response { + __s32 fd; + __u32 response; +}; + +struct fanotify_response_info_header { + __u8 type; + __u8 pad; + __u16 len; +}; + +struct fanotify_response_info_audit_rule { + struct fanotify_response_info_header hdr; + __u32 rule_number; + __u32 subj_trust; + __u32 obj_trust; +}; + +/* Legit userspace responses to a _PERM event */ +#define FAN_ALLOW 0x01 +#define FAN_DENY 0x02 +/* errno other than EPERM can specified in upper byte of deny response */ +#define FAN_ERRNO_BITS 8 +#define FAN_ERRNO_SHIFT (32 - FAN_ERRNO_BITS) +#define FAN_ERRNO_MASK ((1 << FAN_ERRNO_BITS) - 1) +#define FAN_DENY_ERRNO(err) \ + (FAN_DENY | ((((__u32)(err)) & FAN_ERRNO_MASK) << FAN_ERRNO_SHIFT)) + +#define FAN_AUDIT 0x10 /* Bitmask to create audit record for result */ +#define FAN_INFO 0x20 /* Bitmask to indicate additional information */ + +/* No fd set in event */ +#define FAN_NOFD -1 +#define FAN_NOPIDFD FAN_NOFD +#define FAN_EPIDFD -2 + +/* Helper functions to deal with fanotify_event_metadata buffers */ +#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata)) + +#define FAN_EVENT_NEXT(meta, len) ((len) -= (meta)->event_len, \ + (struct fanotify_event_metadata*)(((char *)(meta)) + \ + (meta)->event_len)) + +#define FAN_EVENT_OK(meta, len) ((long)(len) >= (long)FAN_EVENT_METADATA_LEN && \ + (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \ + (long)(meta)->event_len <= (long)(len)) + +#endif /* _UAPI_LINUX_FANOTIFY_H */ diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h new file mode 100644 index 000000000000..7fa67c2031a5 --- /dev/null +++ b/tools/include/uapi/linux/mount.h @@ -0,0 +1,235 @@ +#ifndef _UAPI_LINUX_MOUNT_H +#define _UAPI_LINUX_MOUNT_H + +#include <linux/types.h> + +/* + * These are the fs-independent mount-flags: up to 32 flags are supported + * + * Usage of these is restricted within the kernel to core mount(2) code and + * callers of sys_mount() only. Filesystems should be using the SB_* + * equivalent instead. + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define MS_NODIRATIME 2048 /* Do not update directory access times */ +#define MS_BIND 4096 +#define MS_MOVE 8192 +#define MS_REC 16384 +#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. */ +#define MS_SILENT 32768 +#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define MS_UNBINDABLE (1<<17) /* change to unbindable */ +#define MS_PRIVATE (1<<18) /* change to private */ +#define MS_SLAVE (1<<19) /* change to slave */ +#define MS_SHARED (1<<20) /* change to shared */ +#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. 
*/ +#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ + +/* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) +#define MS_NOREMOTELOCK (1<<27) +#define MS_NOSEC (1<<28) +#define MS_BORN (1<<29) +#define MS_ACTIVE (1<<30) +#define MS_NOUSER (1<<31) + +/* + * Superblock flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ + MS_LAZYTIME) + +/* + * Old magic mount flag and mask + */ +#define MS_MGC_VAL 0xC0ED0000 +#define MS_MGC_MSK 0xffff0000 + +/* + * open_tree() flags. + */ +#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */ +#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */ + +/* + * move_mount() flags. + */ +#define MOVE_MOUNT_F_SYMLINKS 0x00000001 /* Follow symlinks on from path */ +#define MOVE_MOUNT_F_AUTOMOUNTS 0x00000002 /* Follow automounts on from path */ +#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */ +#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */ +#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */ +#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */ +#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */ +#define MOVE_MOUNT_BENEATH 0x00000200 /* Mount beneath top mount */ +#define MOVE_MOUNT__MASK 0x00000377 + +/* + * fsopen() flags. + */ +#define FSOPEN_CLOEXEC 0x00000001 + +/* + * fspick() flags. + */ +#define FSPICK_CLOEXEC 0x00000001 +#define FSPICK_SYMLINK_NOFOLLOW 0x00000002 +#define FSPICK_NO_AUTOMOUNT 0x00000004 +#define FSPICK_EMPTY_PATH 0x00000008 + +/* + * The type of fsconfig() call made. + */ +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, /* Set parameter, supplying no value */ + FSCONFIG_SET_STRING = 1, /* Set parameter, supplying a string value */ + FSCONFIG_SET_BINARY = 2, /* Set parameter, supplying a binary blob value */ + FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */ + FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */ + FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */ + FSCONFIG_CMD_CREATE = 6, /* Create new or reuse existing superblock */ + FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */ + FSCONFIG_CMD_CREATE_EXCL = 8, /* Create new superblock, fail if reusing existing superblock */ +}; + +/* + * fsmount() flags. + */ +#define FSMOUNT_CLOEXEC 0x00000001 + +/* + * Mount attributes. + */ +#define MOUNT_ATTR_RDONLY 0x00000001 /* Mount read-only */ +#define MOUNT_ATTR_NOSUID 0x00000002 /* Ignore suid and sgid bits */ +#define MOUNT_ATTR_NODEV 0x00000004 /* Disallow access to device special files */ +#define MOUNT_ATTR_NOEXEC 0x00000008 /* Disallow program execution */ +#define MOUNT_ATTR__ATIME 0x00000070 /* Setting on how atime should be updated */ +#define MOUNT_ATTR_RELATIME 0x00000000 /* - Update atime relative to mtime/ctime. */ +#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */ +#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */ +#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */ +#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. 
*/ +#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */ + +/* + * mount_setattr() + */ +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +/* List of all mount_attr versions. */ +#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */ + + +/* + * Structure for getting mount/superblock/filesystem info with statmount(2). + * + * The interface is similar to statx(2): individual fields or groups can be + * selected with the @mask argument of statmount(). Kernel will set the @mask + * field according to the supported fields. + * + * If string fields are selected, then the caller needs to pass a buffer that + * has space after the fixed part of the structure. Nul terminated strings are + * copied there and offsets relative to @str are stored in the relevant fields. + * If the buffer is too small, then EOVERFLOW is returned. The actually used + * size is returned in @size. + */ +struct statmount { + __u32 size; /* Total size, including strings */ + __u32 mnt_opts; /* [str] Options (comma separated, escaped) */ + __u64 mask; /* What results were written */ + __u32 sb_dev_major; /* Device ID */ + __u32 sb_dev_minor; + __u64 sb_magic; /* ..._SUPER_MAGIC */ + __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */ + __u32 fs_type; /* [str] Filesystem type */ + __u64 mnt_id; /* Unique ID of mount */ + __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */ + __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */ + __u32 mnt_parent_id_old; + __u64 mnt_attr; /* MOUNT_ATTR_... */ + __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */ + __u64 mnt_peer_group; /* ID of shared peer group */ + __u64 mnt_master; /* Mount receives propagation from this ID */ + __u64 propagate_from; /* Propagation from in current namespace */ + __u32 mnt_root; /* [str] Root of mount relative to root of fs */ + __u32 mnt_point; /* [str] Mountpoint relative to current root */ + __u64 mnt_ns_id; /* ID of the mount namespace */ + __u32 fs_subtype; /* [str] Subtype of fs_type (if any) */ + __u32 sb_source; /* [str] Source string of the mount */ + __u32 opt_num; /* Number of fs options */ + __u32 opt_array; /* [str] Array of nul terminated fs options */ + __u32 opt_sec_num; /* Number of security options */ + __u32 opt_sec_array; /* [str] Array of nul terminated security options */ + __u64 supported_mask; /* Mask flags that this kernel supports */ + __u32 mnt_uidmap_num; /* Number of uid mappings */ + __u32 mnt_uidmap; /* [str] Array of uid mappings (as seen from callers namespace) */ + __u32 mnt_gidmap_num; /* Number of gid mappings */ + __u32 mnt_gidmap; /* [str] Array of gid mappings (as seen from callers namespace) */ + __u64 __spare2[43]; + char str[]; /* Variable size part containing strings */ +}; + +/* + * Structure for passing mount ID and miscellaneous parameters to statmount(2) + * and listmount(2). + * + * For statmount(2) @param represents the request mask. + * For listmount(2) @param represents the last listed mount id (or zero). + */ +struct mnt_id_req { + __u32 size; + __u32 spare; + __u64 mnt_id; + __u64 param; + __u64 mnt_ns_id; +}; + +/* List of all mnt_id_req versions. */ +#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */ +#define MNT_ID_REQ_SIZE_VER1 32 /* sizeof second published struct */ + +/* + * @mask bits for statmount(2) + */ +#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */ +#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... 
*/ +#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */ +#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */ +#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */ +#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */ +#define STATMOUNT_MNT_NS_ID 0x00000040U /* Want/got mnt_ns_id */ +#define STATMOUNT_MNT_OPTS 0x00000080U /* Want/got mnt_opts */ +#define STATMOUNT_FS_SUBTYPE 0x00000100U /* Want/got fs_subtype */ +#define STATMOUNT_SB_SOURCE 0x00000200U /* Want/got sb_source */ +#define STATMOUNT_OPT_ARRAY 0x00000400U /* Want/got opt_... */ +#define STATMOUNT_OPT_SEC_ARRAY 0x00000800U /* Want/got opt_sec... */ +#define STATMOUNT_SUPPORTED_MASK 0x00001000U /* Want/got supported mask flags */ +#define STATMOUNT_MNT_UIDMAP 0x00002000U /* Want/got uidmap... */ +#define STATMOUNT_MNT_GIDMAP 0x00004000U /* Want/got gidmap... */ + +/* + * Special @mnt_id values that can be passed to listmount + */ +#define LSMT_ROOT 0xffffffffffffffff /* root mount */ +#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */ + +#endif /* _UAPI_LINUX_MOUNT_H */ diff --git a/tools/include/uapi/linux/nsfs.h b/tools/include/uapi/linux/nsfs.h new file mode 100644 index 000000000000..34127653fd00 --- /dev/null +++ b/tools/include/uapi/linux/nsfs.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __LINUX_NSFS_H +#define __LINUX_NSFS_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define NSIO 0xb7 + +/* Returns a file descriptor that refers to an owning user namespace */ +#define NS_GET_USERNS _IO(NSIO, 0x1) +/* Returns a file descriptor that refers to a parent namespace */ +#define NS_GET_PARENT _IO(NSIO, 0x2) +/* Returns the type of namespace (CLONE_NEW* value) referred to by + file descriptor */ +#define NS_GET_NSTYPE _IO(NSIO, 0x3) +/* Get owner UID (in the caller's user namespace) for a user namespace */ +#define NS_GET_OWNER_UID _IO(NSIO, 0x4) +/* Get the id for a mount namespace */ +#define NS_GET_MNTNS_ID _IOR(NSIO, 0x5, __u64) +/* Translate pid from target pid namespace into the caller's pid namespace. */ +#define NS_GET_PID_FROM_PIDNS _IOR(NSIO, 0x6, int) +/* Return thread-group leader id of pid in the callers pid namespace. */ +#define NS_GET_TGID_FROM_PIDNS _IOR(NSIO, 0x7, int) +/* Translate pid from caller's pid namespace into a target pid namespace. */ +#define NS_GET_PID_IN_PIDNS _IOR(NSIO, 0x8, int) +/* Return thread-group leader id of pid in the target pid namespace. */ +#define NS_GET_TGID_IN_PIDNS _IOR(NSIO, 0x9, int) + +struct mnt_ns_info { + __u32 size; + __u32 nr_mounts; + __u64 mnt_ns_id; +}; + +#define MNT_NS_INFO_SIZE_VER0 16 /* size of first published struct */ + +/* Get information about namespace. */ +#define NS_MNT_GET_INFO _IOR(NSIO, 10, struct mnt_ns_info) +/* Get next namespace. */ +#define NS_MNT_GET_NEXT _IOR(NSIO, 11, struct mnt_ns_info) +/* Get previous namespace. 
*/ +#define NS_MNT_GET_PREV _IOR(NSIO, 12, struct mnt_ns_info) + +#endif /* __LINUX_NSFS_H */ diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 5fc753c23734..78a362b80027 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -39,18 +39,21 @@ enum perf_type_id { /* * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE + * * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA * AA: hardware event ID * EEEEEEEE: PMU type ID + * * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB * BB: hardware cache ID * CC: hardware cache op ID * DD: hardware cache op result ID * EEEEEEEE: PMU type ID - * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. + * + * If the PMU type ID is 0, PERF_TYPE_RAW will be applied. */ -#define PERF_PMU_TYPE_SHIFT 32 -#define PERF_HW_EVENT_MASK 0xffffffff +#define PERF_PMU_TYPE_SHIFT 32 +#define PERF_HW_EVENT_MASK 0xffffffff /* * Generalized performance event event_id types, used by the @@ -112,7 +115,7 @@ enum perf_hw_cache_op_result_id { /* * Special "software" events provided by the kernel, even if the hardware * does not support performance events. These events measure various - * physical and sw events of the kernel (and allow the profiling of them as + * physical and SW events of the kernel (and allow the profiling of them as * well): */ enum perf_sw_ids { @@ -167,8 +170,9 @@ enum perf_event_sample_format { }; #define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT) + /* - * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set + * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set. * * If the user does not pass priv level information via branch_sample_type, * the kernel uses the event's priv level. Branch and event priv levels do @@ -178,20 +182,20 @@ enum perf_event_sample_format { * of branches and therefore it supersedes all the other types. 
*/ enum perf_branch_sample_type_shift { - PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ - PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ - PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ - - PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ - PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ - PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ - PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ - PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ - PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ - PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ + + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ - PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* CALL/RET stack */ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ @@ -210,96 +214,95 @@ enum perf_branch_sample_type_shift { }; enum perf_branch_sample_type { - PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, - PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, - PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, + PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, + PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, + PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, - PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, - PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, - PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, - PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, - PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, - PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, + PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, + PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, + PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, + PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, + PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, - PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, - PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, - PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, + PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, + PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, + 
PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, - PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, - PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, + PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, + PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, - PERF_SAMPLE_BRANCH_TYPE_SAVE = - 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, + PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, - PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, - PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; /* - * Common flow change classification + * Common control flow change classifications: */ enum { - PERF_BR_UNKNOWN = 0, /* unknown */ - PERF_BR_COND = 1, /* conditional */ - PERF_BR_UNCOND = 2, /* unconditional */ - PERF_BR_IND = 3, /* indirect */ - PERF_BR_CALL = 4, /* function call */ - PERF_BR_IND_CALL = 5, /* indirect function call */ - PERF_BR_RET = 6, /* function return */ - PERF_BR_SYSCALL = 7, /* syscall */ - PERF_BR_SYSRET = 8, /* syscall return */ - PERF_BR_COND_CALL = 9, /* conditional function call */ - PERF_BR_COND_RET = 10, /* conditional function return */ - PERF_BR_ERET = 11, /* exception return */ - PERF_BR_IRQ = 12, /* irq */ - PERF_BR_SERROR = 13, /* system error */ - PERF_BR_NO_TX = 14, /* not in transaction */ - PERF_BR_EXTEND_ABI = 15, /* extend ABI */ + PERF_BR_UNKNOWN = 0, /* Unknown */ + PERF_BR_COND = 1, /* Conditional */ + PERF_BR_UNCOND = 2, /* Unconditional */ + PERF_BR_IND = 3, /* Indirect */ + PERF_BR_CALL = 4, /* Function call */ + PERF_BR_IND_CALL = 5, /* Indirect function call */ + PERF_BR_RET = 6, /* Function return */ + PERF_BR_SYSCALL = 7, /* Syscall */ + PERF_BR_SYSRET = 8, /* Syscall return */ + PERF_BR_COND_CALL = 9, /* Conditional function call */ + PERF_BR_COND_RET = 10, /* Conditional function return */ + PERF_BR_ERET = 11, /* Exception return */ + PERF_BR_IRQ = 12, /* IRQ */ + PERF_BR_SERROR = 13, /* System error */ + PERF_BR_NO_TX = 14, /* Not in transaction */ + PERF_BR_EXTEND_ABI = 15, /* Extend ABI */ PERF_BR_MAX, }; /* - * Common branch speculation outcome classification + * Common branch speculation outcome classifications: */ enum { - PERF_BR_SPEC_NA = 0, /* Not available */ - PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ - PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ - PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ + PERF_BR_SPEC_NA = 0, /* Not available */ + PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ + PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ + PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ PERF_BR_SPEC_MAX, }; enum { - PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ - PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ - PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ - PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ - PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ - PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ - 
PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ - PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ + PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ + PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ + PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ + PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ + PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ + PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ + PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ + PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ PERF_BR_NEW_MAX, }; enum { - PERF_BR_PRIV_UNKNOWN = 0, - PERF_BR_PRIV_USER = 1, - PERF_BR_PRIV_KERNEL = 2, - PERF_BR_PRIV_HV = 3, + PERF_BR_PRIV_UNKNOWN = 0, + PERF_BR_PRIV_USER = 1, + PERF_BR_PRIV_KERNEL = 2, + PERF_BR_PRIV_HV = 3, }; -#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 -#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2 -#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 -#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 -#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 +#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 +#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2 +#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 +#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 +#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 #define PERF_SAMPLE_BRANCH_PLM_ALL \ (PERF_SAMPLE_BRANCH_USER|\ @@ -310,9 +313,9 @@ enum { * Values to determine ABI of the registers dump. */ enum perf_sample_regs_abi { - PERF_SAMPLE_REGS_ABI_NONE = 0, - PERF_SAMPLE_REGS_ABI_32 = 1, - PERF_SAMPLE_REGS_ABI_64 = 2, + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, }; /* @@ -320,21 +323,21 @@ enum perf_sample_regs_abi { * abort events. Multiple bits can be set. */ enum { - PERF_TXN_ELISION = (1 << 0), /* From elision */ - PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ - PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ - PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */ - PERF_TXN_RETRY = (1 << 4), /* Retry possible */ - PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ - PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ - PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ + PERF_TXN_ELISION = (1 << 0), /* From elision */ + PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ + PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ + PERF_TXN_ASYNC = (1 << 3), /* Instruction is not related */ + PERF_TXN_RETRY = (1 << 4), /* Retry possible */ + PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ + PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ + PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ - PERF_TXN_MAX = (1 << 8), /* non-ABI */ + PERF_TXN_MAX = (1 << 8), /* non-ABI */ - /* bits 32..63 are reserved for the abort code */ + /* Bits 32..63 are reserved for the abort code */ - PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), - PERF_TXN_ABORT_SHIFT = 32, + PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), + PERF_TXN_ABORT_SHIFT = 32, }; /* @@ -369,24 +372,22 @@ enum perf_event_read_format { PERF_FORMAT_MAX = 1U << 5, /* non-ABI */ }; -#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ -#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ -#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ -#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ - /* add: sample_stack_user */ -#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ -#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ -#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ 
-#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ -#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */ +#define PERF_ATTR_SIZE_VER0 64 /* Size of first published 'struct perf_event_attr' */ +#define PERF_ATTR_SIZE_VER1 72 /* Add: config2 */ +#define PERF_ATTR_SIZE_VER2 80 /* Add: branch_sample_type */ +#define PERF_ATTR_SIZE_VER3 96 /* Add: sample_regs_user */ + /* Add: sample_stack_user */ +#define PERF_ATTR_SIZE_VER4 104 /* Add: sample_regs_intr */ +#define PERF_ATTR_SIZE_VER5 112 /* Add: aux_watermark */ +#define PERF_ATTR_SIZE_VER6 120 /* Add: aux_sample_size */ +#define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */ +#define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */ /* - * Hardware event_id to monitor via a performance monitoring event: - * - * @sample_max_stack: Max number of frame pointers in a callchain, - * should be < /proc/sys/kernel/perf_event_max_stack - * Max number of entries of branch stack - * should be < hardware limit + * 'struct perf_event_attr' contains various attributes that define + * a performance event - most of them hardware related configuration + * details, but also a lot of behavioral switches and values implemented + * by the kernel. */ struct perf_event_attr { @@ -396,7 +397,7 @@ struct perf_event_attr { __u32 type; /* - * Size of the attr structure, for fwd/bwd compat. + * Size of the attr structure, for forward/backwards compatibility. */ __u32 size; @@ -451,21 +452,21 @@ struct perf_event_attr { comm_exec : 1, /* flag comm events that are due to an exec */ use_clockid : 1, /* use @clockid for time fields */ context_switch : 1, /* context switch data */ - write_backward : 1, /* Write ring buffer from end to beginning */ + write_backward : 1, /* write ring buffer from end to beginning */ namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ - bpf_event : 1, /* include bpf events */ + bpf_event : 1, /* include BPF events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ - build_id : 1, /* use build id in mmap2 events */ + build_id : 1, /* use build ID in mmap2 events */ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ remove_on_exec : 1, /* event is removed from task on exec */ sigtrap : 1, /* send synchronous SIGTRAP on event */ __reserved_1 : 26; union { - __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_events; /* wake up every n events */ __u32 wakeup_watermark; /* bytes before wakeup */ }; @@ -474,13 +475,13 @@ struct perf_event_attr { __u64 bp_addr; __u64 kprobe_func; /* for perf_kprobe */ __u64 uprobe_path; /* for perf_uprobe */ - __u64 config1; /* extension of config */ + __u64 config1; /* extension of config */ }; union { __u64 bp_len; - __u64 kprobe_addr; /* when kprobe_func == NULL */ + __u64 kprobe_addr; /* when kprobe_func == NULL */ __u64 probe_offset; /* for perf_[k,u]probe */ - __u64 config2; /* extension of config1 */ + __u64 config2; /* extension of config1 */ }; __u64 branch_sample_type; /* enum perf_branch_sample_type */ @@ -510,7 +511,16 @@ struct perf_event_attr { * Wakeup watermark for AUX area */ __u32 aux_watermark; + + /* + * Max number of frame pointers in a callchain, should be + * lower than /proc/sys/kernel/perf_event_max_stack. + * + * Max number of entries of branch stack should be lower + * than the hardware limit. 
+ */ __u16 sample_max_stack; + __u16 __reserved_2; __u32 aux_sample_size; @@ -537,7 +547,7 @@ struct perf_event_attr { /* * Structure used by below PERF_EVENT_IOC_QUERY_BPF command - * to query bpf programs attached to the same perf tracepoint + * to query BPF programs attached to the same perf tracepoint * as the given perf event. */ struct perf_event_query_bpf { @@ -559,21 +569,21 @@ struct perf_event_query_bpf { /* * Ioctls that can be done on a perf event fd: */ -#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) -#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) -#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) -#define PERF_EVENT_IOC_RESET _IO ('$', 3) -#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) -#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) -#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) -#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) -#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) -#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) +#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_RESET _IO ('$', 3) +#define PERF_EVENT_IOC_PERIOD _IOW ('$', 4, __u64) +#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW ('$', 6, char *) +#define PERF_EVENT_IOC_ID _IOR ('$', 7, __u64 *) +#define PERF_EVENT_IOC_SET_BPF _IOW ('$', 8, __u32) +#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW ('$', 9, __u32) #define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) -#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *) +#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW ('$', 11, struct perf_event_attr *) enum perf_event_ioc_flags { - PERF_IOC_FLAG_GROUP = 1U << 0, + PERF_IOC_FLAG_GROUP = 1U << 0, }; /* @@ -584,7 +594,7 @@ struct perf_event_mmap_page { __u32 compat_version; /* lowest version this is compat with */ /* - * Bits needed to read the hw events in user-space. + * Bits needed to read the HW events in user-space. * * u32 seq, time_mult, time_shift, index, width; * u64 count, enabled, running; @@ -622,7 +632,7 @@ struct perf_event_mmap_page { __u32 index; /* hardware event identifier */ __s64 offset; /* add to hardware event value */ __u64 time_enabled; /* time event active */ - __u64 time_running; /* time event on cpu */ + __u64 time_running; /* time event on CPU */ union { __u64 capabilities; struct { @@ -650,7 +660,7 @@ struct perf_event_mmap_page { /* * If cap_usr_time the below fields can be used to compute the time - * delta since time_enabled (in ns) using rdtsc or similar. + * delta since time_enabled (in ns) using RDTSC or similar. * * u64 quot, rem; * u64 delta; @@ -723,7 +733,7 @@ struct perf_event_mmap_page { * after reading this value. * * When the mapping is PROT_WRITE the @data_tail value should be - * written by userspace to reflect the last read data, after issueing + * written by user-space to reflect the last read data, after issuing * an smp_mb() to separate the data read from the ->data_tail store. * In this case the kernel will not over-write unread data. * @@ -739,7 +749,7 @@ struct perf_event_mmap_page { /* * AUX area is defined by aux_{offset,size} fields that should be set - * by the userspace, so that + * by the user-space, so that * * aux_offset >= data_offset + data_size * @@ -813,7 +823,7 @@ struct perf_event_mmap_page { * Indicates that thread was preempted in TASK_RUNNING state. * * PERF_RECORD_MISC_MMAP_BUILD_ID: - * Indicates that mmap2 event carries build id data. 
+ * Indicates that mmap2 event carries build ID data. */ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) @@ -824,26 +834,26 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15) struct perf_event_header { - __u32 type; - __u16 misc; - __u16 size; + __u32 type; + __u16 misc; + __u16 size; }; struct perf_ns_link_info { - __u64 dev; - __u64 ino; + __u64 dev; + __u64 ino; }; enum { - NET_NS_INDEX = 0, - UTS_NS_INDEX = 1, - IPC_NS_INDEX = 2, - PID_NS_INDEX = 3, - USER_NS_INDEX = 4, - MNT_NS_INDEX = 5, - CGROUP_NS_INDEX = 6, - - NR_NAMESPACES, /* number of available namespaces */ + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + + NR_NAMESPACES, /* number of available namespaces */ }; enum perf_event_type { @@ -859,11 +869,11 @@ enum perf_event_type { * optional fields being ignored. * * struct sample_id { - * { u32 pid, tid; } && PERF_SAMPLE_TID - * { u64 time; } && PERF_SAMPLE_TIME - * { u64 id; } && PERF_SAMPLE_ID - * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID - * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 id; } && PERF_SAMPLE_IDENTIFIER * } && perf_event_attr::sample_id_all * @@ -874,7 +884,7 @@ enum perf_event_type { /* * The MMAP events record the PROT_EXEC mappings so that we can - * correlate userspace IPs to code. They have the following structure: + * correlate user-space IPs to code. They have the following structure: * * struct { * struct perf_event_header header; @@ -884,7 +894,7 @@ enum perf_event_type { * u64 len; * u64 pgoff; * char filename[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP = 1, @@ -894,7 +904,7 @@ enum perf_event_type { * struct perf_event_header header; * u64 id; * u64 lost; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_LOST = 2, @@ -905,7 +915,7 @@ enum perf_event_type { * * u32 pid, tid; * char comm[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_COMM = 3, @@ -916,7 +926,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_EXIT = 4, @@ -927,7 +937,7 @@ enum perf_event_type { * u64 time; * u64 id; * u64 stream_id; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_THROTTLE = 5, @@ -939,7 +949,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_FORK = 7, @@ -950,7 +960,7 @@ enum perf_event_type { * u32 pid, tid; * * struct read_format values; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_READ = 8, @@ -1005,12 +1015,12 @@ enum perf_event_type { * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * - * { u64 abi; # enum perf_sample_regs_abi - * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER + * { u64 abi; # enum perf_sample_regs_abi + * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER * - * { u64 size; - * char data[size]; - * u64 dyn_size; } && PERF_SAMPLE_STACK_USER + * { u64 size; + * char data[size]; + * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * * { 
union perf_sample_weight * { @@ -1035,10 +1045,11 @@ enum perf_event_type { * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR - * { u64 size; - * char data[size]; } && PERF_SAMPLE_AUX + * { u64 cgroup;} && PERF_SAMPLE_CGROUP * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE * { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE + * { u64 size; + * char data[size]; } && PERF_SAMPLE_AUX * }; */ PERF_RECORD_SAMPLE = 9, @@ -1070,7 +1081,7 @@ enum perf_event_type { * }; * u32 prot, flags; * char filename[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP2 = 10, @@ -1079,12 +1090,12 @@ enum perf_event_type { * Records that new data landed in the AUX buffer part. * * struct { - * struct perf_event_header header; + * struct perf_event_header header; * - * u64 aux_offset; - * u64 aux_size; + * u64 aux_offset; + * u64 aux_size; * u64 flags; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_AUX = 11, @@ -1167,7 +1178,7 @@ enum perf_event_type { PERF_RECORD_KSYMBOL = 17, /* - * Record bpf events: + * Record BPF events: * enum perf_bpf_event_type { * PERF_BPF_EVENT_UNKNOWN = 0, * PERF_BPF_EVENT_PROG_LOAD = 1, @@ -1245,181 +1256,181 @@ enum perf_record_ksymbol_type { #define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0) enum perf_bpf_event_type { - PERF_BPF_EVENT_UNKNOWN = 0, - PERF_BPF_EVENT_PROG_LOAD = 1, - PERF_BPF_EVENT_PROG_UNLOAD = 2, - PERF_BPF_EVENT_MAX, /* non-ABI */ + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX, /* non-ABI */ }; -#define PERF_MAX_STACK_DEPTH 127 -#define PERF_MAX_CONTEXTS_PER_STACK 8 +#define PERF_MAX_STACK_DEPTH 127 +#define PERF_MAX_CONTEXTS_PER_STACK 8 enum perf_callchain_context { - PERF_CONTEXT_HV = (__u64)-32, - PERF_CONTEXT_KERNEL = (__u64)-128, - PERF_CONTEXT_USER = (__u64)-512, + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, - PERF_CONTEXT_GUEST = (__u64)-2048, - PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, - PERF_CONTEXT_GUEST_USER = (__u64)-2560, + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, - PERF_CONTEXT_MAX = (__u64)-4095, + PERF_CONTEXT_MAX = (__u64)-4095, }; /** * PERF_RECORD_AUX::flags bits */ -#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ -#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ -#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ -#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ +#define PERF_AUX_FLAG_TRUNCATED 0x0001 /* Record was truncated to fit */ +#define PERF_AUX_FLAG_OVERWRITE 0x0002 /* Snapshot from overwrite mode */ +#define PERF_AUX_FLAG_PARTIAL 0x0004 /* Record contains gaps */ +#define PERF_AUX_FLAG_COLLISION 0x0008 /* Sample collided with another */ #define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */ /* CoreSight PMU AUX buffer formats */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ -#define PERF_FLAG_FD_NO_GROUP (1UL << 0) -#define 
PERF_FLAG_FD_OUTPUT (1UL << 1) -#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */ -#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ +#define PERF_FLAG_FD_NO_GROUP (1UL << 0) +#define PERF_FLAG_FD_OUTPUT (1UL << 1) +#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup ID, per-CPU mode only */ +#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ #if defined(__LITTLE_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_op:5, /* type of opcode */ - mem_lvl:14, /* memory hierarchy level */ - mem_snoop:5, /* snoop mode */ - mem_lock:2, /* lock instr */ - mem_dtlb:7, /* tlb access */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_remote:1, /* remote */ - mem_snoopx:2, /* snoop mode, ext */ - mem_blk:3, /* access blocked */ - mem_hops:3, /* hop level */ - mem_rsvd:18; + __u64 mem_op : 5, /* Type of opcode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_snoop : 5, /* Snoop mode */ + mem_lock : 2, /* Lock instr */ + mem_dtlb : 7, /* TLB access */ + mem_lvl_num : 4, /* Memory hierarchy level number */ + mem_remote : 1, /* Remote */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_blk : 3, /* Access blocked */ + mem_hops : 3, /* Hop level */ + mem_rsvd : 18; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_rsvd:18, - mem_hops:3, /* hop level */ - mem_blk:3, /* access blocked */ - mem_snoopx:2, /* snoop mode, ext */ - mem_remote:1, /* remote */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_dtlb:7, /* tlb access */ - mem_lock:2, /* lock instr */ - mem_snoop:5, /* snoop mode */ - mem_lvl:14, /* memory hierarchy level */ - mem_op:5; /* type of opcode */ + __u64 mem_rsvd : 18, + mem_hops : 3, /* Hop level */ + mem_blk : 3, /* Access blocked */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_remote : 1, /* Remote */ + mem_lvl_num : 4, /* Memory hierarchy level number */ + mem_dtlb : 7, /* TLB access */ + mem_lock : 2, /* Lock instr */ + mem_snoop : 5, /* Snoop mode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_op : 5; /* Type of opcode */ }; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif -/* type of opcode (load/store/prefetch,code) */ -#define PERF_MEM_OP_NA 0x01 /* not available */ -#define PERF_MEM_OP_LOAD 0x02 /* load instruction */ -#define PERF_MEM_OP_STORE 0x04 /* store instruction */ -#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */ -#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */ -#define PERF_MEM_OP_SHIFT 0 +/* Type of memory opcode: */ +#define PERF_MEM_OP_NA 0x0001 /* Not available */ +#define PERF_MEM_OP_LOAD 0x0002 /* Load instruction */ +#define PERF_MEM_OP_STORE 0x0004 /* Store instruction */ +#define PERF_MEM_OP_PFETCH 0x0008 /* Prefetch */ +#define PERF_MEM_OP_EXEC 0x0010 /* Code (execution) */ +#define PERF_MEM_OP_SHIFT 0 /* - * PERF_MEM_LVL_* namespace being depricated to some extent in the + * The PERF_MEM_LVL_* namespace is being deprecated to some extent in * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields. - * Supporting this namespace inorder to not break defined ABIs. + * We support this namespace in order to not break defined ABIs. 
* - * memory hierarchy (memory level, hit or miss) + * Memory hierarchy (memory level, hit or miss) */ -#define PERF_MEM_LVL_NA 0x01 /* not available */ -#define PERF_MEM_LVL_HIT 0x02 /* hit level */ -#define PERF_MEM_LVL_MISS 0x04 /* miss level */ -#define PERF_MEM_LVL_L1 0x08 /* L1 */ -#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */ -#define PERF_MEM_LVL_L2 0x20 /* L2 */ -#define PERF_MEM_LVL_L3 0x40 /* L3 */ -#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */ -#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */ -#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */ -#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */ -#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */ -#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ -#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ -#define PERF_MEM_LVL_SHIFT 5 - -#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */ -#define PERF_MEM_REMOTE_SHIFT 37 - -#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */ -#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ -#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ -#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ -#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */ -/* 0x7 available */ -#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */ -#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */ -#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */ -#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ -#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ -#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ -#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ - -#define PERF_MEM_LVLNUM_SHIFT 33 - -/* snoop mode */ -#define PERF_MEM_SNOOP_NA 0x01 /* not available */ -#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */ -#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */ -#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */ -#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */ -#define PERF_MEM_SNOOP_SHIFT 19 - -#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ -#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */ -#define PERF_MEM_SNOOPX_SHIFT 38 - -/* locked instruction */ -#define PERF_MEM_LOCK_NA 0x01 /* not available */ -#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */ -#define PERF_MEM_LOCK_SHIFT 24 +#define PERF_MEM_LVL_NA 0x0001 /* Not available */ +#define PERF_MEM_LVL_HIT 0x0002 /* Hit level */ +#define PERF_MEM_LVL_MISS 0x0004 /* Miss level */ +#define PERF_MEM_LVL_L1 0x0008 /* L1 */ +#define PERF_MEM_LVL_LFB 0x0010 /* Line Fill Buffer */ +#define PERF_MEM_LVL_L2 0x0020 /* L2 */ +#define PERF_MEM_LVL_L3 0x0040 /* L3 */ +#define PERF_MEM_LVL_LOC_RAM 0x0080 /* Local DRAM */ +#define PERF_MEM_LVL_REM_RAM1 0x0100 /* Remote DRAM (1 hop) */ +#define PERF_MEM_LVL_REM_RAM2 0x0200 /* Remote DRAM (2 hops) */ +#define PERF_MEM_LVL_REM_CCE1 0x0400 /* Remote Cache (1 hop) */ +#define PERF_MEM_LVL_REM_CCE2 0x0800 /* Remote Cache (2 hops) */ +#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ +#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ +#define PERF_MEM_LVL_SHIFT 5 + +#define PERF_MEM_REMOTE_REMOTE 0x0001 /* Remote */ +#define PERF_MEM_REMOTE_SHIFT 37 + +#define PERF_MEM_LVLNUM_L1 0x0001 /* L1 */ +#define PERF_MEM_LVLNUM_L2 0x0002 /* L2 */ +#define PERF_MEM_LVLNUM_L3 0x0003 /* L3 */ +#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */ +#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */ +/* 0x007 available */ +#define PERF_MEM_LVLNUM_UNC 0x0008 /* 
Uncached */ +#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */ +#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */ +#define PERF_MEM_LVLNUM_ANY_CACHE 0x000b /* Any cache */ +#define PERF_MEM_LVLNUM_LFB 0x000c /* LFB / L1 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_RAM 0x000d /* RAM */ +#define PERF_MEM_LVLNUM_PMEM 0x000e /* PMEM */ +#define PERF_MEM_LVLNUM_NA 0x000f /* N/A */ + +#define PERF_MEM_LVLNUM_SHIFT 33 + +/* Snoop mode */ +#define PERF_MEM_SNOOP_NA 0x0001 /* Not available */ +#define PERF_MEM_SNOOP_NONE 0x0002 /* No snoop */ +#define PERF_MEM_SNOOP_HIT 0x0004 /* Snoop hit */ +#define PERF_MEM_SNOOP_MISS 0x0008 /* Snoop miss */ +#define PERF_MEM_SNOOP_HITM 0x0010 /* Snoop hit modified */ +#define PERF_MEM_SNOOP_SHIFT 19 + +#define PERF_MEM_SNOOPX_FWD 0x0001 /* Forward */ +#define PERF_MEM_SNOOPX_PEER 0x0002 /* Transfer from peer */ +#define PERF_MEM_SNOOPX_SHIFT 38 + +/* Locked instruction */ +#define PERF_MEM_LOCK_NA 0x0001 /* Not available */ +#define PERF_MEM_LOCK_LOCKED 0x0002 /* Locked transaction */ +#define PERF_MEM_LOCK_SHIFT 24 /* TLB access */ -#define PERF_MEM_TLB_NA 0x01 /* not available */ -#define PERF_MEM_TLB_HIT 0x02 /* hit level */ -#define PERF_MEM_TLB_MISS 0x04 /* miss level */ -#define PERF_MEM_TLB_L1 0x08 /* L1 */ -#define PERF_MEM_TLB_L2 0x10 /* L2 */ -#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/ -#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */ -#define PERF_MEM_TLB_SHIFT 26 +#define PERF_MEM_TLB_NA 0x0001 /* Not available */ +#define PERF_MEM_TLB_HIT 0x0002 /* Hit level */ +#define PERF_MEM_TLB_MISS 0x0004 /* Miss level */ +#define PERF_MEM_TLB_L1 0x0008 /* L1 */ +#define PERF_MEM_TLB_L2 0x0010 /* L2 */ +#define PERF_MEM_TLB_WK 0x0020 /* Hardware Walker*/ +#define PERF_MEM_TLB_OS 0x0040 /* OS fault handler */ +#define PERF_MEM_TLB_SHIFT 26 /* Access blocked */ -#define PERF_MEM_BLK_NA 0x01 /* not available */ -#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */ -#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */ -#define PERF_MEM_BLK_SHIFT 40 - -/* hop level */ -#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */ -#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ -#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ -#define PERF_MEM_HOPS_3 0x04 /* remote board */ +#define PERF_MEM_BLK_NA 0x0001 /* Not available */ +#define PERF_MEM_BLK_DATA 0x0002 /* Data could not be forwarded */ +#define PERF_MEM_BLK_ADDR 0x0004 /* Address conflict */ +#define PERF_MEM_BLK_SHIFT 40 + +/* Hop level */ +#define PERF_MEM_HOPS_0 0x0001 /* Remote core, same node */ +#define PERF_MEM_HOPS_1 0x0002 /* Remote node, same socket */ +#define PERF_MEM_HOPS_2 0x0003 /* Remote socket, same board */ +#define PERF_MEM_HOPS_3 0x0004 /* Remote board */ /* 5-7 available */ -#define PERF_MEM_HOPS_SHIFT 43 +#define PERF_MEM_HOPS_SHIFT 43 #define PERF_MEM_S(a, s) \ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) /* - * single taken branch record layout: + * Layout of single taken branch records: * * from: source instruction (may not always be a branch insn) * to: branch target @@ -1438,37 +1449,37 @@ union perf_mem_data_src { struct perf_branch_entry { __u64 from; __u64 to; - __u64 mispred:1, /* target mispredicted */ - predicted:1,/* target predicted */ - in_tx:1, /* in transaction */ - abort:1, /* transaction abort */ - cycles:16, /* cycle count to last branch */ - type:4, /* branch type */ - spec:2, /* branch speculation info */ - new_type:4, /* additional branch type */ - priv:3, /* privilege level */ - reserved:31; + __u64 mispred : 1, /* 
target mispredicted */ + predicted : 1, /* target predicted */ + in_tx : 1, /* in transaction */ + abort : 1, /* transaction abort */ + cycles : 16, /* cycle count to last branch */ + type : 4, /* branch type */ + spec : 2, /* branch speculation info */ + new_type : 4, /* additional branch type */ + priv : 3, /* privilege level */ + reserved : 31; }; /* Size of used info bits in struct perf_branch_entry */ #define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 union perf_sample_weight { - __u64 full; + __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) struct { - __u32 var1_dw; - __u16 var2_w; - __u16 var3_w; + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; }; #elif defined(__BIG_ENDIAN_BITFIELD) struct { - __u16 var3_w; - __u16 var2_w; - __u32 var1_dw; + __u16 var3_w; + __u16 var2_w; + __u32 var1_dw; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif }; diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index 35791791a879..43dec6eed559 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -230,7 +230,7 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) -/* Tagged user address controls for arm64 */ +/* Tagged user address controls for arm64 and RISC-V */ #define PR_SET_TAGGED_ADDR_CTRL 55 #define PR_GET_TAGGED_ADDR_CTRL 56 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) @@ -244,6 +244,9 @@ struct prctl_mm_map { # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) /* Unused; kept only for source compatibility */ # define PR_MTE_TCF_SHIFT 1 +/* RISC-V pointer masking tag length */ +# define PR_PMLEN_SHIFT 24 +# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT) /* Control reclaim behavior when allocating memory */ #define PR_SET_IO_FLUSHER 57 @@ -328,4 +331,44 @@ struct prctl_mm_map { # define PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */ # define PR_PPC_DEXCR_CTRL_MASK 0x1f +/* + * Get the current shadow stack configuration for the current thread, + * this will be the value configured via PR_SET_SHADOW_STACK_STATUS. + */ +#define PR_GET_SHADOW_STACK_STATUS 74 + +/* + * Set the current shadow stack configuration. Enabling the shadow + * stack will cause a shadow stack to be allocated for the thread. + */ +#define PR_SET_SHADOW_STACK_STATUS 75 +# define PR_SHADOW_STACK_ENABLE (1UL << 0) +# define PR_SHADOW_STACK_WRITE (1UL << 1) +# define PR_SHADOW_STACK_PUSH (1UL << 2) + +/* + * Prevent further changes to the specified shadow stack + * configuration. All bits may be locked via this call, including + * undefined bits. + */ +#define PR_LOCK_SHADOW_STACK_STATUS 76 + +/* + * Controls the mode of timer_create() for CRIU restore operations. + * Enabling this allows CRIU to restore timers with explicit IDs. + * + * Don't use for normal operations as the result might be undefined. 
+ */ +#define PR_TIMER_CREATE_RESTORE_IDS 77 +# define PR_TIMER_CREATE_RESTORE_IDS_OFF 0 +# define PR_TIMER_CREATE_RESTORE_IDS_ON 1 +# define PR_TIMER_CREATE_RESTORE_IDS_GET 2 + +/* FUTEX hash management */ +#define PR_FUTEX_HASH 78 +# define PR_FUTEX_HASH_SET_SLOTS 1 +# define FH_FLAG_IMMUTABLE (1ULL << 0) +# define PR_FUTEX_HASH_GET_SLOTS 2 +# define PR_FUTEX_HASH_GET_IMMUTABLE 3 + #endif /* _LINUX_PRCTL_H */ diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 687c5eafb49a..98c4713c1b09 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -851,12 +851,14 @@ int arch_decode_hint_reg(u8 sp_reg, int *base) bool arch_is_retpoline(struct symbol *sym) { - return !strncmp(sym->name, "__x86_indirect_", 15); + return !strncmp(sym->name, "__x86_indirect_", 15) || + !strncmp(sym->name, "__pi___x86_indirect_", 20); } bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk"); + return !strcmp(sym->name, "__x86_return_thunk") || + !strcmp(sym->name, "__pi___x86_return_thunk"); } bool arch_is_embedded_insn(struct symbol *sym) diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 727a3a4fd9d7..ca5d77db692a 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -572,6 +572,34 @@ err: return -1; } +static int mark_group_syms(struct elf *elf) +{ + struct section *symtab, *sec; + struct symbol *sym; + + symtab = find_section_by_name(elf, ".symtab"); + if (!symtab) { + ERROR("no .symtab"); + return -1; + } + + list_for_each_entry(sec, &elf->sections, list) { + if (sec->sh.sh_type == SHT_GROUP && + sec->sh.sh_link == symtab->idx) { + sym = find_symbol_by_index(elf, sec->sh.sh_info); + if (!sym) { + ERROR("%s: can't find SHT_GROUP signature symbol", + sec->name); + return -1; + } + + sym->group_sec = sec; + } + } + + return 0; +} + /* * @sym's idx has changed. Update the relocs which reference it. */ @@ -745,7 +773,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym) /* * Move the first global symbol, as per sh_info, into a new, higher - * symbol index. This fees up a spot for a new local symbol. + * symbol index. This frees up a spot for a new local symbol. 
*/ first_non_local = symtab->sh.sh_info; old = find_symbol_by_index(elf, first_non_local); @@ -763,6 +791,11 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym) if (elf_update_sym_relocs(elf, old)) return NULL; + if (old->group_sec) { + old->group_sec->sh.sh_info = new_idx; + mark_sec_changed(elf, old->group_sec, true); + } + new_idx = first_non_local; } @@ -1035,6 +1068,9 @@ struct elf *elf_open_read(const char *name, int flags) if (read_symbols(elf)) goto err; + if (mark_group_syms(elf)) + goto err; + if (read_relocs(elf)) goto err; diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index c7c4e87ebe88..0a2fa3ac0079 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -72,6 +72,7 @@ struct symbol { u8 ignore : 1; struct list_head pv_target; struct reloc *relocs; + struct section *group_sec; }; struct reloc { diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build index 279ab2ab4abe..b558ab98719f 100644 --- a/tools/perf/bench/Build +++ b/tools/perf/bench/Build @@ -3,6 +3,7 @@ perf-bench-y += sched-pipe.o perf-bench-y += sched-seccomp-notify.o perf-bench-y += syscall.o perf-bench-y += mem-functions.o +perf-bench-y += futex.o perf-bench-y += futex-hash.o perf-bench-y += futex-wake.o perf-bench-y += futex-wake-parallel.o diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c index b472eded521b..fdf133c9520f 100644 --- a/tools/perf/bench/futex-hash.c +++ b/tools/perf/bench/futex-hash.c @@ -18,9 +18,11 @@ #include <stdlib.h> #include <linux/compiler.h> #include <linux/kernel.h> +#include <linux/prctl.h> #include <linux/zalloc.h> #include <sys/time.h> #include <sys/mman.h> +#include <sys/prctl.h> #include <perf/cpumap.h> #include "../util/mutex.h" @@ -50,9 +52,12 @@ struct worker { static struct bench_futex_parameters params = { .nfutexes = 1024, .runtime = 10, + .nbuckets = -1, }; static const struct option options[] = { + OPT_INTEGER( 'b', "buckets", ¶ms.nbuckets, "Specify amount of hash buckets"), + OPT_BOOLEAN( 'I', "immutable", ¶ms.buckets_immutable, "Make the hash buckets immutable"), OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"), OPT_UINTEGER('r', "runtime", ¶ms.runtime, "Specify runtime (in seconds)"), OPT_UINTEGER('f', "futexes", ¶ms.nfutexes, "Specify amount of futexes per threads"), @@ -118,6 +123,7 @@ static void print_summary(void) printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg), (int)bench__runtime.tv_sec); + futex_print_nbuckets(¶ms); } int bench_futex_hash(int argc, const char **argv) @@ -161,6 +167,7 @@ int bench_futex_hash(int argc, const char **argv) if (!params.fshared) futex_flag = FUTEX_PRIVATE_FLAG; + futex_set_nbuckets_param(¶ms); printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n", getpid(), params.nthreads, params.nfutexes, params.fshared ? 
"shared":"private", params.runtime); diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c index 0416120c091b..5144a158512c 100644 --- a/tools/perf/bench/futex-lock-pi.c +++ b/tools/perf/bench/futex-lock-pi.c @@ -41,10 +41,13 @@ static struct stats throughput_stats; static struct cond thread_parent, thread_worker; static struct bench_futex_parameters params = { + .nbuckets = -1, .runtime = 10, }; static const struct option options[] = { + OPT_INTEGER( 'b', "buckets", ¶ms.nbuckets, "Specify amount of hash buckets"), + OPT_BOOLEAN( 'I', "immutable", ¶ms.buckets_immutable, "Make the hash buckets immutable"), OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"), OPT_UINTEGER('r', "runtime", ¶ms.runtime, "Specify runtime (in seconds)"), OPT_BOOLEAN( 'M', "multi", ¶ms.multi, "Use multiple futexes"), @@ -67,6 +70,7 @@ static void print_summary(void) printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg), (int)bench__runtime.tv_sec); + futex_print_nbuckets(¶ms); } static void toggle_done(int sig __maybe_unused, @@ -203,6 +207,7 @@ int bench_futex_lock_pi(int argc, const char **argv) mutex_init(&thread_lock); cond_init(&thread_parent); cond_init(&thread_worker); + futex_set_nbuckets_param(¶ms); threads_starting = params.nthreads; gettimeofday(&bench__start, NULL); diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c index aad5bfc4fe18..a2f91ee1950b 100644 --- a/tools/perf/bench/futex-requeue.c +++ b/tools/perf/bench/futex-requeue.c @@ -42,6 +42,7 @@ static unsigned int threads_starting; static int futex_flag = 0; static struct bench_futex_parameters params = { + .nbuckets = -1, /* * How many tasks to requeue at a time. * Default to 1 in order to make the kernel work more. @@ -50,6 +51,8 @@ static struct bench_futex_parameters params = { }; static const struct option options[] = { + OPT_INTEGER( 'b', "buckets", ¶ms.nbuckets, "Specify amount of hash buckets"), + OPT_BOOLEAN( 'I', "immutable", ¶ms.buckets_immutable, "Make the hash buckets immutable"), OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"), OPT_UINTEGER('q', "nrequeue", ¶ms.nrequeue, "Specify amount of threads to requeue at once"), OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"), @@ -77,6 +80,7 @@ static void print_summary(void) params.nthreads, requeuetime_avg / USEC_PER_MSEC, rel_stddev_stats(requeuetime_stddev, requeuetime_avg)); + futex_print_nbuckets(¶ms); } static void *workerfn(void *arg __maybe_unused) @@ -204,6 +208,8 @@ int bench_futex_requeue(int argc, const char **argv) if (params.broadcast) params.nrequeue = params.nthreads; + futex_set_nbuckets_param(¶ms); + printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %s%p), " "%d at a time.\n\n", getpid(), params.nthreads, params.fshared ? 
"shared":"private", &futex1, diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c index 4352e318631e..ee66482c29fd 100644 --- a/tools/perf/bench/futex-wake-parallel.c +++ b/tools/perf/bench/futex-wake-parallel.c @@ -57,9 +57,13 @@ static struct stats waketime_stats, wakeup_stats; static unsigned int threads_starting; static int futex_flag = 0; -static struct bench_futex_parameters params; +static struct bench_futex_parameters params = { + .nbuckets = -1, +}; static const struct option options[] = { + OPT_INTEGER( 'b', "buckets", ¶ms.nbuckets, "Specify amount of hash buckets"), + OPT_BOOLEAN( 'I', "immutable", ¶ms.buckets_immutable, "Make the hash buckets immutable"), OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"), OPT_UINTEGER('w', "nwakers", ¶ms.nwakes, "Specify amount of waking threads"), OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"), @@ -218,6 +222,7 @@ static void print_summary(void) params.nthreads, waketime_avg / USEC_PER_MSEC, rel_stddev_stats(waketime_stddev, waketime_avg)); + futex_print_nbuckets(¶ms); } @@ -291,6 +296,8 @@ int bench_futex_wake_parallel(int argc, const char **argv) if (!params.fshared) futex_flag = FUTEX_PRIVATE_FLAG; + futex_set_nbuckets_param(¶ms); + printf("Run summary [PID %d]: blocking on %d threads (at [%s] " "futex %p), %d threads waking up %d at a time.\n\n", getpid(), params.nthreads, params.fshared ? "shared":"private", diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c index 49b3c89b0b35..8d6107f7cd94 100644 --- a/tools/perf/bench/futex-wake.c +++ b/tools/perf/bench/futex-wake.c @@ -42,6 +42,7 @@ static unsigned int threads_starting; static int futex_flag = 0; static struct bench_futex_parameters params = { + .nbuckets = -1, /* * How many wakeups to do at a time. * Default to 1 in order to make the kernel work more. @@ -50,6 +51,8 @@ static struct bench_futex_parameters params = { }; static const struct option options[] = { + OPT_INTEGER( 'b', "buckets", ¶ms.nbuckets, "Specify amount of hash buckets"), + OPT_BOOLEAN( 'I', "immutable", ¶ms.buckets_immutable, "Make the hash buckets immutable"), OPT_UINTEGER('t', "threads", ¶ms.nthreads, "Specify amount of threads"), OPT_UINTEGER('w', "nwakes", ¶ms.nwakes, "Specify amount of threads to wake at once"), OPT_BOOLEAN( 's', "silent", ¶ms.silent, "Silent mode: do not display data/details"), @@ -93,6 +96,7 @@ static void print_summary(void) params.nthreads, waketime_avg / USEC_PER_MSEC, rel_stddev_stats(waketime_stddev, waketime_avg)); + futex_print_nbuckets(¶ms); } static void block_threads(pthread_t *w, struct perf_cpu_map *cpu) diff --git a/tools/perf/bench/futex.c b/tools/perf/bench/futex.c new file mode 100644 index 000000000000..26382e4d8d4c --- /dev/null +++ b/tools/perf/bench/futex.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <err.h> +#include <stdio.h> +#include <stdlib.h> +#include <linux/prctl.h> +#include <sys/prctl.h> + +#include "futex.h" + +void futex_set_nbuckets_param(struct bench_futex_parameters *params) +{ + unsigned long flags; + int ret; + + if (params->nbuckets < 0) + return; + + flags = params->buckets_immutable ? 
FH_FLAG_IMMUTABLE : 0; + ret = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, params->nbuckets, flags); + if (ret) { + printf("Requesting %d hash buckets failed: %d/%m\n", + params->nbuckets, ret); + err(EXIT_FAILURE, "prctl(PR_FUTEX_HASH)"); + } +} + +void futex_print_nbuckets(struct bench_futex_parameters *params) +{ + char *futex_hash_mode; + int ret; + + ret = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS); + if (params->nbuckets >= 0) { + if (ret != params->nbuckets) { + if (ret < 0) { + printf("Can't query number of buckets: %m\n"); + err(EXIT_FAILURE, "prctl(PR_FUTEX_HASH)"); + } + printf("Requested number of hash buckets is not currently in use.\n"); + printf("Requested: %d, in use: %d\n", params->nbuckets, ret); + err(EXIT_FAILURE, "prctl(PR_FUTEX_HASH)"); + } + if (params->nbuckets == 0) { + ret = asprintf(&futex_hash_mode, "Futex hashing: global hash"); + } else { + ret = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_IMMUTABLE); + if (ret < 0) { + printf("Can't check if the hash is immutable: %m\n"); + err(EXIT_FAILURE, "prctl(PR_FUTEX_HASH)"); + } + ret = asprintf(&futex_hash_mode, "Futex hashing: %d hash buckets %s", + params->nbuckets, + ret == 1 ? "(immutable)" : ""); + } + } else { + if (ret <= 0) { + ret = asprintf(&futex_hash_mode, "Futex hashing: global hash"); + } else { + ret = asprintf(&futex_hash_mode, "Futex hashing: auto resized to %d buckets", + ret); + } + } + if (ret < 0) + err(EXIT_FAILURE, "ENOMEM, futex_hash_mode"); + printf("%s\n", futex_hash_mode); + free(futex_hash_mode); +} diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h index ebdc2b032afc..9c9a73f9d865 100644 --- a/tools/perf/bench/futex.h +++ b/tools/perf/bench/futex.h @@ -25,6 +25,8 @@ struct bench_futex_parameters { unsigned int nfutexes; unsigned int nwakes; unsigned int nrequeue; + int nbuckets; + bool buckets_immutable; }; /** @@ -143,4 +145,7 @@ futex_cmp_requeue_pi(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, val, opflags); } +void futex_set_nbuckets_param(struct bench_futex_parameters *params); +void futex_print_nbuckets(struct bench_futex_parameters *params); + #endif /* _FUTEX_H */ diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 857f6646cc23..e9fab20e9330 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -186,7 +186,7 @@ done # diff with extra ignore lines check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))" -I"^#include <linux/cfi_types.h>"' check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"' -check arch/x86/include/asm/amd-ibs.h '-I "^#include [<\"]\(asm/\)*msr-index.h"' +check arch/x86/include/asm/amd/ibs.h '-I "^#include [<\"]\(asm/\)*msr-index.h"' check arch/arm64/include/asm/cputype.h '-I "^#include [<\"]\(asm/\)*sysreg.h"' check include/linux/unaligned.h '-I "^#include <linux/unaligned/packed_struct.h>" -I "^#include <asm/byteorder.h>" -I "^#pragma GCC diagnostic"' check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"' diff --git a/tools/perf/util/amd-sample-raw.c b/tools/perf/util/amd-sample-raw.c index 9d0ce88e90e4..456ce64ad822 100644 --- a/tools/perf/util/amd-sample-raw.c +++ b/tools/perf/util/amd-sample-raw.c @@ -9,7 +9,7 @@ #include <inttypes.h> #include <linux/string.h> -#include "../../arch/x86/include/asm/amd-ibs.h" +#include "../../arch/x86/include/asm/amd/ibs.h" #include "debug.h" #include 
"session.h" diff --git a/tools/testing/crypto/chacha20-s390/test-cipher.c b/tools/testing/crypto/chacha20-s390/test-cipher.c index 35ea65c54ffa..827507844e8f 100644 --- a/tools/testing/crypto/chacha20-s390/test-cipher.c +++ b/tools/testing/crypto/chacha20-s390/test-cipher.c @@ -50,7 +50,7 @@ struct skcipher_def { /* Perform cipher operations with the chacha lib */ static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u8 iv[16], key[32]; u64 start, end; @@ -66,10 +66,10 @@ static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain) } /* Encrypt */ - chacha_init(chacha_state, (u32 *)key, iv); + chacha_init(&chacha_state, (u32 *)key, iv); start = ktime_get_ns(); - chacha_crypt_arch(chacha_state, cipher, plain, data_size, 20); + chacha_crypt_arch(&chacha_state, cipher, plain, data_size, 20); end = ktime_get_ns(); @@ -81,10 +81,10 @@ static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain) pr_info("lib encryption took: %lld nsec", end - start); /* Decrypt */ - chacha_init(chacha_state, (u32 *)key, iv); + chacha_init(&chacha_state, (u32 *)key, iv); start = ktime_get_ns(); - chacha_crypt_arch(chacha_state, revert, cipher, data_size, 20); + chacha_crypt_arch(&chacha_state, revert, cipher, data_size, 20); end = ktime_get_ns(); if (debug) diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config index 422e186cf3cf..e70c502a16df 100644 --- a/tools/testing/kunit/configs/all_tests.config +++ b/tools/testing/kunit/configs/all_tests.config @@ -10,6 +10,7 @@ CONFIG_KUNIT_EXAMPLE_TEST=y CONFIG_KUNIT_ALL_TESTS=y CONFIG_FORTIFY_SOURCE=y +CONFIG_INIT_STACK_ALL_PATTERN=y CONFIG_IIO=y diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py index 10ff65689dd8..80fa4e354a17 100644 --- a/tools/testing/kunit/kunit_json.py +++ b/tools/testing/kunit/kunit_json.py @@ -39,10 +39,20 @@ def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj: status = _status_map.get(subtest.status, "FAIL") test_cases.append({"name": subtest.name, "status": status}) + test_counts = test.counts + counts_json = { + "tests": test_counts.total(), + "passed": test_counts.passed, + "failed": test_counts.failed, + "crashed": test_counts.crashed, + "skipped": test_counts.skipped, + "errors": test_counts.errors, + } test_group = { "name": test.name, "sub_groups": sub_groups, "test_cases": test_cases, + "misc": counts_json } test_group.update(common_fields) return test_group diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index d3f39bc1ceec..260d8d9aa1db 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -14,6 +14,7 @@ import os import shlex import shutil import signal +import sys import threading from typing import Iterator, List, Optional, Tuple from types import FrameType @@ -201,6 +202,13 @@ def _default_qemu_config_path(arch: str) -> str: return config_path options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')] + + if arch == 'help': + print('um') + for option in options: + print(option) + sys.exit() + raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options))) def _get_qemu_ops(config_path: str, diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py index 7ec38d4131f7..5b4c895d5d5a 100644 --- a/tools/testing/kunit/qemu_configs/powerpc.py +++ b/tools/testing/kunit/qemu_configs/powerpc.py @@ 
-3,6 +3,7 @@ from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='powerpc', kconfig=''' CONFIG_PPC64=y +CONFIG_CPU_BIG_ENDIAN=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_HVC_CONSOLE=y''', diff --git a/tools/testing/kunit/qemu_configs/powerpc32.py b/tools/testing/kunit/qemu_configs/powerpc32.py new file mode 100644 index 000000000000..88bd60dbb948 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/powerpc32.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 + +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='powerpc', + kconfig=''' +CONFIG_PPC32=y +CONFIG_CPU_BIG_ENDIAN=y +CONFIG_ADB_CUDA=y +CONFIG_SERIAL_PMACZILOG=y +CONFIG_SERIAL_PMACZILOG_TTYS=y +CONFIG_SERIAL_PMACZILOG_CONSOLE=y +''', + qemu_arch='ppc', + kernel_path='vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=['-M', 'g3beige', '-cpu', 'max']) diff --git a/tools/testing/kunit/qemu_configs/powerpcle.py b/tools/testing/kunit/qemu_configs/powerpcle.py new file mode 100644 index 000000000000..7ddee8af4bd7 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/powerpcle.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 + +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='powerpc', + kconfig=''' +CONFIG_PPC64=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_HVC_CONSOLE=y +''', + qemu_arch='ppc64', + kernel_path='vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=['-M', 'pseries', '-cpu', 'power8']) diff --git a/tools/testing/kunit/qemu_configs/riscv32.py b/tools/testing/kunit/qemu_configs/riscv32.py new file mode 100644 index 000000000000..b79ba0ae30f8 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/riscv32.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 + +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='riscv', + kconfig=''' +CONFIG_NONPORTABLE=y +CONFIG_ARCH_RV32I=y +CONFIG_ARCH_VIRT=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +''', + qemu_arch='riscv32', + kernel_path='arch/riscv/boot/Image', + kernel_command_line='console=ttyS0', + extra_qemu_params=['-machine', 'virt']) diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py index 256d9573b446..2019550a1b69 100644 --- a/tools/testing/kunit/qemu_configs/sparc.py +++ b/tools/testing/kunit/qemu_configs/sparc.py @@ -2,6 +2,8 @@ from ..qemu_config import QemuArchParams QEMU_ARCH = QemuArchParams(linux_arch='sparc', kconfig=''' +CONFIG_KUNIT_FAULT_TEST=n +CONFIG_SPARC32=y CONFIG_SERIAL_SUNZILOG=y CONFIG_SERIAL_SUNZILOG_CONSOLE=y ''', diff --git a/tools/testing/kunit/qemu_configs/sparc64.py b/tools/testing/kunit/qemu_configs/sparc64.py new file mode 100644 index 000000000000..53d4e5a8c972 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/sparc64.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 + +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='sparc', + kconfig=''' +CONFIG_64BIT=y +CONFIG_SPARC64=y +CONFIG_PCI=y +CONFIG_SERIAL_SUNSU=y +CONFIG_SERIAL_SUNSU_CONSOLE=y +''', + qemu_arch='sparc64', + kernel_path='arch/sparc/boot/image', + kernel_command_line='console=ttyS0 kunit_shutdown=poweroff', + extra_qemu_params=[]) diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 1bd403a5ef7b..0fd8c9b0d38f 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -526,6 +526,12 @@ extern const struct bench 
bench_trig_uprobe_multi_push; extern const struct bench bench_trig_uretprobe_multi_push; extern const struct bench bench_trig_uprobe_multi_ret; extern const struct bench bench_trig_uretprobe_multi_ret; +#ifdef __x86_64__ +extern const struct bench bench_trig_uprobe_nop5; +extern const struct bench bench_trig_uretprobe_nop5; +extern const struct bench bench_trig_uprobe_multi_nop5; +extern const struct bench bench_trig_uretprobe_multi_nop5; +#endif extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; @@ -586,6 +592,12 @@ static const struct bench *benchs[] = { &bench_trig_uretprobe_multi_push, &bench_trig_uprobe_multi_ret, &bench_trig_uretprobe_multi_ret, +#ifdef __x86_64__ + &bench_trig_uprobe_nop5, + &bench_trig_uretprobe_nop5, + &bench_trig_uprobe_multi_nop5, + &bench_trig_uretprobe_multi_nop5, +#endif /* ringbuf/perfbuf benchmarks */ &bench_rb_libbpf, &bench_rb_custom, diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c index 32e9f194d449..82327657846e 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -333,6 +333,20 @@ static void *uprobe_producer_ret(void *input) return NULL; } +#ifdef __x86_64__ +__nocf_check __weak void uprobe_target_nop5(void) +{ + asm volatile (".byte 0x0f, 0x1f, 0x44, 0x00, 0x00"); +} + +static void *uprobe_producer_nop5(void *input) +{ + while (true) + uprobe_target_nop5(); + return NULL; +} +#endif + static void usetup(bool use_retprobe, bool use_multi, void *target_addr) { size_t uprobe_offset; @@ -448,6 +462,28 @@ static void uretprobe_multi_ret_setup(void) usetup(true, true /* use_multi */, &uprobe_target_ret); } +#ifdef __x86_64__ +static void uprobe_nop5_setup(void) +{ + usetup(false, false /* !use_multi */, &uprobe_target_nop5); +} + +static void uretprobe_nop5_setup(void) +{ + usetup(true, false /* !use_multi */, &uprobe_target_nop5); +} + +static void uprobe_multi_nop5_setup(void) +{ + usetup(false, true /* use_multi */, &uprobe_target_nop5); +} + +static void uretprobe_multi_nop5_setup(void) +{ + usetup(true, true /* use_multi */, &uprobe_target_nop5); +} +#endif + const struct bench bench_trig_syscall_count = { .name = "trig-syscall-count", .validate = trigger_validate, @@ -506,3 +542,9 @@ BENCH_TRIG_USERMODE(uprobe_multi_ret, ret, "uprobe-multi-ret"); BENCH_TRIG_USERMODE(uretprobe_multi_nop, nop, "uretprobe-multi-nop"); BENCH_TRIG_USERMODE(uretprobe_multi_push, push, "uretprobe-multi-push"); BENCH_TRIG_USERMODE(uretprobe_multi_ret, ret, "uretprobe-multi-ret"); +#ifdef __x86_64__ +BENCH_TRIG_USERMODE(uprobe_nop5, nop5, "uprobe-nop5"); +BENCH_TRIG_USERMODE(uretprobe_nop5, nop5, "uretprobe-nop5"); +BENCH_TRIG_USERMODE(uprobe_multi_nop5, nop5, "uprobe-multi-nop5"); +BENCH_TRIG_USERMODE(uretprobe_multi_nop5, nop5, "uretprobe-multi-nop5"); +#endif diff --git a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh index af169f831f2f..03f55405484b 100755 --- a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh @@ -2,7 +2,7 @@ set -eufo pipefail -for i in usermode-count syscall-count {uprobe,uretprobe}-{nop,push,ret} +for i in usermode-count syscall-count {uprobe,uretprobe}-{nop,push,ret,nop5} do summary=$(sudo ./bench -w2 -d5 -a trig-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-) printf "%-15s: %s\n" $i "$summary" diff --git 
a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64 index 3720b7611523..e1495a4bbc99 100644 --- a/tools/testing/selftests/bpf/config.aarch64 +++ b/tools/testing/selftests/bpf/config.aarch64 @@ -158,7 +158,6 @@ CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TUN=y CONFIG_UNIX=y CONFIG_UPROBES=y -CONFIG_USELIB=y CONFIG_USER_NS=y CONFIG_VETH=y CONFIG_VLAN_8021Q=y diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x index 706931a8c2c6..26c3bc2ce11d 100644 --- a/tools/testing/selftests/bpf/config.s390x +++ b/tools/testing/selftests/bpf/config.s390x @@ -128,7 +128,6 @@ CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TUN=y CONFIG_UNIX=y CONFIG_UPROBES=y -CONFIG_USELIB=y CONFIG_USER_NS=y CONFIG_VETH=y CONFIG_VLAN_8021Q=y diff --git a/tools/testing/selftests/coredump/stackdump_test.c b/tools/testing/selftests/coredump/stackdump_test.c index 137b2364a082..9984413be9f0 100644 --- a/tools/testing/selftests/coredump/stackdump_test.c +++ b/tools/testing/selftests/coredump/stackdump_test.c @@ -1,14 +1,20 @@ // SPDX-License-Identifier: GPL-2.0 #include <fcntl.h> +#include <inttypes.h> #include <libgen.h> #include <linux/limits.h> #include <pthread.h> #include <string.h> +#include <sys/mount.h> #include <sys/resource.h> +#include <sys/stat.h> +#include <sys/socket.h> +#include <sys/un.h> #include <unistd.h> #include "../kselftest_harness.h" +#include "../pidfd/pidfd.h" #define STACKDUMP_FILE "stack_values" #define STACKDUMP_SCRIPT "stackdump" @@ -35,6 +41,7 @@ static void crashing_child(void) FIXTURE(coredump) { char original_core_pattern[256]; + pid_t pid_coredump_server; }; FIXTURE_SETUP(coredump) @@ -44,6 +51,7 @@ FIXTURE_SETUP(coredump) char *dir; int ret; + self->pid_coredump_server = -ESRCH; file = fopen("/proc/sys/kernel/core_pattern", "r"); ASSERT_NE(NULL, file); @@ -61,10 +69,17 @@ FIXTURE_TEARDOWN(coredump) { const char *reason; FILE *file; - int ret; + int ret, status; unlink(STACKDUMP_FILE); + if (self->pid_coredump_server > 0) { + kill(self->pid_coredump_server, SIGTERM); + waitpid(self->pid_coredump_server, &status, 0); + } + unlink("/tmp/coredump.file"); + unlink("/tmp/coredump.socket"); + file = fopen("/proc/sys/kernel/core_pattern", "w"); if (!file) { reason = "Unable to open core_pattern"; @@ -89,14 +104,14 @@ fail: fprintf(stderr, "Failed to cleanup stackdump test: %s\n", reason); } -TEST_F(coredump, stackdump) +TEST_F_TIMEOUT(coredump, stackdump, 120) { struct sigaction action = {}; unsigned long long stack; char *test_dir, *line; size_t line_length; char buf[PATH_MAX]; - int ret, i; + int ret, i, status; FILE *file; pid_t pid; @@ -129,6 +144,10 @@ TEST_F(coredump, stackdump) /* * Step 3: Wait for the stackdump script to write the stack pointers to the stackdump file */ + waitpid(pid, &status, 0); + ASSERT_TRUE(WIFSIGNALED(status)); + ASSERT_TRUE(WCOREDUMP(status)); + for (i = 0; i < 10; ++i) { file = fopen(STACKDUMP_FILE, "r"); if (file) @@ -138,14 +157,466 @@ TEST_F(coredump, stackdump) ASSERT_NE(file, NULL); /* Step 4: Make sure all stack pointer values are non-zero */ + line = NULL; for (i = 0; -1 != getline(&line, &line_length, file); ++i) { stack = strtoull(line, NULL, 10); ASSERT_NE(stack, 0); } + free(line); ASSERT_EQ(i, 1 + NUM_THREAD_SPAWN); fclose(file); } +TEST_F(coredump, socket) +{ + int fd, pidfd, ret, status; + FILE *file; + pid_t pid, pid_coredump_server; + struct stat st; + char core_file[PATH_MAX]; + struct pidfd_info info = {}; + int ipc_sockets[2]; + char c; + const struct sockaddr_un coredump_sk = { + 
.sun_family = AF_UNIX, + .sun_path = "/tmp/coredump.socket", + }; + size_t coredump_sk_len = offsetof(struct sockaddr_un, sun_path) + + sizeof("/tmp/coredump.socket"); + + ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets); + ASSERT_EQ(ret, 0); + + file = fopen("/proc/sys/kernel/core_pattern", "w"); + ASSERT_NE(file, NULL); + + ret = fprintf(file, "@/tmp/coredump.socket"); + ASSERT_EQ(ret, strlen("@/tmp/coredump.socket")); + ASSERT_EQ(fclose(file), 0); + + pid_coredump_server = fork(); + ASSERT_GE(pid_coredump_server, 0); + if (pid_coredump_server == 0) { + int fd_server, fd_coredump, fd_peer_pidfd, fd_core_file; + socklen_t fd_peer_pidfd_len; + + close(ipc_sockets[0]); + + fd_server = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0); + if (fd_server < 0) + _exit(EXIT_FAILURE); + + ret = bind(fd_server, (const struct sockaddr *)&coredump_sk, coredump_sk_len); + if (ret < 0) { + fprintf(stderr, "Failed to bind coredump socket\n"); + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + ret = listen(fd_server, 1); + if (ret < 0) { + fprintf(stderr, "Failed to listen on coredump socket\n"); + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + if (write_nointr(ipc_sockets[1], "1", 1) < 0) { + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + close(ipc_sockets[1]); + + fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC); + if (fd_coredump < 0) { + fprintf(stderr, "Failed to accept coredump socket connection\n"); + close(fd_server); + _exit(EXIT_FAILURE); + } + + fd_peer_pidfd_len = sizeof(fd_peer_pidfd); + ret = getsockopt(fd_coredump, SOL_SOCKET, SO_PEERPIDFD, + &fd_peer_pidfd, &fd_peer_pidfd_len); + if (ret < 0) { + fprintf(stderr, "%m - Failed to retrieve peer pidfd for coredump socket connection\n"); + close(fd_coredump); + close(fd_server); + _exit(EXIT_FAILURE); + } + + memset(&info, 0, sizeof(info)); + info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP; + ret = ioctl(fd_peer_pidfd, PIDFD_GET_INFO, &info); + if (ret < 0) { + fprintf(stderr, "Failed to retrieve pidfd info from peer pidfd for coredump socket connection\n"); + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + _exit(EXIT_FAILURE); + } + + if (!(info.mask & PIDFD_INFO_COREDUMP)) { + fprintf(stderr, "Missing coredump information from coredumping task\n"); + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + _exit(EXIT_FAILURE); + } + + if (!(info.coredump_mask & PIDFD_COREDUMPED)) { + fprintf(stderr, "Received connection from non-coredumping task\n"); + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + _exit(EXIT_FAILURE); + } + + fd_core_file = creat("/tmp/coredump.file", 0644); + if (fd_core_file < 0) { + fprintf(stderr, "Failed to create coredump file\n"); + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + _exit(EXIT_FAILURE); + } + + for (;;) { + char buffer[4096]; + ssize_t bytes_read, bytes_write; + + bytes_read = read(fd_coredump, buffer, sizeof(buffer)); + if (bytes_read < 0) { + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + close(fd_core_file); + _exit(EXIT_FAILURE); + } + + if (bytes_read == 0) + break; + + bytes_write = write(fd_core_file, buffer, bytes_read); + if (bytes_read != bytes_write) { + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + close(fd_core_file); + _exit(EXIT_FAILURE); + } + } + + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + close(fd_core_file); + _exit(EXIT_SUCCESS); + } + 
self->pid_coredump_server = pid_coredump_server; + + EXPECT_EQ(close(ipc_sockets[1]), 0); + ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1); + EXPECT_EQ(close(ipc_sockets[0]), 0); + + pid = fork(); + ASSERT_GE(pid, 0); + if (pid == 0) + crashing_child(); + + pidfd = sys_pidfd_open(pid, 0); + ASSERT_GE(pidfd, 0); + + waitpid(pid, &status, 0); + ASSERT_TRUE(WIFSIGNALED(status)); + ASSERT_TRUE(WCOREDUMP(status)); + + info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP; + ASSERT_EQ(ioctl(pidfd, PIDFD_GET_INFO, &info), 0); + ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0); + ASSERT_GT((info.coredump_mask & PIDFD_COREDUMPED), 0); + + waitpid(pid_coredump_server, &status, 0); + self->pid_coredump_server = -ESRCH; + ASSERT_TRUE(WIFEXITED(status)); + ASSERT_EQ(WEXITSTATUS(status), 0); + + ASSERT_EQ(stat("/tmp/coredump.file", &st), 0); + ASSERT_GT(st.st_size, 0); + /* + * We should somehow validate the produced core file. + * For now just allow for visual inspection + */ + system("file /tmp/coredump.file"); +} + +TEST_F(coredump, socket_detect_userspace_client) +{ + int fd, pidfd, ret, status; + FILE *file; + pid_t pid, pid_coredump_server; + struct stat st; + char core_file[PATH_MAX]; + struct pidfd_info info = {}; + int ipc_sockets[2]; + char c; + const struct sockaddr_un coredump_sk = { + .sun_family = AF_UNIX, + .sun_path = "/tmp/coredump.socket", + }; + size_t coredump_sk_len = offsetof(struct sockaddr_un, sun_path) + + sizeof("/tmp/coredump.socket"); + + file = fopen("/proc/sys/kernel/core_pattern", "w"); + ASSERT_NE(file, NULL); + + ret = fprintf(file, "@/tmp/coredump.socket"); + ASSERT_EQ(ret, strlen("@/tmp/coredump.socket")); + ASSERT_EQ(fclose(file), 0); + + ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets); + ASSERT_EQ(ret, 0); + + pid_coredump_server = fork(); + ASSERT_GE(pid_coredump_server, 0); + if (pid_coredump_server == 0) { + int fd_server, fd_coredump, fd_peer_pidfd, fd_core_file; + socklen_t fd_peer_pidfd_len; + + close(ipc_sockets[0]); + + fd_server = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0); + if (fd_server < 0) + _exit(EXIT_FAILURE); + + ret = bind(fd_server, (const struct sockaddr *)&coredump_sk, coredump_sk_len); + if (ret < 0) { + fprintf(stderr, "Failed to bind coredump socket\n"); + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + ret = listen(fd_server, 1); + if (ret < 0) { + fprintf(stderr, "Failed to listen on coredump socket\n"); + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + if (write_nointr(ipc_sockets[1], "1", 1) < 0) { + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + close(ipc_sockets[1]); + + fd_coredump = accept4(fd_server, NULL, NULL, SOCK_CLOEXEC); + if (fd_coredump < 0) { + fprintf(stderr, "Failed to accept coredump socket connection\n"); + close(fd_server); + _exit(EXIT_FAILURE); + } + + fd_peer_pidfd_len = sizeof(fd_peer_pidfd); + ret = getsockopt(fd_coredump, SOL_SOCKET, SO_PEERPIDFD, + &fd_peer_pidfd, &fd_peer_pidfd_len); + if (ret < 0) { + fprintf(stderr, "%m - Failed to retrieve peer pidfd for coredump socket connection\n"); + close(fd_coredump); + close(fd_server); + _exit(EXIT_FAILURE); + } + + memset(&info, 0, sizeof(info)); + info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP; + ret = ioctl(fd_peer_pidfd, PIDFD_GET_INFO, &info); + if (ret < 0) { + fprintf(stderr, "Failed to retrieve pidfd info from peer pidfd for coredump socket connection\n"); + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + _exit(EXIT_FAILURE); + } + + if 
(!(info.mask & PIDFD_INFO_COREDUMP)) { + fprintf(stderr, "Missing coredump information from coredumping task\n"); + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + _exit(EXIT_FAILURE); + } + + if (info.coredump_mask & PIDFD_COREDUMPED) { + fprintf(stderr, "Received unexpected connection from coredumping task\n"); + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + _exit(EXIT_FAILURE); + } + + close(fd_coredump); + close(fd_server); + close(fd_peer_pidfd); + close(fd_core_file); + _exit(EXIT_SUCCESS); + } + self->pid_coredump_server = pid_coredump_server; + + EXPECT_EQ(close(ipc_sockets[1]), 0); + ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1); + EXPECT_EQ(close(ipc_sockets[0]), 0); + + pid = fork(); + ASSERT_GE(pid, 0); + if (pid == 0) { + int fd_socket; + ssize_t ret; + + fd_socket = socket(AF_UNIX, SOCK_STREAM, 0); + if (fd_socket < 0) + _exit(EXIT_FAILURE); + + + ret = connect(fd_socket, (const struct sockaddr *)&coredump_sk, coredump_sk_len); + if (ret < 0) + _exit(EXIT_FAILURE); + + (void *)write(fd_socket, &(char){ 0 }, 1); + close(fd_socket); + _exit(EXIT_SUCCESS); + } + + pidfd = sys_pidfd_open(pid, 0); + ASSERT_GE(pidfd, 0); + + waitpid(pid, &status, 0); + ASSERT_TRUE(WIFEXITED(status)); + ASSERT_EQ(WEXITSTATUS(status), 0); + + info.mask = PIDFD_INFO_EXIT | PIDFD_INFO_COREDUMP; + ASSERT_EQ(ioctl(pidfd, PIDFD_GET_INFO, &info), 0); + ASSERT_GT((info.mask & PIDFD_INFO_COREDUMP), 0); + ASSERT_EQ((info.coredump_mask & PIDFD_COREDUMPED), 0); + + waitpid(pid_coredump_server, &status, 0); + self->pid_coredump_server = -ESRCH; + ASSERT_TRUE(WIFEXITED(status)); + ASSERT_EQ(WEXITSTATUS(status), 0); + + ASSERT_NE(stat("/tmp/coredump.file", &st), 0); + ASSERT_EQ(errno, ENOENT); +} + +TEST_F(coredump, socket_enoent) +{ + int pidfd, ret, status; + FILE *file; + pid_t pid; + char core_file[PATH_MAX]; + + file = fopen("/proc/sys/kernel/core_pattern", "w"); + ASSERT_NE(file, NULL); + + ret = fprintf(file, "@/tmp/coredump.socket"); + ASSERT_EQ(ret, strlen("@/tmp/coredump.socket")); + ASSERT_EQ(fclose(file), 0); + + pid = fork(); + ASSERT_GE(pid, 0); + if (pid == 0) + crashing_child(); + + pidfd = sys_pidfd_open(pid, 0); + ASSERT_GE(pidfd, 0); + + waitpid(pid, &status, 0); + ASSERT_TRUE(WIFSIGNALED(status)); + ASSERT_FALSE(WCOREDUMP(status)); +} + +TEST_F(coredump, socket_no_listener) +{ + int pidfd, ret, status; + FILE *file; + pid_t pid, pid_coredump_server; + int ipc_sockets[2]; + char c; + const struct sockaddr_un coredump_sk = { + .sun_family = AF_UNIX, + .sun_path = "/tmp/coredump.socket", + }; + size_t coredump_sk_len = offsetof(struct sockaddr_un, sun_path) + + sizeof("/tmp/coredump.socket"); + + ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets); + ASSERT_EQ(ret, 0); + + file = fopen("/proc/sys/kernel/core_pattern", "w"); + ASSERT_NE(file, NULL); + + ret = fprintf(file, "@/tmp/coredump.socket"); + ASSERT_EQ(ret, strlen("@/tmp/coredump.socket")); + ASSERT_EQ(fclose(file), 0); + + pid_coredump_server = fork(); + ASSERT_GE(pid_coredump_server, 0); + if (pid_coredump_server == 0) { + int fd_server; + socklen_t fd_peer_pidfd_len; + + close(ipc_sockets[0]); + + fd_server = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0); + if (fd_server < 0) + _exit(EXIT_FAILURE); + + ret = bind(fd_server, (const struct sockaddr *)&coredump_sk, coredump_sk_len); + if (ret < 0) { + fprintf(stderr, "Failed to bind coredump socket\n"); + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + if (write_nointr(ipc_sockets[1], "1", 1) < 0) { + 
close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_FAILURE); + } + + close(fd_server); + close(ipc_sockets[1]); + _exit(EXIT_SUCCESS); + } + self->pid_coredump_server = pid_coredump_server; + + EXPECT_EQ(close(ipc_sockets[1]), 0); + ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1); + EXPECT_EQ(close(ipc_sockets[0]), 0); + + pid = fork(); + ASSERT_GE(pid, 0); + if (pid == 0) + crashing_child(); + + pidfd = sys_pidfd_open(pid, 0); + ASSERT_GE(pidfd, 0); + + waitpid(pid, &status, 0); + ASSERT_TRUE(WIFSIGNALED(status)); + ASSERT_FALSE(WCOREDUMP(status)); + + waitpid(pid_coredump_server, &status, 0); + self->pid_coredump_server = -ESRCH; + ASSERT_TRUE(WIFEXITED(status)); + ASSERT_EQ(WEXITSTATUS(status), 0); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/cpufreq/cpufreq.sh b/tools/testing/selftests/cpufreq/cpufreq.sh index e350c521b467..9927b654fb8f 100755 --- a/tools/testing/selftests/cpufreq/cpufreq.sh +++ b/tools/testing/selftests/cpufreq/cpufreq.sh @@ -52,7 +52,14 @@ read_cpufreq_files_in_dir() for file in $files; do if [ -f $1/$file ]; then printf "$file:" - cat $1/$file + #file is readable ? + local rfile=$(ls -l $1/$file | awk '$1 ~ /^.*r.*/ { print $NF; }') + + if [ ! -z $rfile ]; then + cat $1/$file + else + printf "$file is not readable\n" + fi else printf "\n" read_cpufreq_files_in_dir "$1/$file" @@ -83,10 +90,10 @@ update_cpufreq_files_in_dir() for file in $files; do if [ -f $1/$file ]; then - # file is writable ? - local wfile=$(ls -l $1/$file | awk '$1 ~ /^.*w.*/ { print $NF; }') + # file is readable and writable ? + local rwfile=$(ls -l $1/$file | awk '$1 ~ /^.*rw.*/ { print $NF; }') - if [ ! -z $wfile ]; then + if [ ! -z $rwfile ]; then # scaling_setspeed is a special file and we # should skip updating it if [ $file != "scaling_setspeed" ]; then @@ -244,9 +251,10 @@ do_suspend() printf "Failed to suspend using RTC wake alarm\n" return 1 fi + else + echo $filename > $SYSFS/power/state fi - echo $filename > $SYSFS/power/state printf "Came out of $1\n" printf "Do basic tests after finishing $1 to verify cpufreq state\n\n" diff --git a/tools/testing/selftests/filesystems/.gitignore b/tools/testing/selftests/filesystems/.gitignore index 828b66a10c63..7afa58e2bb20 100644 --- a/tools/testing/selftests/filesystems/.gitignore +++ b/tools/testing/selftests/filesystems/.gitignore @@ -2,3 +2,4 @@ dnotify_test devpts_pts file_stressor +anon_inode_test diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile index 66305fc34c60..b02326193fee 100644 --- a/tools/testing/selftests/filesystems/Makefile +++ b/tools/testing/selftests/filesystems/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS += $(KHDR_INCLUDES) -TEST_GEN_PROGS := devpts_pts file_stressor +TEST_GEN_PROGS := devpts_pts file_stressor anon_inode_test TEST_GEN_PROGS_EXTENDED := dnotify_test include ../lib.mk diff --git a/tools/testing/selftests/filesystems/anon_inode_test.c b/tools/testing/selftests/filesystems/anon_inode_test.c new file mode 100644 index 000000000000..e8e0ef1460d2 --- /dev/null +++ b/tools/testing/selftests/filesystems/anon_inode_test.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ + +#include <fcntl.h> +#include <stdio.h> +#include <sys/stat.h> + +#include "../kselftest_harness.h" +#include "overlayfs/wrappers.h" + +TEST(anon_inode_no_chown) +{ + int fd_context; + + fd_context = sys_fsopen("tmpfs", 0); + ASSERT_GE(fd_context, 0); + + ASSERT_LT(fchown(fd_context, 1234, 
5678), 0); + ASSERT_EQ(errno, EOPNOTSUPP); + + EXPECT_EQ(close(fd_context), 0); +} + +TEST(anon_inode_no_chmod) +{ + int fd_context; + + fd_context = sys_fsopen("tmpfs", 0); + ASSERT_GE(fd_context, 0); + + ASSERT_LT(fchmod(fd_context, 0777), 0); + ASSERT_EQ(errno, EOPNOTSUPP); + + EXPECT_EQ(close(fd_context), 0); +} + +TEST(anon_inode_no_exec) +{ + int fd_context; + + fd_context = sys_fsopen("tmpfs", 0); + ASSERT_GE(fd_context, 0); + + ASSERT_LT(execveat(fd_context, "", NULL, NULL, AT_EMPTY_PATH), 0); + ASSERT_EQ(errno, EACCES); + + EXPECT_EQ(close(fd_context), 0); +} + +TEST(anon_inode_no_open) +{ + int fd_context; + + fd_context = sys_fsopen("tmpfs", 0); + ASSERT_GE(fd_context, 0); + + ASSERT_GE(dup2(fd_context, 500), 0); + ASSERT_EQ(close(fd_context), 0); + fd_context = 500; + + ASSERT_LT(open("/proc/self/fd/500", 0), 0); + ASSERT_EQ(errno, ENXIO); + + EXPECT_EQ(close(fd_context), 0); +} + +TEST_HARNESS_MAIN + diff --git a/tools/testing/selftests/filesystems/mount-notify/.gitignore b/tools/testing/selftests/filesystems/mount-notify/.gitignore index 82a4846cbc4b..124339ea7845 100644 --- a/tools/testing/selftests/filesystems/mount-notify/.gitignore +++ b/tools/testing/selftests/filesystems/mount-notify/.gitignore @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only /*_test +/*_test_ns diff --git a/tools/testing/selftests/filesystems/mount-notify/Makefile b/tools/testing/selftests/filesystems/mount-notify/Makefile index 10be0227b5ae..836a4eb7be06 100644 --- a/tools/testing/selftests/filesystems/mount-notify/Makefile +++ b/tools/testing/selftests/filesystems/mount-notify/Makefile @@ -1,6 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-or-later -CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES) -TEST_GEN_PROGS := mount-notify_test +CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES) $(TOOLS_INCLUDES) +LDLIBS += -lcap + +TEST_GEN_PROGS := mount-notify_test mount-notify_test_ns include ../../lib.mk + +$(OUTPUT)/mount-notify_test: ../utils.c +$(OUTPUT)/mount-notify_test_ns: ../utils.c diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c index 59a71f22fb11..63ce708d93ed 100644 --- a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c +++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c @@ -8,43 +8,21 @@ #include <string.h> #include <sys/stat.h> #include <sys/mount.h> -#include <linux/fanotify.h> #include <unistd.h> -#include <sys/fanotify.h> #include <sys/syscall.h> #include "../../kselftest_harness.h" #include "../statmount/statmount.h" +#include "../utils.h" -#ifndef FAN_MNT_ATTACH -struct fanotify_event_info_mnt { - struct fanotify_event_info_header hdr; - __u64 mnt_id; -}; -#define FAN_MNT_ATTACH 0x01000000 /* Mount was attached */ -#endif - -#ifndef FAN_MNT_DETACH -#define FAN_MNT_DETACH 0x02000000 /* Mount was detached */ -#endif - -#ifndef FAN_REPORT_MNT -#define FAN_REPORT_MNT 0x00004000 /* Report mount events */ +// Needed for linux/fanotify.h +#ifndef __kernel_fsid_t +typedef struct { + int val[2]; +} __kernel_fsid_t; #endif -#ifndef FAN_MARK_MNTNS -#define FAN_MARK_MNTNS 0x00000110 -#endif - -static uint64_t get_mnt_id(struct __test_metadata *const _metadata, - const char *path) -{ - struct statx sx; - - ASSERT_EQ(statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &sx), 0); - ASSERT_TRUE(!!(sx.stx_mask & STATX_MNT_ID_UNIQUE)); - return sx.stx_mnt_id; -} +#include <sys/fanotify.h> static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX"; @@ -94,7 +72,7 @@ 
FIXTURE_SETUP(fanotify) ASSERT_EQ(mkdir("b", 0700), 0); - self->root_id = get_mnt_id(_metadata, "/"); + self->root_id = get_unique_mnt_id("/"); ASSERT_NE(self->root_id, 0); for (i = 0; i < NUM_FAN_FDS; i++) { diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c new file mode 100644 index 000000000000..090a5ca65004 --- /dev/null +++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright (c) 2025 Miklos Szeredi <miklos@szeredi.hu> + +#define _GNU_SOURCE +#include <fcntl.h> +#include <sched.h> +#include <stdio.h> +#include <string.h> +#include <sys/stat.h> +#include <sys/mount.h> +#include <unistd.h> +#include <sys/syscall.h> + +#include "../../kselftest_harness.h" +#include "../../pidfd/pidfd.h" +#include "../statmount/statmount.h" +#include "../utils.h" + +// Needed for linux/fanotify.h +#ifndef __kernel_fsid_t +typedef struct { + int val[2]; +} __kernel_fsid_t; +#endif + +#include <sys/fanotify.h> + +static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX"; + +static const int mark_types[] = { + FAN_MARK_FILESYSTEM, + FAN_MARK_MOUNT, + FAN_MARK_INODE +}; + +static const int mark_cmds[] = { + FAN_MARK_ADD, + FAN_MARK_REMOVE, + FAN_MARK_FLUSH +}; + +#define NUM_FAN_FDS ARRAY_SIZE(mark_cmds) + +FIXTURE(fanotify) { + int fan_fd[NUM_FAN_FDS]; + char buf[256]; + unsigned int rem; + void *next; + char root_mntpoint[sizeof(root_mntpoint_templ)]; + int orig_root; + int orig_ns_fd; + int ns_fd; + uint64_t root_id; +}; + +FIXTURE_SETUP(fanotify) +{ + int i, ret; + + self->orig_ns_fd = open("/proc/self/ns/mnt", O_RDONLY); + ASSERT_GE(self->orig_ns_fd, 0); + + ret = setup_userns(); + ASSERT_EQ(ret, 0); + + self->ns_fd = open("/proc/self/ns/mnt", O_RDONLY); + ASSERT_GE(self->ns_fd, 0); + + strcpy(self->root_mntpoint, root_mntpoint_templ); + ASSERT_NE(mkdtemp(self->root_mntpoint), NULL); + + self->orig_root = open("/", O_PATH | O_CLOEXEC); + ASSERT_GE(self->orig_root, 0); + + ASSERT_EQ(mount("tmpfs", self->root_mntpoint, "tmpfs", 0, NULL), 0); + + ASSERT_EQ(chroot(self->root_mntpoint), 0); + + ASSERT_EQ(chdir("/"), 0); + + ASSERT_EQ(mkdir("a", 0700), 0); + + ASSERT_EQ(mkdir("b", 0700), 0); + + self->root_id = get_unique_mnt_id("/"); + ASSERT_NE(self->root_id, 0); + + for (i = 0; i < NUM_FAN_FDS; i++) { + int fan_fd = fanotify_init(FAN_REPORT_FID, 0); + // Verify that watching tmpfs mounted inside userns is allowed + ret = fanotify_mark(fan_fd, FAN_MARK_ADD | mark_types[i], + FAN_OPEN, AT_FDCWD, "/"); + ASSERT_EQ(ret, 0); + // ...but watching entire orig root filesystem is not allowed + ret = fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM, + FAN_OPEN, self->orig_root, "."); + ASSERT_NE(ret, 0); + close(fan_fd); + + self->fan_fd[i] = fanotify_init(FAN_REPORT_MNT | FAN_NONBLOCK, + 0); + ASSERT_GE(self->fan_fd[i], 0); + // Verify that watching mntns where group was created is allowed + ret = fanotify_mark(self->fan_fd[i], FAN_MARK_ADD | + FAN_MARK_MNTNS, + FAN_MNT_ATTACH | FAN_MNT_DETACH, + self->ns_fd, NULL); + ASSERT_EQ(ret, 0); + // ...but watching orig mntns is not allowed + ret = fanotify_mark(self->fan_fd[i], FAN_MARK_ADD | + FAN_MARK_MNTNS, + FAN_MNT_ATTACH | FAN_MNT_DETACH, + self->orig_ns_fd, NULL); + ASSERT_NE(ret, 0); + // On fd[0] we do an extra ADD that changes nothing. + // On fd[1]/fd[2] we REMOVE/FLUSH which removes the mark. 
+ ret = fanotify_mark(self->fan_fd[i], mark_cmds[i] | + FAN_MARK_MNTNS, + FAN_MNT_ATTACH | FAN_MNT_DETACH, + self->ns_fd, NULL); + ASSERT_EQ(ret, 0); + } + + self->rem = 0; +} + +FIXTURE_TEARDOWN(fanotify) +{ + int i; + + ASSERT_EQ(self->rem, 0); + for (i = 0; i < NUM_FAN_FDS; i++) + close(self->fan_fd[i]); + + ASSERT_EQ(fchdir(self->orig_root), 0); + + ASSERT_EQ(chroot("."), 0); + + EXPECT_EQ(umount2(self->root_mntpoint, MNT_DETACH), 0); + EXPECT_EQ(chdir(self->root_mntpoint), 0); + EXPECT_EQ(chdir("/"), 0); + EXPECT_EQ(rmdir(self->root_mntpoint), 0); +} + +static uint64_t expect_notify(struct __test_metadata *const _metadata, + FIXTURE_DATA(fanotify) *self, + uint64_t *mask) +{ + struct fanotify_event_metadata *meta; + struct fanotify_event_info_mnt *mnt; + unsigned int thislen; + + if (!self->rem) { + ssize_t len; + int i; + + for (i = NUM_FAN_FDS - 1; i >= 0; i--) { + len = read(self->fan_fd[i], self->buf, + sizeof(self->buf)); + if (i > 0) { + // Groups 1,2 should get EAGAIN + ASSERT_EQ(len, -1); + ASSERT_EQ(errno, EAGAIN); + } else { + // Group 0 should get events + ASSERT_GT(len, 0); + } + } + + self->rem = len; + self->next = (void *) self->buf; + } + + meta = self->next; + ASSERT_TRUE(FAN_EVENT_OK(meta, self->rem)); + + thislen = meta->event_len; + self->rem -= thislen; + self->next += thislen; + + *mask = meta->mask; + thislen -= sizeof(*meta); + + mnt = ((void *) meta) + meta->event_len - thislen; + + ASSERT_EQ(thislen, sizeof(*mnt)); + + return mnt->mnt_id; +} + +static void expect_notify_n(struct __test_metadata *const _metadata, + FIXTURE_DATA(fanotify) *self, + unsigned int n, uint64_t mask[], uint64_t mnts[]) +{ + unsigned int i; + + for (i = 0; i < n; i++) + mnts[i] = expect_notify(_metadata, self, &mask[i]); +} + +static uint64_t expect_notify_mask(struct __test_metadata *const _metadata, + FIXTURE_DATA(fanotify) *self, + uint64_t expect_mask) +{ + uint64_t mntid, mask; + + mntid = expect_notify(_metadata, self, &mask); + ASSERT_EQ(expect_mask, mask); + + return mntid; +} + + +static void expect_notify_mask_n(struct __test_metadata *const _metadata, + FIXTURE_DATA(fanotify) *self, + uint64_t mask, unsigned int n, uint64_t mnts[]) +{ + unsigned int i; + + for (i = 0; i < n; i++) + mnts[i] = expect_notify_mask(_metadata, self, mask); +} + +static void verify_mount_ids(struct __test_metadata *const _metadata, + const uint64_t list1[], const uint64_t list2[], + size_t num) +{ + unsigned int i, j; + + // Check that neither list has any duplicates + for (i = 0; i < num; i++) { + for (j = 0; j < num; j++) { + if (i != j) { + ASSERT_NE(list1[i], list1[j]); + ASSERT_NE(list2[i], list2[j]); + } + } + } + // Check that all list1 members can be found in list2. Together with + // the above it means that list1 and list2 represent the same sets.
+ for (i = 0; i < num; i++) { + for (j = 0; j < num; j++) { + if (list1[i] == list2[j]) + break; + } + ASSERT_NE(j, num); + } +} + +static void check_mounted(struct __test_metadata *const _metadata, + const uint64_t mnts[], size_t num) +{ + ssize_t ret; + uint64_t *list; + + list = malloc((num + 1) * sizeof(list[0])); + ASSERT_NE(list, NULL); + + ret = listmount(LSMT_ROOT, 0, 0, list, num + 1, 0); + ASSERT_EQ(ret, num); + + verify_mount_ids(_metadata, mnts, list, num); + + free(list); +} + +static void setup_mount_tree(struct __test_metadata *const _metadata, + int log2_num) +{ + int ret, i; + + ret = mount("", "/", NULL, MS_SHARED, NULL); + ASSERT_EQ(ret, 0); + + for (i = 0; i < log2_num; i++) { + ret = mount("/", "/", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + } +} + +TEST_F(fanotify, bind) +{ + int ret; + uint64_t mnts[2] = { self->root_id }; + + ret = mount("/", "/", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH); + ASSERT_NE(mnts[0], mnts[1]); + + check_mounted(_metadata, mnts, 2); + + // Cleanup + uint64_t detach_id; + ret = umount("/"); + ASSERT_EQ(ret, 0); + + detach_id = expect_notify_mask(_metadata, self, FAN_MNT_DETACH); + ASSERT_EQ(detach_id, mnts[1]); + + check_mounted(_metadata, mnts, 1); +} + +TEST_F(fanotify, move) +{ + int ret; + uint64_t mnts[2] = { self->root_id }; + uint64_t move_id; + + ret = mount("/", "/a", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH); + ASSERT_NE(mnts[0], mnts[1]); + + check_mounted(_metadata, mnts, 2); + + ret = move_mount(AT_FDCWD, "/a", AT_FDCWD, "/b", 0); + ASSERT_EQ(ret, 0); + + move_id = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH | FAN_MNT_DETACH); + ASSERT_EQ(move_id, mnts[1]); + + // Cleanup + ret = umount("/b"); + ASSERT_EQ(ret, 0); + + check_mounted(_metadata, mnts, 1); +} + +TEST_F(fanotify, propagate) +{ + const unsigned int log2_num = 4; + const unsigned int num = (1 << log2_num); + uint64_t mnts[num]; + + setup_mount_tree(_metadata, log2_num); + + expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, num - 1, mnts + 1); + + mnts[0] = self->root_id; + check_mounted(_metadata, mnts, num); + + // Cleanup + int ret; + uint64_t mnts2[num]; + ret = umount2("/", MNT_DETACH); + ASSERT_EQ(ret, 0); + + ret = mount("", "/", NULL, MS_PRIVATE, NULL); + ASSERT_EQ(ret, 0); + + mnts2[0] = self->root_id; + expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, num - 1, mnts2 + 1); + verify_mount_ids(_metadata, mnts, mnts2, num); + + check_mounted(_metadata, mnts, 1); +} + +TEST_F(fanotify, fsmount) +{ + int ret, fs, mnt; + uint64_t mnts[2] = { self->root_id }; + + fs = fsopen("tmpfs", 0); + ASSERT_GE(fs, 0); + + ret = fsconfig(fs, FSCONFIG_CMD_CREATE, 0, 0, 0); + ASSERT_EQ(ret, 0); + + mnt = fsmount(fs, 0, 0); + ASSERT_GE(mnt, 0); + + close(fs); + + ret = move_mount(mnt, "", AT_FDCWD, "/a", MOVE_MOUNT_F_EMPTY_PATH); + ASSERT_EQ(ret, 0); + + close(mnt); + + mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH); + ASSERT_NE(mnts[0], mnts[1]); + + check_mounted(_metadata, mnts, 2); + + // Cleanup + uint64_t detach_id; + ret = umount("/a"); + ASSERT_EQ(ret, 0); + + detach_id = expect_notify_mask(_metadata, self, FAN_MNT_DETACH); + ASSERT_EQ(detach_id, mnts[1]); + + check_mounted(_metadata, mnts, 1); +} + +TEST_F(fanotify, reparent) +{ + uint64_t mnts[6] = { self->root_id }; + uint64_t dmnts[3]; + uint64_t masks[3]; + unsigned int i; + int ret; + + // Create setup with a[1] -> b[2] propagation + ret = mount("/", 
"/a", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + ret = mount("", "/a", NULL, MS_SHARED, NULL); + ASSERT_EQ(ret, 0); + + ret = mount("/a", "/b", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + ret = mount("", "/b", NULL, MS_SLAVE, NULL); + ASSERT_EQ(ret, 0); + + expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 1); + + check_mounted(_metadata, mnts, 3); + + // Mount on a[3], which is propagated to b[4] + ret = mount("/", "/a", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 3); + + check_mounted(_metadata, mnts, 5); + + // Mount on b[5], not propagated + ret = mount("/", "/b", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + mnts[5] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH); + + check_mounted(_metadata, mnts, 6); + + // Umount a[3], which is propagated to b[4], but not b[5] + // This will result in b[5] "falling" on b[2] + ret = umount("/a"); + ASSERT_EQ(ret, 0); + + expect_notify_n(_metadata, self, 3, masks, dmnts); + verify_mount_ids(_metadata, mnts + 3, dmnts, 3); + + for (i = 0; i < 3; i++) { + if (dmnts[i] == mnts[5]) { + ASSERT_EQ(masks[i], FAN_MNT_ATTACH | FAN_MNT_DETACH); + } else { + ASSERT_EQ(masks[i], FAN_MNT_DETACH); + } + } + + mnts[3] = mnts[5]; + check_mounted(_metadata, mnts, 4); + + // Cleanup + ret = umount("/b"); + ASSERT_EQ(ret, 0); + + ret = umount("/a"); + ASSERT_EQ(ret, 0); + + ret = umount("/b"); + ASSERT_EQ(ret, 0); + + expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, 3, dmnts); + verify_mount_ids(_metadata, mnts + 1, dmnts, 3); + + check_mounted(_metadata, mnts, 1); +} + +TEST_F(fanotify, rmdir) +{ + uint64_t mnts[3] = { self->root_id }; + int ret; + + ret = mount("/", "/a", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + ret = mount("/", "/a/b", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 1); + + check_mounted(_metadata, mnts, 3); + + ret = chdir("/a"); + ASSERT_EQ(ret, 0); + + ret = fork(); + ASSERT_GE(ret, 0); + + if (ret == 0) { + chdir("/"); + unshare(CLONE_NEWNS); + mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL); + umount2("/a", MNT_DETACH); + // This triggers a detach in the other namespace + rmdir("/a"); + exit(0); + } + wait(NULL); + + expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, 2, mnts + 1); + check_mounted(_metadata, mnts, 1); + + // Cleanup + ret = chdir("/"); + ASSERT_EQ(ret, 0); +} + +TEST_F(fanotify, pivot_root) +{ + uint64_t mnts[3] = { self->root_id }; + uint64_t mnts2[3]; + int ret; + + ret = mount("tmpfs", "/a", "tmpfs", 0, NULL); + ASSERT_EQ(ret, 0); + + mnts[2] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH); + + ret = mkdir("/a/new", 0700); + ASSERT_EQ(ret, 0); + + ret = mkdir("/a/old", 0700); + ASSERT_EQ(ret, 0); + + ret = mount("/a", "/a/new", NULL, MS_BIND, NULL); + ASSERT_EQ(ret, 0); + + mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH); + check_mounted(_metadata, mnts, 3); + + ret = syscall(SYS_pivot_root, "/a/new", "/a/new/old"); + ASSERT_EQ(ret, 0); + + expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH | FAN_MNT_DETACH, 2, mnts2); + verify_mount_ids(_metadata, mnts, mnts2, 2); + check_mounted(_metadata, mnts, 3); + + // Cleanup + ret = syscall(SYS_pivot_root, "/old", "/old/a/new"); + ASSERT_EQ(ret, 0); + + ret = umount("/a/new"); + ASSERT_EQ(ret, 0); + + ret = umount("/a"); + ASSERT_EQ(ret, 0); + + check_mounted(_metadata, mnts, 1); +} + +TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/filesystems/overlayfs/Makefile 
b/tools/testing/selftests/filesystems/overlayfs/Makefile index 6c661232b3b5..d3ad4a77db9b 100644 --- a/tools/testing/selftests/filesystems/overlayfs/Makefile +++ b/tools/testing/selftests/filesystems/overlayfs/Makefile @@ -4,7 +4,7 @@ CFLAGS += -Wall CFLAGS += $(KHDR_INCLUDES) LDLIBS += -lcap -LOCAL_HDRS += wrappers.h log.h +LOCAL_HDRS += ../wrappers.h log.h TEST_GEN_PROGS := dev_in_maps TEST_GEN_PROGS += set_layers_via_fds diff --git a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c index 3b796264223f..31db54b00e64 100644 --- a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c +++ b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c @@ -17,7 +17,7 @@ #include "../../kselftest.h" #include "log.h" -#include "wrappers.h" +#include "../wrappers.h" static long get_file_dev_and_inode(void *addr, struct statx *stx) { diff --git a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c index 5074e64e74a8..dc0449fa628f 100644 --- a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c +++ b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c @@ -16,7 +16,7 @@ #include "../../pidfd/pidfd.h" #include "log.h" #include "../utils.h" -#include "wrappers.h" +#include "../wrappers.h" FIXTURE(set_layers_via_fds) { int pidfd; diff --git a/tools/testing/selftests/filesystems/statmount/Makefile b/tools/testing/selftests/filesystems/statmount/Makefile index 14ee91a41650..8e354fe99b44 100644 --- a/tools/testing/selftests/filesystems/statmount/Makefile +++ b/tools/testing/selftests/filesystems/statmount/Makefile @@ -1,6 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-or-later -CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES) +CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES) $(TOOLS_INCLUDES) +LDLIBS += -lcap + TEST_GEN_PROGS := statmount_test statmount_test_ns listmount_test include ../../lib.mk + +$(OUTPUT)/statmount_test_ns: ../utils.c diff --git a/tools/testing/selftests/filesystems/statmount/statmount.h b/tools/testing/selftests/filesystems/statmount/statmount.h index a7a5289ddae9..99e5ad082fb1 100644 --- a/tools/testing/selftests/filesystems/statmount/statmount.h +++ b/tools/testing/selftests/filesystems/statmount/statmount.h @@ -7,6 +7,42 @@ #include <linux/mount.h> #include <asm/unistd.h> +#ifndef __NR_statmount + #if defined __alpha__ + #define __NR_statmount 567 + #elif defined _MIPS_SIM + #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ + #define __NR_statmount 4457 + #endif + #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ + #define __NR_statmount 6457 + #endif + #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ + #define __NR_statmount 5457 + #endif + #else + #define __NR_statmount 457 + #endif +#endif + +#ifndef __NR_listmount + #if defined __alpha__ + #define __NR_listmount 568 + #elif defined _MIPS_SIM + #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ + #define __NR_listmount 4458 + #endif + #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ + #define __NR_listmount 6458 + #endif + #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ + #define __NR_listmount 5458 + #endif + #else + #define __NR_listmount 458 + #endif +#endif + static inline int statmount(uint64_t mnt_id, uint64_t mnt_ns_id, uint64_t mask, struct statmount *buf, size_t bufsize, unsigned int flags) diff --git a/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c index 70cb0c8b21cf..605a3fa16bf7 100644 --- 
a/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c +++ b/tools/testing/selftests/filesystems/statmount/statmount_test_ns.c @@ -14,6 +14,7 @@ #include <linux/stat.h> #include "statmount.h" +#include "../utils.h" #include "../../kselftest.h" #define NSID_PASS 0 @@ -78,87 +79,10 @@ static int get_mnt_ns_id(const char *mnt_ns, uint64_t *mnt_ns_id) return NSID_PASS; } -static int get_mnt_id(const char *path, uint64_t *mnt_id) -{ - struct statx sx; - int ret; - - ret = statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &sx); - if (ret == -1) { - ksft_print_msg("retrieving unique mount ID for %s: %s\n", path, - strerror(errno)); - return NSID_ERROR; - } - - if (!(sx.stx_mask & STATX_MNT_ID_UNIQUE)) { - ksft_print_msg("no unique mount ID available for %s\n", path); - return NSID_ERROR; - } - - *mnt_id = sx.stx_mnt_id; - return NSID_PASS; -} - -static int write_file(const char *path, const char *val) -{ - int fd = open(path, O_WRONLY); - size_t len = strlen(val); - int ret; - - if (fd == -1) { - ksft_print_msg("opening %s for write: %s\n", path, strerror(errno)); - return NSID_ERROR; - } - - ret = write(fd, val, len); - if (ret == -1) { - ksft_print_msg("writing to %s: %s\n", path, strerror(errno)); - return NSID_ERROR; - } - if (ret != len) { - ksft_print_msg("short write to %s\n", path); - return NSID_ERROR; - } - - ret = close(fd); - if (ret == -1) { - ksft_print_msg("closing %s\n", path); - return NSID_ERROR; - } - - return NSID_PASS; -} - static int setup_namespace(void) { - int ret; - char buf[32]; - uid_t uid = getuid(); - gid_t gid = getgid(); - - ret = unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWPID); - if (ret == -1) - ksft_exit_fail_msg("unsharing mountns and userns: %s\n", - strerror(errno)); - - sprintf(buf, "0 %d 1", uid); - ret = write_file("/proc/self/uid_map", buf); - if (ret != NSID_PASS) - return ret; - ret = write_file("/proc/self/setgroups", "deny"); - if (ret != NSID_PASS) - return ret; - sprintf(buf, "0 %d 1", gid); - ret = write_file("/proc/self/gid_map", buf); - if (ret != NSID_PASS) - return ret; - - ret = mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL); - if (ret == -1) { - ksft_print_msg("making mount tree private: %s\n", - strerror(errno)); + if (setup_userns() != 0) return NSID_ERROR; - } return NSID_PASS; } @@ -174,9 +98,9 @@ static int _test_statmount_mnt_ns_id(void) if (ret != NSID_PASS) return ret; - ret = get_mnt_id("/", &root_id); - if (ret != NSID_PASS) - return ret; + root_id = get_unique_mnt_id("/"); + if (!root_id) + return NSID_ERROR; ret = statmount(root_id, 0, STATMOUNT_MNT_NS_ID, &sm, sizeof(sm), 0); if (ret == -1) { diff --git a/tools/testing/selftests/filesystems/utils.c b/tools/testing/selftests/filesystems/utils.c index e553c89c5b19..c43a69dffd83 100644 --- a/tools/testing/selftests/filesystems/utils.c +++ b/tools/testing/selftests/filesystems/utils.c @@ -18,7 +18,10 @@ #include <sys/types.h> #include <sys/wait.h> #include <sys/xattr.h> +#include <sys/mount.h> +#include "../kselftest.h" +#include "wrappers.h" #include "utils.h" #define MAX_USERNS_LEVEL 32 @@ -447,6 +450,71 @@ out_close: return fret; } +static int write_file(const char *path, const char *val) +{ + int fd = open(path, O_WRONLY); + size_t len = strlen(val); + int ret; + + if (fd == -1) { + ksft_print_msg("opening %s for write: %s\n", path, strerror(errno)); + return -1; + } + + ret = write(fd, val, len); + if (ret == -1) { + ksft_print_msg("writing to %s: %s\n", path, strerror(errno)); + return -1; + } + if (ret != len) { + ksft_print_msg("short write to %s\n", path); + return 
-1; + } + + ret = close(fd); + if (ret == -1) { + ksft_print_msg("closing %s\n", path); + return -1; + } + + return 0; +} + +int setup_userns(void) +{ + int ret; + char buf[32]; + uid_t uid = getuid(); + gid_t gid = getgid(); + + ret = unshare(CLONE_NEWNS|CLONE_NEWUSER|CLONE_NEWPID); + if (ret) { + ksft_exit_fail_msg("unsharing mountns and userns: %s\n", + strerror(errno)); + return ret; + } + + sprintf(buf, "0 %d 1", uid); + ret = write_file("/proc/self/uid_map", buf); + if (ret) + return ret; + ret = write_file("/proc/self/setgroups", "deny"); + if (ret) + return ret; + sprintf(buf, "0 %d 1", gid); + ret = write_file("/proc/self/gid_map", buf); + if (ret) + return ret; + + ret = mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL); + if (ret) { + ksft_print_msg("making mount tree private: %s\n", strerror(errno)); + return ret; + } + + return 0; +} + /* caps_down - lower all effective caps */ int caps_down(void) { @@ -499,3 +567,23 @@ out: cap_free(caps); return fret; } + +uint64_t get_unique_mnt_id(const char *path) +{ + struct statx sx; + int ret; + + ret = statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &sx); + if (ret == -1) { + ksft_print_msg("retrieving unique mount ID for %s: %s\n", path, + strerror(errno)); + return 0; + } + + if (!(sx.stx_mask & STATX_MNT_ID_UNIQUE)) { + ksft_print_msg("no unique mount ID available for %s\n", path); + return 0; + } + + return sx.stx_mnt_id; +} diff --git a/tools/testing/selftests/filesystems/utils.h b/tools/testing/selftests/filesystems/utils.h index 7f1df2a3e94c..70f7ccc607f4 100644 --- a/tools/testing/selftests/filesystems/utils.h +++ b/tools/testing/selftests/filesystems/utils.h @@ -27,6 +27,7 @@ extern int caps_down(void); extern int cap_down(cap_value_t down); extern bool switch_ids(uid_t uid, gid_t gid); +extern int setup_userns(void); static inline bool switch_userns(int fd, uid_t uid, gid_t gid, bool drop_caps) { @@ -42,4 +43,6 @@ static inline bool switch_userns(int fd, uid_t uid, gid_t gid, bool drop_caps) return true; } +extern uint64_t get_unique_mnt_id(const char *path); + #endif /* __IDMAP_UTILS_H */ diff --git a/tools/testing/selftests/filesystems/overlayfs/wrappers.h b/tools/testing/selftests/filesystems/wrappers.h index c38bc48e0cfa..420ae4f908cf 100644 --- a/tools/testing/selftests/filesystems/overlayfs/wrappers.h +++ b/tools/testing/selftests/filesystems/wrappers.h @@ -9,6 +9,10 @@ #include <linux/mount.h> #include <sys/syscall.h> +#ifndef STATX_MNT_ID_UNIQUE +#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ +#endif + static inline int sys_fsopen(const char *fsname, unsigned int flags) { return syscall(__NR_fsopen, fsname, flags); @@ -36,6 +40,28 @@ static inline int sys_mount(const char *src, const char *tgt, const char *fst, #define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */ #endif +#ifndef MOVE_MOUNT_T_EMPTY_PATH +#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */ +#endif + +#ifndef __NR_move_mount + #if defined __alpha__ + #define __NR_move_mount 539 + #elif defined _MIPS_SIM + #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ + #define __NR_move_mount 4429 + #endif + #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ + #define __NR_move_mount 6429 + #endif + #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ + #define __NR_move_mount 5429 + #endif + #else + #define __NR_move_mount 429 + #endif +#endif + static inline int sys_move_mount(int from_dfd, const char *from_pathname, int to_dfd, const char *to_pathname, unsigned int flags) @@ -53,7 +79,25 @@ static inline int 
sys_move_mount(int from_dfd, const char *from_pathname, #endif #ifndef AT_RECURSIVE -#define AT_RECURSIVE 0x8000 +#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */ +#endif + +#ifndef __NR_open_tree + #if defined __alpha__ + #define __NR_open_tree 538 + #elif defined _MIPS_SIM + #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ + #define __NR_open_tree 4428 + #endif + #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ + #define __NR_open_tree 6428 + #endif + #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ + #define __NR_open_tree 5428 + #endif + #else + #define __NR_open_tree 428 + #endif #endif static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags) diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile index 49d96bb16355..7c12263f8260 100644 --- a/tools/testing/selftests/ftrace/Makefile +++ b/tools/testing/selftests/ftrace/Makefile @@ -6,6 +6,6 @@ TEST_PROGS := ftracetest-ktap TEST_FILES := test.d settings EXTRA_CLEAN := $(OUTPUT)/logs/* -TEST_GEN_PROGS = poll +TEST_GEN_FILES := poll include ../lib.mk diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore index fbcbdb6963b3..7b24ae89594a 100644 --- a/tools/testing/selftests/futex/functional/.gitignore +++ b/tools/testing/selftests/futex/functional/.gitignore @@ -1,11 +1,13 @@ # SPDX-License-Identifier: GPL-2.0-only +futex_numa_mpol +futex_priv_hash +futex_requeue futex_requeue_pi futex_requeue_pi_mismatched_ops futex_requeue_pi_signal_restart +futex_wait futex_wait_private_mapped_file futex_wait_timeout futex_wait_uninitialized_heap futex_wait_wouldblock -futex_wait -futex_requeue futex_waitv diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index f79f9bac7918..8cfb87f7f7c5 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 INCLUDES := -I../include -I../../ $(KHDR_INCLUDES) CFLAGS := $(CFLAGS) -g -O2 -Wall -pthread $(INCLUDES) $(KHDR_INCLUDES) -LDLIBS := -lpthread -lrt +LDLIBS := -lpthread -lrt -lnuma LOCAL_HDRS := \ ../include/futextest.h \ @@ -17,7 +17,10 @@ TEST_GEN_PROGS := \ futex_wait_private_mapped_file \ futex_wait \ futex_requeue \ - futex_waitv + futex_priv_hash \ + futex_numa_mpol \ + futex_waitv \ + futex_numa TEST_PROGS := run.sh diff --git a/tools/testing/selftests/futex/functional/futex_numa.c b/tools/testing/selftests/futex/functional/futex_numa.c new file mode 100644 index 000000000000..f29e4d627e79 --- /dev/null +++ b/tools/testing/selftests/futex/functional/futex_numa.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <pthread.h> +#include <sys/shm.h> +#include <sys/mman.h> +#include <fcntl.h> +#include <stdbool.h> +#include <time.h> +#include <assert.h> +#include "logging.h" +#include "futextest.h" +#include "futex2test.h" + +typedef u_int32_t u32; +typedef int32_t s32; +typedef u_int64_t u64; + +static unsigned int fflags = (FUTEX2_SIZE_U32 | FUTEX2_PRIVATE); +static int fnode = FUTEX_NO_NODE; + +/* fairly stupid test-and-set lock with a waiter flag */ + +#define N_LOCK 0x0000001 +#define N_WAITERS 0x0001000 + +struct futex_numa_32 { + union { + u64 full; + struct { + u32 val; + u32 node; + }; + }; +}; + +void futex_numa_32_lock(struct futex_numa_32 *lock) +{ + for (;;) { + struct futex_numa_32 new, old = { + .full = __atomic_load_n(&lock->full, __ATOMIC_RELAXED), + }; + + for (;;) { + new = old; + 
if (old.val == 0) { + /* no waiter, no lock -> first lock, set no-node */ + new.node = fnode; + } + if (old.val & N_LOCK) { + /* contention, set waiter */ + new.val |= N_WAITERS; + } + new.val |= N_LOCK; + + /* nothing changed, ready to block */ + if (old.full == new.full) + break; + + /* + * Use u64 cmpxchg to set the futex value and node in a + * consistent manner. + */ + if (__atomic_compare_exchange_n(&lock->full, + &old.full, new.full, + /* .weak */ false, + __ATOMIC_ACQUIRE, + __ATOMIC_RELAXED)) { + + /* if we just set N_LOCK, we own it */ + if (!(old.val & N_LOCK)) + return; + + /* go block */ + break; + } + } + + futex2_wait(lock, new.val, fflags, NULL, 0); + } +} + +void futex_numa_32_unlock(struct futex_numa_32 *lock) +{ + u32 val = __atomic_sub_fetch(&lock->val, N_LOCK, __ATOMIC_RELEASE); + assert((s32)val >= 0); + if (val & N_WAITERS) { + int woken = futex2_wake(lock, 1, fflags); + assert(val == N_WAITERS); + if (!woken) { + __atomic_compare_exchange_n(&lock->val, &val, 0U, + false, __ATOMIC_RELAXED, + __ATOMIC_RELAXED); + } + } +} + +static long nanos = 50000; + +struct thread_args { + pthread_t tid; + volatile int * done; + struct futex_numa_32 *lock; + int val; + int *val1, *val2; + int node; +}; + +static void *threadfn(void *_arg) +{ + struct thread_args *args = _arg; + struct timespec ts = { + .tv_nsec = nanos, + }; + int node; + + while (!*args->done) { + + futex_numa_32_lock(args->lock); + args->val++; + + assert(*args->val1 == *args->val2); + (*args->val1)++; + nanosleep(&ts, NULL); + (*args->val2)++; + + node = args->lock->node; + futex_numa_32_unlock(args->lock); + + if (node != args->node) { + args->node = node; + printf("node: %d\n", node); + } + + nanosleep(&ts, NULL); + } + + return NULL; +} + +static void *contendfn(void *_arg) +{ + struct thread_args *args = _arg; + + while (!*args->done) { + /* + * futex2_wait() will take hb-lock, verify *var == val and + * queue/abort. By knowingly setting val 'wrong' this will + * abort and thereby generate hb-lock contention. 
+ */ + futex2_wait(&args->lock->val, ~0U, fflags, NULL, 0); + args->val++; + } + + return NULL; +} + +static volatile int done = 0; +static struct futex_numa_32 lock = { .val = 0, }; +static int val1, val2; + +int main(int argc, char *argv[]) +{ + struct thread_args *tas[512], *cas[512]; + int c, t, threads = 2, contenders = 0; + int sleeps = 10; + int total = 0; + + while ((c = getopt(argc, argv, "c:t:s:n:N::")) != -1) { + switch (c) { + case 'c': + contenders = atoi(optarg); + break; + case 't': + threads = atoi(optarg); + break; + case 's': + sleeps = atoi(optarg); + break; + case 'n': + nanos = atoi(optarg); + break; + case 'N': + fflags |= FUTEX2_NUMA; + if (optarg) + fnode = atoi(optarg); + break; + default: + exit(1); + break; + } + } + + for (t = 0; t < contenders; t++) { + struct thread_args *args = calloc(1, sizeof(*args)); + if (!args) { + perror("thread_args"); + exit(-1); + } + + args->done = &done; + args->lock = &lock; + args->val1 = &val1; + args->val2 = &val2; + args->node = -1; + + if (pthread_create(&args->tid, NULL, contendfn, args)) { + perror("pthread_create"); + exit(-1); + } + + cas[t] = args; + } + + for (t = 0; t < threads; t++) { + struct thread_args *args = calloc(1, sizeof(*args)); + if (!args) { + perror("thread_args"); + exit(-1); + } + + args->done = &done; + args->lock = &lock; + args->val1 = &val1; + args->val2 = &val2; + args->node = -1; + + if (pthread_create(&args->tid, NULL, threadfn, args)) { + perror("pthread_create"); + exit(-1); + } + + tas[t] = args; + } + + sleep(sleeps); + + done = true; + + for (t = 0; t < threads; t++) { + struct thread_args *args = tas[t]; + + pthread_join(args->tid, NULL); + total += args->val; +// printf("tval: %d\n", args->val); + } + printf("total: %d\n", total); + + if (contenders) { + total = 0; + for (t = 0; t < contenders; t++) { + struct thread_args *args = cas[t]; + + pthread_join(args->tid, NULL); + total += args->val; + // printf("tval: %d\n", args->val); + } + printf("contenders: %d\n", total); + } + + return 0; +} + diff --git a/tools/testing/selftests/futex/functional/futex_numa_mpol.c b/tools/testing/selftests/futex/functional/futex_numa_mpol.c new file mode 100644 index 000000000000..20a9d3ecf743 --- /dev/null +++ b/tools/testing/selftests/futex/functional/futex_numa_mpol.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2025 Sebastian Andrzej Siewior <bigeasy@linutronix.de> + */ + +#define _GNU_SOURCE + +#include <errno.h> +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <numa.h> +#include <numaif.h> + +#include <linux/futex.h> +#include <sys/mman.h> + +#include "logging.h" +#include "futextest.h" +#include "futex2test.h" + +#define MAX_THREADS 64 + +static pthread_barrier_t barrier_main; +static pthread_t threads[MAX_THREADS]; + +struct thread_args { + void *futex_ptr; + unsigned int flags; + int result; +}; + +static struct thread_args thread_args[MAX_THREADS]; + +#ifndef FUTEX_NO_NODE +#define FUTEX_NO_NODE (-1) +#endif + +#ifndef FUTEX2_MPOL +#define FUTEX2_MPOL 0x08 +#endif + +static void *thread_lock_fn(void *arg) +{ + struct thread_args *args = arg; + int ret; + + pthread_barrier_wait(&barrier_main); + ret = futex2_wait(args->futex_ptr, 0, args->flags, NULL, 0); + args->result = ret; + return NULL; +} + +static void create_max_threads(void *futex_ptr) +{ + int i, ret; + + for (i = 0; i < MAX_THREADS; i++) { + thread_args[i].futex_ptr = futex_ptr; + thread_args[i].flags = FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | 
FUTEX2_NUMA; + thread_args[i].result = 0; + ret = pthread_create(&threads[i], NULL, thread_lock_fn, &thread_args[i]); + if (ret) + ksft_exit_fail_msg("pthread_create failed\n"); + } +} + +static void join_max_threads(void) +{ + int i, ret; + + for (i = 0; i < MAX_THREADS; i++) { + ret = pthread_join(threads[i], NULL); + if (ret) + ksft_exit_fail_msg("pthread_join failed for thread %d\n", i); + } +} + +static void __test_futex(void *futex_ptr, int must_fail, unsigned int futex_flags) +{ + int to_wake, ret, i, need_exit = 0; + + pthread_barrier_init(&barrier_main, NULL, MAX_THREADS + 1); + create_max_threads(futex_ptr); + pthread_barrier_wait(&barrier_main); + to_wake = MAX_THREADS; + + do { + ret = futex2_wake(futex_ptr, to_wake, futex_flags); + if (must_fail) { + if (ret < 0) + break; + ksft_exit_fail_msg("futex2_wake(%d, 0x%x) should fail, but didn't\n", + to_wake, futex_flags); + } + if (ret < 0) { + ksft_exit_fail_msg("Failed futex2_wake(%d, 0x%x): %m\n", + to_wake, futex_flags); + } + if (!ret) + usleep(50); + to_wake -= ret; + + } while (to_wake); + join_max_threads(); + + for (i = 0; i < MAX_THREADS; i++) { + if (must_fail && thread_args[i].result != -1) { + ksft_print_msg("Thread %d should fail but succeeded (%d)\n", + i, thread_args[i].result); + need_exit = 1; + } + if (!must_fail && thread_args[i].result != 0) { + ksft_print_msg("Thread %d failed (%d)\n", i, thread_args[i].result); + need_exit = 1; + } + } + if (need_exit) + ksft_exit_fail_msg("Aborting due to earlier errors.\n"); +} + +static void test_futex(void *futex_ptr, int must_fail) +{ + __test_futex(futex_ptr, must_fail, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA); +} + +static void test_futex_mpol(void *futex_ptr, int must_fail) +{ + __test_futex(futex_ptr, must_fail, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL); +} + +static void usage(char *prog) +{ + printf("Usage: %s\n", prog); + printf(" -c Use color\n"); + printf(" -h Display this help message\n"); + printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n", + VQUIET, VCRITICAL, VINFO); +} + +int main(int argc, char *argv[]) +{ + struct futex32_numa *futex_numa; + int mem_size, i; + void *futex_ptr; + char c; + + while ((c = getopt(argc, argv, "chv:")) != -1) { + switch (c) { + case 'c': + log_color(1); + break; + case 'h': + usage(basename(argv[0])); + exit(0); + break; + case 'v': + log_verbosity(atoi(optarg)); + break; + default: + usage(basename(argv[0])); + exit(1); + } + } + + ksft_print_header(); + ksft_set_plan(1); + + mem_size = sysconf(_SC_PAGE_SIZE); + futex_ptr = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); + if (futex_ptr == MAP_FAILED) + ksft_exit_fail_msg("mmap() for %d bytes failed\n", mem_size); + + futex_numa = futex_ptr; + + ksft_print_msg("Regular test\n"); + futex_numa->futex = 0; + futex_numa->numa = FUTEX_NO_NODE; + test_futex(futex_ptr, 0); + + if (futex_numa->numa == FUTEX_NO_NODE) + ksft_exit_fail_msg("NUMA node is left uninitialized\n"); + + ksft_print_msg("Memory too small\n"); + test_futex(futex_ptr + mem_size - 4, 1); + + ksft_print_msg("Memory out of range\n"); + test_futex(futex_ptr + mem_size, 1); + + futex_numa->numa = FUTEX_NO_NODE; + mprotect(futex_ptr, mem_size, PROT_READ); + ksft_print_msg("Memory, RO\n"); + test_futex(futex_ptr, 1); + + mprotect(futex_ptr, mem_size, PROT_NONE); + ksft_print_msg("Memory, no access\n"); + test_futex(futex_ptr, 1); + + mprotect(futex_ptr, mem_size, PROT_READ | PROT_WRITE); + ksft_print_msg("Memory back to RW\n"); + 
test_futex(futex_ptr, 0); + + /* MPOL test. Does not work as expected */ + for (i = 0; i < 4; i++) { + unsigned long nodemask; + int ret; + + nodemask = 1 << i; + ret = mbind(futex_ptr, mem_size, MPOL_BIND, &nodemask, + sizeof(nodemask) * 8, 0); + if (ret == 0) { + ksft_print_msg("Node %d test\n", i); + futex_numa->futex = 0; + futex_numa->numa = FUTEX_NO_NODE; + + ret = futex2_wake(futex_ptr, 0, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL); + if (ret < 0) + ksft_test_result_fail("Failed to wake 0 with MPOL: %m\n"); + if (0) + test_futex_mpol(futex_numa, 0); + if (futex_numa->numa != i) { + ksft_test_result_fail("Returned NUMA node is %d expected %d\n", + futex_numa->numa, i); + } + } + } + ksft_test_result_pass("NUMA MPOL tests passed\n"); + ksft_finished(); + return 0; +} diff --git a/tools/testing/selftests/futex/functional/futex_priv_hash.c b/tools/testing/selftests/futex/functional/futex_priv_hash.c new file mode 100644 index 000000000000..2dca18fefedc --- /dev/null +++ b/tools/testing/selftests/futex/functional/futex_priv_hash.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2025 Sebastian Andrzej Siewior <bigeasy@linutronix.de> + */ + +#define _GNU_SOURCE + +#include <errno.h> +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> + +#include <linux/prctl.h> +#include <sys/prctl.h> + +#include "logging.h" + +#define MAX_THREADS 64 + +static pthread_barrier_t barrier_main; +static pthread_mutex_t global_lock; +static pthread_t threads[MAX_THREADS]; +static int counter; + +#ifndef PR_FUTEX_HASH +#define PR_FUTEX_HASH 78 +# define PR_FUTEX_HASH_SET_SLOTS 1 +# define FH_FLAG_IMMUTABLE (1ULL << 0) +# define PR_FUTEX_HASH_GET_SLOTS 2 +# define PR_FUTEX_HASH_GET_IMMUTABLE 3 +#endif + +static int futex_hash_slots_set(unsigned int slots, int flags) +{ + return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, slots, flags); +} + +static int futex_hash_slots_get(void) +{ + return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS); +} + +static int futex_hash_immutable_get(void) +{ + return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_IMMUTABLE); +} + +static void futex_hash_slots_set_verify(int slots) +{ + int ret; + + ret = futex_hash_slots_set(slots, 0); + if (ret != 0) { + ksft_test_result_fail("Failed to set slots to %d: %m\n", slots); + ksft_finished(); + } + ret = futex_hash_slots_get(); + if (ret != slots) { + ksft_test_result_fail("Set %d slots but PR_FUTEX_HASH_GET_SLOTS returns: %d, %m\n", + slots, ret); + ksft_finished(); + } + ksft_test_result_pass("SET and GET slots %d passed\n", slots); +} + +static void futex_hash_slots_set_must_fail(int slots, int flags) +{ + int ret; + + ret = futex_hash_slots_set(slots, flags); + ksft_test_result(ret < 0, "futex_hash_slots_set(%d, %d)\n", + slots, flags); +} + +static void *thread_return_fn(void *arg) +{ + return NULL; +} + +static void *thread_lock_fn(void *arg) +{ + pthread_barrier_wait(&barrier_main); + + pthread_mutex_lock(&global_lock); + counter++; + usleep(20); + pthread_mutex_unlock(&global_lock); + return NULL; +} + +static void create_max_threads(void *(*thread_fn)(void *)) +{ + int i, ret; + + for (i = 0; i < MAX_THREADS; i++) { + ret = pthread_create(&threads[i], NULL, thread_fn, NULL); + if (ret) + ksft_exit_fail_msg("pthread_create failed: %m\n"); + } +} + +static void join_max_threads(void) +{ + int i, ret; + + for (i = 0; i < MAX_THREADS; i++) { + ret = pthread_join(threads[i], NULL); + if (ret) + ksft_exit_fail_msg("pthread_join failed for thread 
%d\n", i); + } +} + +static void usage(char *prog) +{ + printf("Usage: %s\n", prog); + printf(" -c Use color\n"); + printf(" -g Test global hash instead intead local immutable \n"); + printf(" -h Display this help message\n"); + printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n", + VQUIET, VCRITICAL, VINFO); +} + +static const char *test_msg_auto_create = "Automatic hash bucket init on thread creation.\n"; +static const char *test_msg_auto_inc = "Automatic increase with more than 16 CPUs\n"; + +int main(int argc, char *argv[]) +{ + int futex_slots1, futex_slotsn, online_cpus; + pthread_mutexattr_t mutex_attr_pi; + int use_global_hash = 0; + int ret; + char c; + + while ((c = getopt(argc, argv, "cghv:")) != -1) { + switch (c) { + case 'c': + log_color(1); + break; + case 'g': + use_global_hash = 1; + break; + case 'h': + usage(basename(argv[0])); + exit(0); + break; + case 'v': + log_verbosity(atoi(optarg)); + break; + default: + usage(basename(argv[0])); + exit(1); + } + } + + ksft_print_header(); + ksft_set_plan(22); + + ret = pthread_mutexattr_init(&mutex_attr_pi); + ret |= pthread_mutexattr_setprotocol(&mutex_attr_pi, PTHREAD_PRIO_INHERIT); + ret |= pthread_mutex_init(&global_lock, &mutex_attr_pi); + if (ret != 0) { + ksft_exit_fail_msg("Failed to initialize pthread mutex.\n"); + } + /* First thread, expect to be 0, not yet initialized */ + ret = futex_hash_slots_get(); + if (ret != 0) + ksft_exit_fail_msg("futex_hash_slots_get() failed: %d, %m\n", ret); + + ret = futex_hash_immutable_get(); + if (ret != 0) + ksft_exit_fail_msg("futex_hash_immutable_get() failed: %d, %m\n", ret); + + ksft_test_result_pass("Basic get slots and immutable status.\n"); + ret = pthread_create(&threads[0], NULL, thread_return_fn, NULL); + if (ret != 0) + ksft_exit_fail_msg("pthread_create() failed: %d, %m\n", ret); + + ret = pthread_join(threads[0], NULL); + if (ret != 0) + ksft_exit_fail_msg("pthread_join() failed: %d, %m\n", ret); + + /* First thread, has to initialiaze private hash */ + futex_slots1 = futex_hash_slots_get(); + if (futex_slots1 <= 0) { + ksft_print_msg("Current hash buckets: %d\n", futex_slots1); + ksft_exit_fail_msg(test_msg_auto_create); + } + + ksft_test_result_pass(test_msg_auto_create); + + online_cpus = sysconf(_SC_NPROCESSORS_ONLN); + ret = pthread_barrier_init(&barrier_main, NULL, MAX_THREADS + 1); + if (ret != 0) + ksft_exit_fail_msg("pthread_barrier_init failed: %m.\n"); + + ret = pthread_mutex_lock(&global_lock); + if (ret != 0) + ksft_exit_fail_msg("pthread_mutex_lock failed: %m.\n"); + + counter = 0; + create_max_threads(thread_lock_fn); + pthread_barrier_wait(&barrier_main); + + /* + * The current default size of hash buckets is 16. The auto increase + * works only if more than 16 CPUs are available. 
+ */ + ksft_print_msg("Online CPUs: %d\n", online_cpus); + if (online_cpus > 16) { + futex_slotsn = futex_hash_slots_get(); + if (futex_slotsn < 0 || futex_slots1 == futex_slotsn) { + ksft_print_msg("Expected increase of hash buckets but got: %d -> %d\n", + futex_slots1, futex_slotsn); + ksft_exit_fail_msg(test_msg_auto_inc); + } + ksft_test_result_pass(test_msg_auto_inc); + } else { + ksft_test_result_skip(test_msg_auto_inc); + } + ret = pthread_mutex_unlock(&global_lock); + + /* Once the user changes it, it has to be what is set */ + futex_hash_slots_set_verify(2); + futex_hash_slots_set_verify(4); + futex_hash_slots_set_verify(8); + futex_hash_slots_set_verify(32); + futex_hash_slots_set_verify(16); + + ret = futex_hash_slots_set(15, 0); + ksft_test_result(ret < 0, "Use 15 slots\n"); + + futex_hash_slots_set_verify(2); + join_max_threads(); + ksft_test_result(counter == MAX_THREADS, "Created of waited for %d of %d threads\n", + counter, MAX_THREADS); + counter = 0; + /* Once the user set something, auto reisze must be disabled */ + ret = pthread_barrier_init(&barrier_main, NULL, MAX_THREADS); + + create_max_threads(thread_lock_fn); + join_max_threads(); + + ret = futex_hash_slots_get(); + ksft_test_result(ret == 2, "No more auto-resize after manaul setting, got %d\n", + ret); + + futex_hash_slots_set_must_fail(1 << 29, 0); + + /* + * Once the private hash has been made immutable or global hash has been requested, + * then this requested can not be undone. + */ + if (use_global_hash) { + ret = futex_hash_slots_set(0, 0); + ksft_test_result(ret == 0, "Global hash request\n"); + } else { + ret = futex_hash_slots_set(4, FH_FLAG_IMMUTABLE); + ksft_test_result(ret == 0, "Immutable resize to 4\n"); + } + if (ret != 0) + goto out; + + futex_hash_slots_set_must_fail(4, 0); + futex_hash_slots_set_must_fail(4, FH_FLAG_IMMUTABLE); + futex_hash_slots_set_must_fail(8, 0); + futex_hash_slots_set_must_fail(8, FH_FLAG_IMMUTABLE); + futex_hash_slots_set_must_fail(0, FH_FLAG_IMMUTABLE); + futex_hash_slots_set_must_fail(6, FH_FLAG_IMMUTABLE); + + ret = pthread_barrier_init(&barrier_main, NULL, MAX_THREADS); + if (ret != 0) { + ksft_exit_fail_msg("pthread_barrier_init failed: %m\n"); + return 1; + } + create_max_threads(thread_lock_fn); + join_max_threads(); + + ret = futex_hash_slots_get(); + if (use_global_hash) { + ksft_test_result(ret == 0, "Continue to use global hash\n"); + } else { + ksft_test_result(ret == 4, "Continue to use the 4 hash buckets\n"); + } + + ret = futex_hash_immutable_get(); + ksft_test_result(ret == 1, "Hash reports to be immutable\n"); + +out: + ksft_finished(); + return 0; +} diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh index 5ccd599da6c3..81739849f299 100755 --- a/tools/testing/selftests/futex/functional/run.sh +++ b/tools/testing/selftests/futex/functional/run.sh @@ -82,3 +82,10 @@ echo echo ./futex_waitv $COLOR + +echo +./futex_priv_hash $COLOR +./futex_priv_hash -g $COLOR + +echo +./futex_numa_mpol $COLOR diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h index 9d305520e849..ea79662405bc 100644 --- a/tools/testing/selftests/futex/include/futex2test.h +++ b/tools/testing/selftests/futex/include/futex2test.h @@ -8,6 +8,53 @@ #define u64_to_ptr(x) ((void *)(uintptr_t)(x)) +#ifndef __NR_futex_waitv +#define __NR_futex_waitv 449 +struct futex_waitv { + __u64 val; + __u64 uaddr; + __u32 flags; + __u32 __reserved; +}; +#endif + +#ifndef __NR_futex_wake 
+#define __NR_futex_wake 454 +#endif + +#ifndef __NR_futex_wait +#define __NR_futex_wait 455 +#endif + +#ifndef FUTEX2_SIZE_U32 +#define FUTEX2_SIZE_U32 0x02 +#endif + +#ifndef FUTEX2_NUMA +#define FUTEX2_NUMA 0x04 +#endif + +#ifndef FUTEX2_MPOL +#define FUTEX2_MPOL 0x08 +#endif + +#ifndef FUTEX2_PRIVATE +#define FUTEX2_PRIVATE FUTEX_PRIVATE_FLAG +#endif + +#ifndef FUTEX2_NO_NODE +#define FUTEX_NO_NODE (-1) +#endif + +#ifndef FUTEX_32 +#define FUTEX_32 FUTEX2_SIZE_U32 +#endif + +struct futex32_numa { + futex_t futex; + futex_t numa; +}; + /** * futex_waitv - Wait at multiple futexes, wake on any * @waiters: Array of waiters @@ -20,3 +67,26 @@ static inline int futex_waitv(volatile struct futex_waitv *waiters, unsigned lon { return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo, clockid); } + +/* + * futex_wait() - block on uaddr with optional timeout + * @val: Expected value + * @flags: FUTEX2 flags + * @timeout: Relative timeout + * @clockid: Clock id for the timeout + */ +static inline int futex2_wait(void *uaddr, long val, unsigned int flags, + struct timespec *timeout, clockid_t clockid) +{ + return syscall(__NR_futex_wait, uaddr, val, ~0U, flags, timeout, clockid); +} + +/* + * futex2_wake() - Wake a number of futexes + * @nr: Number of threads to wake at most + * @flags: FUTEX2 flags + */ +static inline int futex2_wake(void *uaddr, int nr, unsigned int flags) +{ + return syscall(__NR_futex_wake, uaddr, ~0U, nr, flags); +} diff --git a/tools/testing/selftests/kexec/Makefile b/tools/testing/selftests/kexec/Makefile index 67fe7a46cb62..e3000ccb9a5d 100644 --- a/tools/testing/selftests/kexec/Makefile +++ b/tools/testing/selftests/kexec/Makefile @@ -8,6 +8,13 @@ ifeq ($(ARCH_PROCESSED),$(filter $(ARCH_PROCESSED),x86 ppc64le)) TEST_PROGS := test_kexec_load.sh test_kexec_file_load.sh TEST_FILES := kexec_common_lib.sh +include ../../../scripts/Makefile.arch + +ifeq ($(IS_64_BIT)$(ARCH_PROCESSED),1x86) +TEST_PROGS += test_kexec_jump.sh +test_kexec_jump.sh: $(OUTPUT)/test_kexec_jump +endif + include ../lib.mk endif diff --git a/tools/testing/selftests/kexec/test_kexec_jump.c b/tools/testing/selftests/kexec/test_kexec_jump.c new file mode 100644 index 000000000000..fbce287866f5 --- /dev/null +++ b/tools/testing/selftests/kexec/test_kexec_jump.c @@ -0,0 +1,72 @@ +#include <unistd.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <linux/kexec.h> +#include <linux/reboot.h> +#include <sys/reboot.h> +#include <sys/syscall.h> + +asm( + " .code64\n" + " .data\n" + "purgatory_start:\n" + + // Trigger kexec debug exception handling + " int3\n" + + // Set load address for next time + " leaq purgatory_start_b(%rip), %r11\n" + " movq %r11, 8(%rsp)\n" + + // Back to Linux + " ret\n" + + // Same again + "purgatory_start_b:\n" + + // Trigger kexec debug exception handling + " int3\n" + + // Set load address for next time + " leaq purgatory_start(%rip), %r11\n" + " movq %r11, 8(%rsp)\n" + + // Back to Linux + " ret\n" + + "purgatory_end:\n" + ".previous" +); +extern char purgatory_start[], purgatory_end[]; + +int main (void) +{ + struct kexec_segment segment = {}; + int ret; + + segment.buf = purgatory_start; + segment.bufsz = purgatory_end - purgatory_start; + segment.mem = (void *)0x400000; + segment.memsz = 0x1000; + ret = syscall(__NR_kexec_load, 0x400000, 1, &segment, KEXEC_PRESERVE_CONTEXT); + if (ret) { + perror("kexec_load"); + exit(1); + } + + ret = syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_KEXEC); + if (ret) { + perror("kexec 
reboot"); + exit(1); + } + + ret = syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_KEXEC); + if (ret) { + perror("kexec reboot"); + exit(1); + } + printf("Success\n"); + return 0; +} + diff --git a/tools/testing/selftests/kexec/test_kexec_jump.sh b/tools/testing/selftests/kexec/test_kexec_jump.sh new file mode 100755 index 000000000000..6ae977054ba2 --- /dev/null +++ b/tools/testing/selftests/kexec/test_kexec_jump.sh @@ -0,0 +1,42 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# +# Prevent loading a kernel image via the kexec_load syscall when +# signatures are required. (Dependent on CONFIG_IMA_ARCH_POLICY.) + +TEST="$0" +. ./kexec_common_lib.sh + +# kexec requires root privileges +require_root_privileges + +# get the kernel config +get_kconfig + +kconfig_enabled "CONFIG_KEXEC_JUMP=y" "kexec_jump is enabled" +if [ $? -eq 0 ]; then + log_skip "kexec_jump is not enabled" +fi + +kconfig_enabled "CONFIG_IMA_APPRAISE=y" "IMA enabled" +ima_appraise=$? + +kconfig_enabled "CONFIG_IMA_ARCH_POLICY=y" \ + "IMA architecture specific policy enabled" +arch_policy=$? + +get_secureboot_mode +secureboot=$? + +if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ]; then + log_skip "Secure boot and CONFIG_IMA_ARCH_POLICY are enabled" +fi + +./test_kexec_jump +if [ $? -eq 0 ]; then + log_pass "kexec_jump succeeded" +else + # The more likely failure mode if anything went wrong is that the + # kernel just crashes. But if we get back here, sure, whine anyway. + log_fail "kexec_jump failed" +fi diff --git a/tools/testing/selftests/mount_setattr/Makefile b/tools/testing/selftests/mount_setattr/Makefile index 0c0d7b1234c1..4d4f810cdf2c 100644 --- a/tools/testing/selftests/mount_setattr/Makefile +++ b/tools/testing/selftests/mount_setattr/Makefile @@ -2,6 +2,8 @@ # Makefile for mount selftests. 
CFLAGS = -g $(KHDR_INCLUDES) -Wall -O2 -pthread +LOCAL_HDRS += ../filesystems/wrappers.h + TEST_GEN_PROGS := mount_setattr_test include ../lib.mk diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c index 48a000cabc97..8b378c91debf 100644 --- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c +++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c @@ -20,7 +20,7 @@ #include <stdarg.h> #include <linux/mount.h> -#include "../filesystems/overlayfs/wrappers.h" +#include "../filesystems/wrappers.h" #include "../kselftest_harness.h" #ifndef CLONE_NEWNS @@ -107,46 +107,6 @@ #endif #endif -#ifndef __NR_open_tree - #if defined __alpha__ - #define __NR_open_tree 538 - #elif defined _MIPS_SIM - #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ - #define __NR_open_tree 4428 - #endif - #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ - #define __NR_open_tree 6428 - #endif - #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ - #define __NR_open_tree 5428 - #endif - #elif defined __ia64__ - #define __NR_open_tree (428 + 1024) - #else - #define __NR_open_tree 428 - #endif -#endif - -#ifndef __NR_move_mount - #if defined __alpha__ - #define __NR_move_mount 539 - #elif defined _MIPS_SIM - #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ - #define __NR_move_mount 4429 - #endif - #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ - #define __NR_move_mount 6429 - #endif - #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ - #define __NR_move_mount 5429 - #endif - #elif defined __ia64__ - #define __NR_move_mount (428 + 1024) - #else - #define __NR_move_mount 429 - #endif -#endif - #ifndef MOUNT_ATTR_IDMAP #define MOUNT_ATTR_IDMAP 0x00100000 #endif @@ -161,23 +121,6 @@ static inline int sys_mount_setattr(int dfd, const char *path, unsigned int flag return syscall(__NR_mount_setattr, dfd, path, flags, attr, size); } -#ifndef OPEN_TREE_CLONE -#define OPEN_TREE_CLONE 1 -#endif - -#ifndef OPEN_TREE_CLOEXEC -#define OPEN_TREE_CLOEXEC O_CLOEXEC -#endif - -#ifndef AT_RECURSIVE -#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */ -#endif - -static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags) -{ - return syscall(__NR_open_tree, dfd, filename, flags); -} - static ssize_t write_nointr(int fd, const void *buf, size_t count) { ssize_t ret; @@ -1076,7 +1019,7 @@ FIXTURE_SETUP(mount_setattr_idmapped) ASSERT_EQ(mkdir("/mnt/D", 0777), 0); img_fd = openat(-EBADF, "/mnt/C/ext4.img", O_CREAT | O_WRONLY, 0600); ASSERT_GE(img_fd, 0); - ASSERT_EQ(ftruncate(img_fd, 1024 * 2048), 0); + ASSERT_EQ(ftruncate(img_fd, 2147483648 /* 2 GB */), 0); ASSERT_EQ(system("mkfs.ext4 -q /mnt/C/ext4.img"), 0); ASSERT_EQ(system("mount -o loop -t ext4 /mnt/C/ext4.img /mnt/D/"), 0); ASSERT_EQ(close(img_fd), 0); diff --git a/tools/testing/selftests/perf_events/watermark_signal.c b/tools/testing/selftests/perf_events/watermark_signal.c index 49dc1e831174..e03fe1b9bba2 100644 --- a/tools/testing/selftests/perf_events/watermark_signal.c +++ b/tools/testing/selftests/perf_events/watermark_signal.c @@ -75,7 +75,7 @@ TEST(watermark_signal) if (waitpid(child, &child_status, WSTOPPED) != child || !(WIFSTOPPED(child_status) && WSTOPSIG(child_status) == SIGSTOP)) { fprintf(stderr, - "failed to sycnhronize with child errno=%d status=%x\n", + "failed to synchronize with child errno=%d status=%x\n", errno, child_status); goto cleanup; diff --git a/tools/testing/selftests/pid_namespace/pid_max.c b/tools/testing/selftests/pid_namespace/pid_max.c index 
51c414faabb0..96f274f0582b 100644 --- a/tools/testing/selftests/pid_namespace/pid_max.c +++ b/tools/testing/selftests/pid_namespace/pid_max.c @@ -10,6 +10,7 @@ #include <stdlib.h> #include <string.h> #include <syscall.h> +#include <sys/mount.h> #include <sys/wait.h> #include "../kselftest_harness.h" diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h index 55bcf81a2b9a..efd74063126e 100644 --- a/tools/testing/selftests/pidfd/pidfd.h +++ b/tools/testing/selftests/pidfd/pidfd.h @@ -131,6 +131,26 @@ #define PIDFD_INFO_EXIT (1UL << 3) /* Always returned if available, even if not requested */ #endif +#ifndef PIDFD_INFO_COREDUMP +#define PIDFD_INFO_COREDUMP (1UL << 4) +#endif + +#ifndef PIDFD_COREDUMPED +#define PIDFD_COREDUMPED (1U << 0) /* Did crash and... */ +#endif + +#ifndef PIDFD_COREDUMP_SKIP +#define PIDFD_COREDUMP_SKIP (1U << 1) /* coredumping generation was skipped. */ +#endif + +#ifndef PIDFD_COREDUMP_USER +#define PIDFD_COREDUMP_USER (1U << 2) /* coredump was done as the user. */ +#endif + +#ifndef PIDFD_COREDUMP_ROOT +#define PIDFD_COREDUMP_ROOT (1U << 3) /* coredump was done as root. */ +#endif + #ifndef PIDFD_THREAD #define PIDFD_THREAD O_EXCL #endif @@ -150,6 +170,8 @@ struct pidfd_info { __u32 fsuid; __u32 fsgid; __s32 exit_code; + __u32 coredump_mask; + __u32 __spare1; }; /* diff --git a/tools/testing/selftests/pidfd/pidfd_bind_mount.c b/tools/testing/selftests/pidfd/pidfd_bind_mount.c index 7822dd080258..c094aeb1c620 100644 --- a/tools/testing/selftests/pidfd/pidfd_bind_mount.c +++ b/tools/testing/selftests/pidfd/pidfd_bind_mount.c @@ -15,79 +15,7 @@ #include "pidfd.h" #include "../kselftest_harness.h" - -#ifndef __NR_open_tree - #if defined __alpha__ - #define __NR_open_tree 538 - #elif defined _MIPS_SIM - #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ - #define __NR_open_tree 4428 - #endif - #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ - #define __NR_open_tree 6428 - #endif - #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ - #define __NR_open_tree 5428 - #endif - #elif defined __ia64__ - #define __NR_open_tree (428 + 1024) - #else - #define __NR_open_tree 428 - #endif -#endif - -#ifndef __NR_move_mount - #if defined __alpha__ - #define __NR_move_mount 539 - #elif defined _MIPS_SIM - #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */ - #define __NR_move_mount 4429 - #endif - #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */ - #define __NR_move_mount 6429 - #endif - #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */ - #define __NR_move_mount 5429 - #endif - #elif defined __ia64__ - #define __NR_move_mount (428 + 1024) - #else - #define __NR_move_mount 429 - #endif -#endif - -#ifndef MOVE_MOUNT_F_EMPTY_PATH -#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */ -#endif - -#ifndef MOVE_MOUNT_F_EMPTY_PATH -#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */ -#endif - -static inline int sys_move_mount(int from_dfd, const char *from_pathname, - int to_dfd, const char *to_pathname, - unsigned int flags) -{ - return syscall(__NR_move_mount, from_dfd, from_pathname, to_dfd, - to_pathname, flags); -} - -#ifndef OPEN_TREE_CLONE -#define OPEN_TREE_CLONE 1 -#endif - -#ifndef OPEN_TREE_CLOEXEC -#define OPEN_TREE_CLOEXEC O_CLOEXEC -#endif - -#ifndef AT_RECURSIVE -#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */ -#endif - -static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags) -{ - return syscall(__NR_open_tree, dfd, filename, flags); -} +#include "../filesystems/wrappers.h" FIXTURE(pidfd_bind_mount) { 
char template[PATH_MAX]; diff --git a/tools/testing/selftests/pidfd/pidfd_info_test.c b/tools/testing/selftests/pidfd/pidfd_info_test.c index 1758a1b0457b..a0eb6e81eaa2 100644 --- a/tools/testing/selftests/pidfd/pidfd_info_test.c +++ b/tools/testing/selftests/pidfd/pidfd_info_test.c @@ -299,6 +299,7 @@ TEST_F(pidfd_info, thread_group) /* Opening a thread as a thread-group leader must fail. */ pidfd_thread = sys_pidfd_open(pid_thread, 0); ASSERT_LT(pidfd_thread, 0); + ASSERT_EQ(errno, ENOENT); /* Opening a thread as a PIDFD_THREAD must succeed. */ pidfd_thread = sys_pidfd_open(pid_thread, PIDFD_THREAD); @@ -362,9 +363,9 @@ TEST_F(pidfd_info, thread_group) ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0); ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS)); ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT)); - /* The thread-group leader exited successfully. Only the specific thread was SIGKILLed. */ - ASSERT_TRUE(WIFEXITED(info.exit_code)); - ASSERT_EQ(WEXITSTATUS(info.exit_code), 0); + /* Even though the thread-group exited successfully it will still report the group exit code. */ + ASSERT_TRUE(WIFSIGNALED(info.exit_code)); + ASSERT_EQ(WTERMSIG(info.exit_code), SIGKILL); /* * Retrieve exit information for the thread-group leader via the @@ -375,9 +376,9 @@ TEST_F(pidfd_info, thread_group) ASSERT_FALSE(!!(info2.mask & PIDFD_INFO_CREDS)); ASSERT_TRUE(!!(info2.mask & PIDFD_INFO_EXIT)); - /* The thread-group leader exited successfully. Only the specific thread was SIGKILLed. */ - ASSERT_TRUE(WIFEXITED(info2.exit_code)); - ASSERT_EQ(WEXITSTATUS(info2.exit_code), 0); + /* Even though the thread-group exited successfully it will still report the group exit code. */ + ASSERT_TRUE(WIFSIGNALED(info2.exit_code)); + ASSERT_EQ(WTERMSIG(info2.exit_code), SIGKILL); /* Retrieve exit information for the thread. */ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT; diff --git a/tools/testing/selftests/rcutorture/bin/console-badness.sh b/tools/testing/selftests/rcutorture/bin/console-badness.sh index aad51e7c0183..991fb11306eb 100755 --- a/tools/testing/selftests/rcutorture/bin/console-badness.sh +++ b/tools/testing/selftests/rcutorture/bin/console-badness.sh @@ -10,7 +10,7 @@ # # Authors: Paul E. McKenney <paulmck@kernel.org> -grep -E 'Badness|WARNING:|Warn|BUG|===========|BUG: KCSAN:|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' | +grep -E 'Badness|WARNING:|Warn|BUG|===========|BUG: KCSAN:|Call Trace:|Call trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' 
| grep -v 'ODEBUG: ' | grep -v 'This means that this is a DEBUG kernel and it is' | grep -v 'Warning: unable to open an initial console' | diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh index ad79784e552d..957800c9ffba 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh @@ -73,7 +73,7 @@ config_override_param "$config_dir/CFcommon.$(uname -m)" KcList \ cp $T/KcList $resdir/ConfigFragment base_resdir=`echo $resdir | sed -e 's/\.[0-9]\+$//'` -if test "$base_resdir" != "$resdir" && test -f $base_resdir/bzImage && test -f $base_resdir/vmlinux +if test "$base_resdir" != "$resdir" && (test -f $base_resdir/bzImage || test -f $base_resdir/Image) && test -f $base_resdir/vmlinux then # Rerunning previous test, so use that test's kernel. QEMU="`identify_qemu $base_resdir/vmlinux`" diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh index b07c11cf6929..21e6ba3615f6 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-console.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh @@ -148,7 +148,7 @@ then summary="$summary KCSAN: $n_kcsan" fi fi - n_calltrace=`grep -c 'Call Trace:' $file` + n_calltrace=`grep -Ec 'Call Trace:|Call trace:' $file` if test "$n_calltrace" -ne 0 then summary="$summary Call Traces: $n_calltrace" diff --git a/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh b/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh index 2db12c5cad9c..208be7d09a61 100755 --- a/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh +++ b/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh @@ -39,8 +39,9 @@ do shift done -err= nerrs=0 + +# Test lockdep's handling of deadlocks. for d in 0 1 do for t in 0 1 2 @@ -52,6 +53,12 @@ do tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 5s --configs "SRCU-P" --kconfig "CONFIG_FORCE_NEED_SRCU_NMI_SAFE=y" --bootargs "rcutorture.test_srcu_lockdep=$val rcutorture.reader_flavor=0x2" --trust-make --datestamp "$ds/$val" > "$T/kvm.sh.out" 2>&1 ret=$? mv "$T/kvm.sh.out" "$RCUTORTURE/res/$ds/$val" + if ! grep -q '^CONFIG_PROVE_LOCKING=y' .config + then + echo "rcu_torture_init_srcu_lockdep:Error: CONFIG_PROVE_LOCKING disabled in rcutorture SRCU-P scenario" + nerrs=$((nerrs+1)) + err=1 + fi if test "$d" -ne 0 && test "$ret" -eq 0 then err=1 @@ -71,6 +78,39 @@ do done done done + +# Test lockdep-enabled testing of mixed SRCU readers. +for val in 0x1 0xf +do + err= + tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 5s --configs "SRCU-P" --kconfig "CONFIG_FORCE_NEED_SRCU_NMI_SAFE=y" --bootargs "rcutorture.reader_flavor=$val" --trust-make --datestamp "$ds/$val" > "$T/kvm.sh.out" 2>&1 + ret=$? + mv "$T/kvm.sh.out" "$RCUTORTURE/res/$ds/$val" + if ! 
grep -q '^CONFIG_PROVE_LOCKING=y' .config + then + echo "rcu_torture_init_srcu_lockdep:Error: CONFIG_PROVE_LOCKING disabled in rcutorture SRCU-P scenario" + nerrs=$((nerrs+1)) + err=1 + fi + if test "$val" -eq 0xf && test "$ret" -eq 0 + then + err=1 + echo -n Unexpected success for > "$RCUTORTURE/res/$ds/$val/kvm.sh.err" + fi + if test "$val" -eq 0x1 && test "$ret" -ne 0 + then + err=1 + echo -n Unexpected failure for > "$RCUTORTURE/res/$ds/$val/kvm.sh.err" + fi + if test -n "$err" + then + grep "rcu_torture_init_srcu_lockdep: test_srcu_lockdep = " "$RCUTORTURE/res/$ds/$val/SRCU-P/console.log" | sed -e 's/^.*rcu_torture_init_srcu_lockdep://' >> "$RCUTORTURE/res/$ds/$val/kvm.sh.err" + cat "$RCUTORTURE/res/$ds/$val/kvm.sh.err" + nerrs=$((nerrs+1)) + fi +done + +# Set up exit code. if test "$nerrs" -ne 0 then exit 1 diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 0447c4a00cc4..e03fdaca89b3 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -51,12 +51,15 @@ do_scftorture=yes do_rcuscale=yes do_refscale=yes do_kvfree=yes +do_normal=yes +explicit_normal=no do_kasan=yes do_kcsan=no do_clocksourcewd=yes do_rt=yes do_rcutasksflavors=yes do_srcu_lockdep=yes +do_rcu_rust=no # doyesno - Helper function for yes/no arguments function doyesno () { @@ -87,6 +90,7 @@ usage () { echo " --do-rcutorture / --do-no-rcutorture / --no-rcutorture" echo " --do-refscale / --do-no-refscale / --no-refscale" echo " --do-rt / --do-no-rt / --no-rt" + echo " --do-rcu-rust / --do-no-rcu-rust / --no-rcu-rust" echo " --do-scftorture / --do-no-scftorture / --no-scftorture" echo " --do-srcu-lockdep / --do-no-srcu-lockdep / --no-srcu-lockdep" echo " --duration [ <minutes> | <hours>h | <days>d ]" @@ -128,6 +132,8 @@ do do_refscale=yes do_rt=yes do_kvfree=yes + do_normal=yes + explicit_normal=no do_kasan=yes do_kcsan=yes do_clocksourcewd=yes @@ -161,11 +167,17 @@ do do_refscale=no do_rt=no do_kvfree=no + do_normal=no + explicit_normal=no do_kasan=no do_kcsan=no do_clocksourcewd=no do_srcu_lockdep=no ;; + --do-normal|--do-no-normal|--no-normal) + do_normal=`doyesno "$1" --do-normal` + explicit_normal=yes + ;; --do-rcuscale|--do-no-rcuscale|--no-rcuscale) do_rcuscale=`doyesno "$1" --do-rcuscale` ;; @@ -181,6 +193,9 @@ do --do-rt|--do-no-rt|--no-rt) do_rt=`doyesno "$1" --do-rt` ;; + --do-rcu-rust|--do-no-rcu-rust|--no-rcu-rust) + do_rcu_rust=`doyesno "$1" --do-rcu-rust` + ;; --do-scftorture|--do-no-scftorture|--no-scftorture) do_scftorture=`doyesno "$1" --do-scftorture` ;; @@ -242,6 +257,17 @@ trap 'rm -rf $T' 0 2 echo " --- " $scriptname $args | tee -a $T/log echo " --- Results directory: " $ds | tee -a $T/log +if test "$do_normal" = "no" && test "$do_kasan" = "no" && test "$do_kcsan" = "no" +then + # Match old scripts so that "--do-none --do-rcutorture" does + # normal rcutorture testing, but no KASAN or KCSAN testing. 
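+ # For example, "torture.sh --do-none --do-rcutorture" falls through to a normal rcutorture run here, while adding "--do-kasan" leaves do_normal=no and runs only the KASAN flavor.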
+ if test $explicit_normal = yes + then + echo " --- Everything disabled, so explicit --do-normal overridden" | tee -a $T/log + fi + do_normal=yes +fi + # Calculate rcutorture defaults and apportion time if test -z "$configs_rcutorture" then @@ -332,9 +358,12 @@ function torture_set { local kcsan_kmake_tag= local flavor=$1 shift - curflavor=$flavor - torture_one "$@" - mv $T/last-resdir $T/last-resdir-nodebug || : + if test "$do_normal" = "yes" + then + curflavor=$flavor + torture_one "$@" + mv $T/last-resdir $T/last-resdir-nodebug || : + fi if test "$do_kasan" = "yes" then curflavor=${flavor}-kasan @@ -448,13 +477,57 @@ fi if test "$do_rt" = "yes" then - # With all post-boot grace periods forced to normal. - torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_normal=1" - torture_set "rcurttorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make + # In both runs, disable testing of RCU priority boosting because + # -rt doesn't like its interaction with testing of callback + # flooding. + + # With all post-boot grace periods forced to normal (default for PREEMPT_RT). + torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcutorture.test_boost=0 rcutorture.preempt_duration=0" + torture_set "rcurttorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --kconfig "CONFIG_PREEMPT_RT=y CONFIG_EXPERT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_RCU_NOCB_CPU=y" --trust-make # With all post-boot grace periods forced to expedited. - torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_expedited=1" - torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make + torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcutorture.test_boost=0 rcupdate.rcu_normal_after_boot=0 rcupdate.rcu_expedited=1 rcutorture.preempt_duration=0" + torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --kconfig "CONFIG_PREEMPT_RT=y CONFIG_EXPERT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_FULL=y CONFIG_RCU_NOCB_CPU=y" --trust-make +fi + +if test "$do_rcu_rust" = "yes" +then + echo " --- do-rcu-rust:" Start `date` | tee -a $T/log + rrdir="tools/testing/selftests/rcutorture/res/$ds/results-rcu-rust" + mkdir -p "$rrdir" + echo " --- make LLVM=1 rustavailable " | tee -a $rrdir/log > $rrdir/rustavailable.out + make LLVM=1 rustavailable > $T/rustavailable.out 2>&1 + retcode=$? + echo $retcode > $rrdir/rustavailable.exitcode + cat $T/rustavailable.out | tee -a $rrdir/log >> $rrdir/rustavailable.out 2>&1 + buildphase=rustavailable + if test "$retcode" -eq 0 + then + echo " --- Running 'make mrproper' in order to run kunit." | tee -a $rrdir/log > $rrdir/mrproper.out + make mrproper > $rrdir/mrproper.out 2>&1 + retcode=$? + echo $retcode > $rrdir/mrproper.exitcode + buildphase=mrproper + fi + if test "$retcode" -eq 0 + then + echo " --- Running rust_doctests_kernel." 
| tee -a $rrdir/log > $rrdir/rust_doctests_kernel.out + ./tools/testing/kunit/kunit.py run --make_options LLVM=1 --make_options CLIPPY=1 --arch arm64 --kconfig_add CONFIG_SMP=y --kconfig_add CONFIG_WERROR=y --kconfig_add CONFIG_RUST=y rust_doctests_kernel >> $rrdir/rust_doctests_kernel.out 2>&1 + # @@@ Remove "--arch arm64" in order to test on native architecture? + # @@@ Analyze $rrdir/rust_doctests_kernel.out contents? + retcode=$? + echo $retcode > $rrdir/rust_doctests_kernel.exitcode + buildphase=rust_doctests_kernel + fi + if test "$retcode" -eq 0 + then + echo "rcu-rust($retcode)" $rrdir >> $T/successes + echo Success >> $rrdir/log + else + echo "rcu-rust($retcode)" $rrdir >> $T/failures + echo " --- rcu-rust Test summary:" >> $rrdir/log + echo " --- Summary: Exit code $retcode from $buildphase, see $rrdir/$buildphase.out" >> $rrdir/log + fi fi if test "$do_srcu_lockdep" = "yes" diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 index 8ae41d5f81a3..54b1600c7eb5 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 @@ -8,8 +8,6 @@ CONFIG_NO_HZ_IDLE=y CONFIG_NO_HZ_FULL=n CONFIG_RCU_TRACE=y CONFIG_HOTPLUG_CPU=y -CONFIG_MAXSMP=y -CONFIG_CPUMASK_OFFSTACK=y CONFIG_RCU_NOCB_CPU=y CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_BOOST=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot index 40af3df0f397..1cc5b47dde28 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot @@ -1,4 +1,4 @@ -maxcpus=8 nr_cpus=43 +maxcpus=8 nr_cpus=17 rcutree.gp_preinit_delay=3 rcutree.gp_init_delay=3 rcutree.gp_cleanup_delay=3 diff --git a/tools/testing/selftests/run_kselftest.sh b/tools/testing/selftests/run_kselftest.sh index 50e03eefe7ac..0443beacf362 100755 --- a/tools/testing/selftests/run_kselftest.sh +++ b/tools/testing/selftests/run_kselftest.sh @@ -3,7 +3,14 @@ # # Run installed kselftest tests. # -BASE_DIR=$(realpath $(dirname $0)) + +# Fallback to readlink if realpath is not available +if which realpath > /dev/null; then + BASE_DIR=$(realpath $(dirname $0)) +else + BASE_DIR=$(readlink -f $(dirname $0)) +fi + cd $BASE_DIR TESTS="$BASE_DIR"/kselftest-list.txt if [ ! 
-r "$TESTS" ] ; then diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json index a951c0d33cd2..ddc97ecd8b39 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json +++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json @@ -573,5 +573,32 @@ "teardown": [ "$TC qdisc del dev $DEV1 handle 1: root" ] + }, + { + "id": "831d", + "name": "Test HFSC qlen accounting with DRR/NETEM/BLACKHOLE chain", + "category": ["qdisc", "hfsc", "drr", "netem", "blackhole"], + "plugins": { "requires": ["nsPlugin", "scapyPlugin"] }, + "setup": [ + "$IP link set dev $DEV1 up || true", + "$TC qdisc add dev $DEV1 root handle 1: drr", + "$TC filter add dev $DEV1 parent 1: basic classid 1:1", + "$TC class add dev $DEV1 parent 1: classid 1:1 drr", + "$TC qdisc add dev $DEV1 parent 1:1 handle 2: hfsc def 1", + "$TC class add dev $DEV1 parent 2: classid 2:1 hfsc rt m1 8 d 1 m2 0", + "$TC qdisc add dev $DEV1 parent 2:1 handle 3: netem", + "$TC qdisc add dev $DEV1 parent 3:1 handle 4: blackhole" + ], + "scapy": { + "iface": "$DEV0", + "count": 5, + "packet": "Ether()/IP(dst='10.10.10.1', src='10.10.10.10')/ICMP()" + }, + "cmdUnderTest": "$TC -s qdisc show dev $DEV1", + "expExitCode": "0", + "verifyCmd": "$TC -s qdisc show dev $DEV1", + "matchPattern": "qdisc hfsc", + "matchCount": "1", + "teardown": ["$TC qdisc del dev $DEV1 root handle 1: drr"] } ] diff --git a/tools/testing/selftests/timens/clock_nanosleep.c b/tools/testing/selftests/timens/clock_nanosleep.c index 72d41b955fb2..5cc0010e85ff 100644 --- a/tools/testing/selftests/timens/clock_nanosleep.c +++ b/tools/testing/selftests/timens/clock_nanosleep.c @@ -38,7 +38,7 @@ void *call_nanosleep(void *_args) return NULL; } -int run_test(int clockid, int abs) +static int run_test(int clockid, int abs) { struct timespec now = {}, rem; struct thread_args args = { .now = &now, .rem = &rem, .clockid = clockid}; @@ -115,6 +115,8 @@ int main(int argc, char *argv[]) { int ret, nsfd; + ksft_print_header(); + nscheck(); ksft_set_plan(4); diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c index d12ff955de0d..a644162d56fd 100644 --- a/tools/testing/selftests/timens/exec.c +++ b/tools/testing/selftests/timens/exec.c @@ -36,6 +36,8 @@ int main(int argc, char *argv[]) return 0; } + ksft_print_header(); + nscheck(); ksft_set_plan(1); diff --git a/tools/testing/selftests/timens/futex.c b/tools/testing/selftests/timens/futex.c index 6b2b9264e851..339633ae037a 100644 --- a/tools/testing/selftests/timens/futex.c +++ b/tools/testing/selftests/timens/futex.c @@ -66,6 +66,8 @@ int main(int argc, char *argv[]) pid_t pid; struct timespec mtime_now; + ksft_print_header(); + nscheck(); ksft_set_plan(2); diff --git a/tools/testing/selftests/timens/gettime_perf.c b/tools/testing/selftests/timens/gettime_perf.c index 6b13dc277724..d6658b7b7548 100644 --- a/tools/testing/selftests/timens/gettime_perf.c +++ b/tools/testing/selftests/timens/gettime_perf.c @@ -67,6 +67,8 @@ int main(int argc, char *argv[]) time_t offset = 10; int nsfd; + ksft_print_header(); + ksft_set_plan(8); fill_function_pointers(); diff --git a/tools/testing/selftests/timens/procfs.c b/tools/testing/selftests/timens/procfs.c index 1833ca97eb24..0a9ff90ee69a 100644 --- a/tools/testing/selftests/timens/procfs.c +++ b/tools/testing/selftests/timens/procfs.c @@ -180,6 +180,8 @@ int main(int argc, char *argv[]) { int ret = 0; + ksft_print_header(); + nscheck(); ksft_set_plan(2); diff 
--git a/tools/testing/selftests/timens/timens.c b/tools/testing/selftests/timens/timens.c index 387220791a05..a9c0534ef8f6 100644 --- a/tools/testing/selftests/timens/timens.c +++ b/tools/testing/selftests/timens/timens.c @@ -151,6 +151,8 @@ int main(int argc, char *argv[]) time_t offset; int ret = 0; + ksft_print_header(); + nscheck(); check_supported_timers(); diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c index 5b939f59dfa4..79543ceb2c0f 100644 --- a/tools/testing/selftests/timens/timer.c +++ b/tools/testing/selftests/timens/timer.c @@ -15,7 +15,7 @@ #include "log.h" #include "timens.h" -int run_test(int clockid, struct timespec now) +static int run_test(int clockid, struct timespec now) { struct itimerspec new_value; long long elapsed; @@ -75,6 +75,8 @@ int main(int argc, char *argv[]) pid_t pid; struct timespec btime_now, mtime_now; + ksft_print_header(); + nscheck(); check_supported_timers(); diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c index a4196bbd6e33..402e2e415545 100644 --- a/tools/testing/selftests/timens/timerfd.c +++ b/tools/testing/selftests/timens/timerfd.c @@ -15,14 +15,14 @@ #include "log.h" #include "timens.h" -static int tclock_gettime(clock_t clockid, struct timespec *now) +static int tclock_gettime(clockid_t clockid, struct timespec *now) { if (clockid == CLOCK_BOOTTIME_ALARM) clockid = CLOCK_BOOTTIME; return clock_gettime(clockid, now); } -int run_test(int clockid, struct timespec now) +static int run_test(int clockid, struct timespec now) { struct itimerspec new_value; long long elapsed; @@ -82,6 +82,8 @@ int main(int argc, char *argv[]) pid_t pid; struct timespec btime_now, mtime_now; + ksft_print_header(); + nscheck(); check_supported_timers(); diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c index 5b8907bf451d..b957e1a65124 100644 --- a/tools/testing/selftests/timens/vfork_exec.c +++ b/tools/testing/selftests/timens/vfork_exec.c @@ -91,6 +91,8 @@ int main(int argc, char *argv[]) return check("child after exec", &now); } + ksft_print_header(); + nscheck(); ksft_set_plan(4); diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile index f34ac0bac696..4dde8838261d 100644 --- a/tools/testing/selftests/ublk/Makefile +++ b/tools/testing/selftests/ublk/Makefile @@ -1,6 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir) +CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir)/usr/include +ifneq ($(WERROR),0) + CFLAGS += -Werror +endif + LDLIBS += -lpthread -lm -luring TEST_PROGS := test_generic_01.sh @@ -11,6 +15,11 @@ TEST_PROGS += test_generic_05.sh TEST_PROGS += test_generic_06.sh TEST_PROGS += test_generic_07.sh +TEST_PROGS += test_generic_08.sh +TEST_PROGS += test_generic_09.sh +TEST_PROGS += test_generic_10.sh +TEST_PROGS += test_generic_11.sh + TEST_PROGS += test_null_01.sh TEST_PROGS += test_null_02.sh TEST_PROGS += test_loop_01.sh diff --git a/tools/testing/selftests/ublk/fault_inject.c b/tools/testing/selftests/ublk/fault_inject.c index 94a8e729ba4c..5421774d7867 100644 --- a/tools/testing/selftests/ublk/fault_inject.c +++ b/tools/testing/selftests/ublk/fault_inject.c @@ -16,6 +16,11 @@ static int ublk_fault_inject_tgt_init(const struct dev_ctx *ctx, const struct ublksrv_ctrl_dev_info *info = &dev->dev_info; unsigned long dev_size = 250UL << 30; + if (ctx->auto_zc_fallback) { + ublk_err("%s: not support auto_zc_fallback\n", 
__func__); + return -EINVAL; + } + dev->tgt.dev_size = dev_size; dev->tgt.params = (struct ublk_params) { .types = UBLK_PARAM_TYPE_BASIC, diff --git a/tools/testing/selftests/ublk/file_backed.c b/tools/testing/selftests/ublk/file_backed.c index 6f34eabfae97..509842df9bee 100644 --- a/tools/testing/selftests/ublk/file_backed.c +++ b/tools/testing/selftests/ublk/file_backed.c @@ -29,19 +29,23 @@ static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_des static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag) { unsigned ublk_op = ublksrv_get_op(iod); - int zc = ublk_queue_use_zc(q); - enum io_uring_op op = ublk_to_uring_op(iod, zc); + unsigned zc = ublk_queue_use_zc(q); + unsigned auto_zc = ublk_queue_use_auto_zc(q); + enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc); struct io_uring_sqe *sqe[3]; + void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr; - if (!zc) { + if (!zc || auto_zc) { ublk_queue_alloc_sqes(q, sqe, 1); if (!sqe[0]) return -ENOMEM; io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/, - (void *)iod->addr, + addr, iod->nr_sectors << 9, iod->start_sector << 9); + if (auto_zc) + sqe[0]->buf_index = tag; io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE); /* bit63 marks us as tgt io */ sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1); @@ -145,6 +149,11 @@ static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev) }, }; + if (ctx->auto_zc_fallback) { + ublk_err("%s: not support auto_zc_fallback\n", __func__); + return -EINVAL; + } + ret = backing_file_tgt_init(dev); if (ret) return ret; diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c index 842b40736a9b..b5131a000795 100644 --- a/tools/testing/selftests/ublk/kublk.c +++ b/tools/testing/selftests/ublk/kublk.c @@ -216,6 +216,30 @@ static int ublk_ctrl_get_features(struct ublk_dev *dev, return __ublk_ctrl_cmd(dev, &data); } +static int ublk_ctrl_update_size(struct ublk_dev *dev, + __u64 nr_sects) +{ + struct ublk_ctrl_cmd_data data = { + .cmd_op = UBLK_U_CMD_UPDATE_SIZE, + .flags = CTRL_CMD_HAS_DATA, + }; + + data.data[0] = nr_sects; + return __ublk_ctrl_cmd(dev, &data); +} + +static int ublk_ctrl_quiesce_dev(struct ublk_dev *dev, + unsigned int timeout_ms) +{ + struct ublk_ctrl_cmd_data data = { + .cmd_op = UBLK_U_CMD_QUIESCE_DEV, + .flags = CTRL_CMD_HAS_DATA, + }; + + data.data[0] = timeout_ms; + return __ublk_ctrl_cmd(dev, &data); +} + static const char *ublk_dev_state_desc(struct ublk_dev *dev) { switch (dev->dev_info.state) { @@ -405,7 +429,7 @@ static void ublk_queue_deinit(struct ublk_queue *q) free(q->ios[i].buf_addr); } -static int ublk_queue_init(struct ublk_queue *q) +static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags) { struct ublk_dev *dev = q->dev; int depth = dev->dev_info.queue_depth; @@ -420,10 +444,14 @@ static int ublk_queue_init(struct ublk_queue *q) q->cmd_inflight = 0; q->tid = gettid(); - if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY) { + if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_AUTO_BUF_REG)) { q->state |= UBLKSRV_NO_BUF; - q->state |= UBLKSRV_ZC; + if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY) + q->state |= UBLKSRV_ZC; + if (dev->dev_info.flags & UBLK_F_AUTO_BUF_REG) + q->state |= UBLKSRV_AUTO_BUF_REG; } + q->state |= extra_flags; cmd_buf_size = ublk_queue_cmd_buf_sz(q); off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz(); @@ -461,7 +489,7 @@ static int ublk_queue_init(struct ublk_queue *q) goto fail; } - if 
(dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY) { + if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_AUTO_BUF_REG)) { ret = io_uring_register_buffers_sparse(&q->ring, q->q_depth); if (ret) { ublk_err("ublk dev %d queue %d register spare buffers failed %d", @@ -525,6 +553,23 @@ static void ublk_dev_unprep(struct ublk_dev *dev) close(dev->fds[0]); } +static void ublk_set_auto_buf_reg(const struct ublk_queue *q, + struct io_uring_sqe *sqe, + unsigned short tag) +{ + struct ublk_auto_buf_reg buf = {}; + + if (q->tgt_ops->buf_index) + buf.index = q->tgt_ops->buf_index(q, tag); + else + buf.index = tag; + + if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK) + buf.flags = UBLK_AUTO_BUF_REG_FALLBACK; + + sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf); +} + int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag) { struct ublksrv_io_cmd *cmd; @@ -579,6 +624,9 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag) else cmd->addr = 0; + if (q->state & UBLKSRV_AUTO_BUF_REG) + ublk_set_auto_buf_reg(q, sqe[0], tag); + user_data = build_user_data(tag, _IOC_NR(cmd_op), 0, 0); io_uring_sqe_set_data64(sqe[0], user_data); @@ -729,6 +777,7 @@ struct ublk_queue_info { struct ublk_queue *q; sem_t *queue_sem; cpu_set_t *affinity; + unsigned char auto_zc_fallback; }; static void *ublk_io_handler_fn(void *data) @@ -736,9 +785,13 @@ static void *ublk_io_handler_fn(void *data) struct ublk_queue_info *info = data; struct ublk_queue *q = info->q; int dev_id = q->dev->dev_info.dev_id; + unsigned extra_flags = 0; int ret; - ret = ublk_queue_init(q); + if (info->auto_zc_fallback) + extra_flags = UBLKSRV_AUTO_BUF_REG_FALLBACK; + + ret = ublk_queue_init(q, extra_flags); if (ret) { ublk_err("ublk dev %d queue %d init queue failed\n", dev_id, q->q_id); @@ -831,6 +884,7 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev) qinfo[i].q = &dev->q[i]; qinfo[i].queue_sem = &queue_sem; qinfo[i].affinity = &affinity_buf[i]; + qinfo[i].auto_zc_fallback = ctx->auto_zc_fallback; pthread_create(&dev->q[i].thread, NULL, ublk_io_handler_fn, &qinfo[i]); @@ -1011,6 +1065,9 @@ static int __cmd_dev_add(const struct dev_ctx *ctx) info->nr_hw_queues = nr_queues; info->queue_depth = depth; info->flags = ctx->flags; + if ((features & UBLK_F_QUIESCE) && + (info->flags & UBLK_F_USER_RECOVERY)) + info->flags |= UBLK_F_QUIESCE; dev->tgt.ops = ops; dev->tgt.sq_depth = depth; dev->tgt.cq_depth = depth; @@ -1206,6 +1263,9 @@ static int cmd_dev_get_features(void) [const_ilog2(UBLK_F_USER_COPY)] = "USER_COPY", [const_ilog2(UBLK_F_ZONED)] = "ZONED", [const_ilog2(UBLK_F_USER_RECOVERY_FAIL_IO)] = "RECOVERY_FAIL_IO", + [const_ilog2(UBLK_F_UPDATE_SIZE)] = "UPDATE_SIZE", + [const_ilog2(UBLK_F_AUTO_BUF_REG)] = "AUTO_BUF_REG", + [const_ilog2(UBLK_F_QUIESCE)] = "QUIESCE", }; struct ublk_dev *dev; __u64 features = 0; @@ -1239,13 +1299,66 @@ static int cmd_dev_get_features(void) return ret; } +static int cmd_dev_update_size(struct dev_ctx *ctx) +{ + struct ublk_dev *dev = ublk_ctrl_init(); + struct ublk_params p; + int ret = -EINVAL; + + if (!dev) + return -ENODEV; + + if (ctx->dev_id < 0) { + fprintf(stderr, "device id isn't provided\n"); + goto out; + } + + dev->dev_info.dev_id = ctx->dev_id; + ret = ublk_ctrl_get_params(dev, &p); + if (ret < 0) { + ublk_err("failed to get params %d %s\n", ret, strerror(-ret)); + goto out; + } + + if (ctx->size & ((1 << p.basic.logical_bs_shift) - 1)) { + ublk_err("size isn't aligned with logical block size\n"); + ret = -EINVAL; + goto out; + } + + 
ret = ublk_ctrl_update_size(dev, ctx->size >> 9); +out: + ublk_ctrl_deinit(dev); + return ret; +} + +static int cmd_dev_quiesce(struct dev_ctx *ctx) +{ + struct ublk_dev *dev = ublk_ctrl_init(); + int ret = -EINVAL; + + if (!dev) + return -ENODEV; + + if (ctx->dev_id < 0) { + fprintf(stderr, "device id isn't provided for quiesce\n"); + goto out; + } + dev->dev_info.dev_id = ctx->dev_id; + ret = ublk_ctrl_quiesce_dev(dev, 10000); + +out: + ublk_ctrl_deinit(dev); + return ret; +} + static void __cmd_create_help(char *exe, bool recovery) { int i; printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n", exe, recovery ? "recover" : "add"); - printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g]\n"); + printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n"); printf("\t[-e 0|1 ] [-i 0|1]\n"); printf("\t[target options] [backfile1] [backfile2] ...\n"); printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n"); @@ -1281,6 +1394,8 @@ static int cmd_dev_help(char *exe) printf("%s list [-n dev_id] -a \n", exe); printf("\t -a list all devices, -n list specified device, default -a \n\n"); printf("%s features\n", exe); + printf("%s update_size -n dev_id -s|--size size_in_bytes \n", exe); + printf("%s quiesce -n dev_id\n", exe); return 0; } @@ -1300,6 +1415,9 @@ int main(int argc, char *argv[]) { "recovery_fail_io", 1, NULL, 'e'}, { "recovery_reissue", 1, NULL, 'i'}, { "get_data", 1, NULL, 'g'}, + { "auto_zc", 0, NULL, 0 }, + { "auto_zc_fallback", 0, NULL, 0 }, + { "size", 1, NULL, 's'}, { 0, 0, 0, 0 } }; const struct ublk_tgt_ops *ops = NULL; @@ -1321,7 +1439,7 @@ int main(int argc, char *argv[]) opterr = 0; optind = 2; - while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:gaz", + while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:s:gaz", longopts, &option_idx)) != -1) { switch (opt) { case 'a': @@ -1361,6 +1479,9 @@ int main(int argc, char *argv[]) case 'g': ctx.flags |= UBLK_F_NEED_GET_DATA; break; + case 's': + ctx.size = strtoull(optarg, NULL, 10); + break; case 0: if (!strcmp(longopts[option_idx].name, "debug_mask")) ublk_dbg_mask = strtol(optarg, NULL, 16); @@ -1368,6 +1489,10 @@ int main(int argc, char *argv[]) ublk_dbg_mask = 0; if (!strcmp(longopts[option_idx].name, "foreground")) ctx.fg = 1; + if (!strcmp(longopts[option_idx].name, "auto_zc")) + ctx.flags |= UBLK_F_AUTO_BUF_REG; + if (!strcmp(longopts[option_idx].name, "auto_zc_fallback")) + ctx.auto_zc_fallback = 1; break; case '?': /* @@ -1391,6 +1516,16 @@ int main(int argc, char *argv[]) } } + /* auto_zc_fallback depends on F_AUTO_BUF_REG & F_SUPPORT_ZERO_COPY */ + if (ctx.auto_zc_fallback && + !((ctx.flags & UBLK_F_AUTO_BUF_REG) && + (ctx.flags & UBLK_F_SUPPORT_ZERO_COPY))) { + ublk_err("%s: auto_zc_fallback is set but neither " + "F_AUTO_BUF_REG nor F_SUPPORT_ZERO_COPY is enabled\n", + __func__); + return -EINVAL; + } + i = optind; while (i < argc && ctx.nr_files < MAX_BACK_FILES) { ctx.files[ctx.nr_files++] = argv[i++]; @@ -1423,6 +1558,10 @@ int main(int argc, char *argv[]) ret = cmd_dev_help(argv[0]); else if (!strcmp(cmd, "features")) ret = cmd_dev_get_features(); + else if (!strcmp(cmd, "update_size")) + ret = cmd_dev_update_size(&ctx); + else if (!strcmp(cmd, "quiesce")) + ret = cmd_dev_quiesce(&ctx); else cmd_dev_help(argv[0]); diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h index 44ee1e4ac55b..e34508bf5798 100644 --- 
a/tools/testing/selftests/ublk/kublk.h +++ b/tools/testing/selftests/ublk/kublk.h @@ -19,7 +19,6 @@ #include <sys/inotify.h> #include <sys/wait.h> #include <sys/eventfd.h> -#include <sys/uio.h> #include <sys/ipc.h> #include <sys/shm.h> #include <linux/io_uring.h> @@ -85,6 +84,7 @@ struct dev_ctx { unsigned int all:1; unsigned int fg:1; unsigned int recovery:1; + unsigned int auto_zc_fallback:1; int _evtfd; int _shmid; @@ -92,6 +92,9 @@ struct dev_ctx { /* built from shmem, only for ublk_dump_dev() */ struct ublk_dev *shadow_dev; + /* for 'update_size' command */ + unsigned long long size; + union { struct stripe_ctx stripe; struct fault_inject_ctx fault_inject; @@ -116,6 +119,7 @@ struct ublk_io { #define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1) #define UBLKSRV_IO_FREE (1UL << 2) #define UBLKSRV_NEED_GET_DATA (1UL << 3) +#define UBLKSRV_NEED_REG_BUF (1UL << 4) unsigned short flags; unsigned short refs; /* used by target code only */ @@ -141,6 +145,9 @@ struct ublk_tgt_ops { */ void (*parse_cmd_line)(struct dev_ctx *ctx, int argc, char *argv[]); void (*usage)(const struct ublk_tgt_ops *ops); + + /* return buffer index for UBLK_F_AUTO_BUF_REG */ + unsigned short (*buf_index)(const struct ublk_queue *, int tag); }; struct ublk_tgt { @@ -169,6 +176,8 @@ struct ublk_queue { #define UBLKSRV_QUEUE_IDLE (1U << 1) #define UBLKSRV_NO_BUF (1U << 2) #define UBLKSRV_ZC (1U << 3) +#define UBLKSRV_AUTO_BUF_REG (1U << 4) +#define UBLKSRV_AUTO_BUF_REG_FALLBACK (1U << 5) unsigned state; pid_t tid; pthread_t thread; @@ -204,6 +213,12 @@ struct ublk_dev { extern unsigned int ublk_dbg_mask; extern int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag); + +static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod) +{ + return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF); +} + static inline int is_target_io(__u64 user_data) { return (user_data & (1ULL << 63)) != 0; @@ -388,6 +403,11 @@ static inline int ublk_queue_use_zc(const struct ublk_queue *q) return q->state & UBLKSRV_ZC; } +static inline int ublk_queue_use_auto_zc(const struct ublk_queue *q) +{ + return q->state & UBLKSRV_AUTO_BUF_REG; +} + extern const struct ublk_tgt_ops null_tgt_ops; extern const struct ublk_tgt_ops loop_tgt_ops; extern const struct ublk_tgt_ops stripe_tgt_ops; diff --git a/tools/testing/selftests/ublk/null.c b/tools/testing/selftests/ublk/null.c index 91fec3690d4b..44aca31cf2b0 100644 --- a/tools/testing/selftests/ublk/null.c +++ b/tools/testing/selftests/ublk/null.c @@ -42,10 +42,22 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev) return 0; } +static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod, + struct io_uring_sqe *sqe) +{ + unsigned ublk_op = ublksrv_get_op(iod); + + io_uring_prep_nop(sqe); + sqe->buf_index = tag; + sqe->flags |= IOSQE_FIXED_FILE; + sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT; + sqe->len = iod->nr_sectors << 9; /* injected result */ + sqe->user_data = build_user_data(tag, ublk_op, 0, 1); +} + static int null_queue_zc_io(struct ublk_queue *q, int tag) { const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); - unsigned ublk_op = ublksrv_get_op(iod); struct io_uring_sqe *sqe[3]; ublk_queue_alloc_sqes(q, sqe, 3); @@ -55,12 +67,8 @@ static int null_queue_zc_io(struct ublk_queue *q, int tag) ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1); sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK; - io_uring_prep_nop(sqe[1]); - sqe[1]->buf_index = tag; - sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK; - 
sqe[1]->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT; - sqe[1]->len = iod->nr_sectors << 9; /* injected result */ - sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1); + __setup_nop_io(tag, iod, sqe[1]); + sqe[1]->flags |= IOSQE_IO_HARDLINK; io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag); sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1); @@ -69,6 +77,16 @@ static int null_queue_zc_io(struct ublk_queue *q, int tag) return 2; } +static int null_queue_auto_zc_io(struct ublk_queue *q, int tag) +{ + const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); + struct io_uring_sqe *sqe[1]; + + ublk_queue_alloc_sqes(q, sqe, 1); + __setup_nop_io(tag, iod, sqe[0]); + return 1; +} + static void ublk_null_io_done(struct ublk_queue *q, int tag, const struct io_uring_cqe *cqe) { @@ -94,22 +112,37 @@ static void ublk_null_io_done(struct ublk_queue *q, int tag, static int ublk_null_queue_io(struct ublk_queue *q, int tag) { const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); - int zc = ublk_queue_use_zc(q); + unsigned auto_zc = ublk_queue_use_auto_zc(q); + unsigned zc = ublk_queue_use_zc(q); int queued; - if (!zc) { + if (auto_zc && !ublk_io_auto_zc_fallback(iod)) + queued = null_queue_auto_zc_io(q, tag); + else if (zc) + queued = null_queue_zc_io(q, tag); + else { ublk_complete_io(q, tag, iod->nr_sectors << 9); return 0; } - - queued = null_queue_zc_io(q, tag); ublk_queued_tgt_io(q, tag, queued); return 0; } +/* + * return invalid buffer index for triggering auto buffer register failure, + * then UBLK_IO_RES_NEED_REG_BUF handling is covered + */ +static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag) +{ + if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK) + return (unsigned short)-1; + return tag; +} + const struct ublk_tgt_ops null_tgt_ops = { .name = "null", .init_tgt = ublk_null_tgt_init, .queue_io = ublk_null_queue_io, .tgt_io_done = ublk_null_io_done, + .buf_index = ublk_null_buf_index, }; diff --git a/tools/testing/selftests/ublk/stripe.c b/tools/testing/selftests/ublk/stripe.c index 5dbd6392d83d..404a143bf3d6 100644 --- a/tools/testing/selftests/ublk/stripe.c +++ b/tools/testing/selftests/ublk/stripe.c @@ -70,7 +70,7 @@ static void free_stripe_array(struct stripe_array *s) } static void calculate_stripe_array(const struct stripe_conf *conf, - const struct ublksrv_io_desc *iod, struct stripe_array *s) + const struct ublksrv_io_desc *iod, struct stripe_array *s, void *base) { const unsigned shift = conf->shift - 9; const unsigned chunk_sects = 1 << shift; @@ -102,7 +102,7 @@ static void calculate_stripe_array(const struct stripe_conf *conf, } assert(this->nr_vec < this->cap); - this->vec[this->nr_vec].iov_base = (void *)(iod->addr + done); + this->vec[this->nr_vec].iov_base = (void *)(base + done); this->vec[this->nr_vec++].iov_len = nr_sects << 9; start += nr_sects; @@ -126,15 +126,17 @@ static inline enum io_uring_op stripe_to_uring_op( static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag) { const struct stripe_conf *conf = get_chunk_shift(q); - int zc = !!(ublk_queue_use_zc(q) != 0); - enum io_uring_op op = stripe_to_uring_op(iod, zc); + unsigned auto_zc = (ublk_queue_use_auto_zc(q) != 0); + unsigned zc = (ublk_queue_use_zc(q) != 0); + enum io_uring_op op = stripe_to_uring_op(iod, zc | auto_zc); struct io_uring_sqe *sqe[NR_STRIPE]; struct stripe_array *s = alloc_stripe_array(conf, iod); struct ublk_io *io = ublk_get_io(q, tag); int i, extra = zc ? 
2 : 0; + void *base = (zc | auto_zc) ? NULL : (void *)iod->addr; io->private_data = s; - calculate_stripe_array(conf, iod, s); + calculate_stripe_array(conf, iod, s, base); ublk_queue_alloc_sqes(q, sqe, s->nr + extra); @@ -153,12 +155,11 @@ static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_ (void *)t->vec, t->nr_vec, t->start << 9); - if (zc) { + io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE); + if (auto_zc || zc) { sqe[i]->buf_index = tag; - io_uring_sqe_set_flags(sqe[i], - IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK); - } else { - io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE); + if (zc) + sqe[i]->flags |= IOSQE_IO_HARDLINK; } /* bit63 marks us as tgt io */ sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, 1); @@ -287,6 +288,11 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev) loff_t bytes = 0; int ret, i, mul = 1; + if (ctx->auto_zc_fallback) { + ublk_err("%s: not support auto_zc_fallback\n", __func__); + return -EINVAL; + } + if ((chunk_size & (chunk_size - 1)) || !chunk_size) { ublk_err("invalid chunk size %u\n", chunk_size); return -EINVAL; diff --git a/tools/testing/selftests/ublk/test_common.sh b/tools/testing/selftests/ublk/test_common.sh index a81210ca3e99..0145569ee7e9 100755 --- a/tools/testing/selftests/ublk/test_common.sh +++ b/tools/testing/selftests/ublk/test_common.sh @@ -23,6 +23,11 @@ _get_disk_dev_t() { echo $(( (major & 0xfff) << 20 | (minor & 0xfffff) )) } +_get_disk_size() +{ + lsblk -b -o SIZE -n "$1" +} + _run_fio_verify_io() { fio --name=verify --rw=randwrite --direct=1 --ioengine=libaio \ --bs=8k --iodepth=32 --verify=crc32c --do_verify=1 \ @@ -215,6 +220,26 @@ _recover_ublk_dev() { echo "$state" } +# quiesce device and return ublk device state +__ublk_quiesce_dev() +{ + local dev_id=$1 + local exp_state=$2 + local state + + if ! ${UBLK_PROG} quiesce -n "${dev_id}"; then + state=$(_get_ublk_dev_state "${dev_id}") + return "$state" + fi + + for ((j=0;j<50;j++)); do + state=$(_get_ublk_dev_state "${dev_id}") + [ "$state" == "$exp_state" ] && break + sleep 1 + done + echo "$state" +} + # kill the ublk daemon and return ublk device state __ublk_kill_daemon() { @@ -251,7 +276,7 @@ __run_io_and_remove() local kill_server=$3 fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \ - --rw=readwrite --iodepth=256 --size="${size}" --numjobs=4 \ + --rw=randrw --norandommap --iodepth=256 --size="${size}" --numjobs="$(nproc)" \ --runtime=20 --time_based > /dev/null 2>&1 & sleep 2 if [ "${kill_server}" = "yes" ]; then @@ -303,20 +328,26 @@ run_io_and_kill_daemon() run_io_and_recover() { + local action=$1 local state local dev_id + shift 1 dev_id=$(_add_ublk_dev "$@") _check_add_dev "$TID" $? 
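# Start background fio on the new ublk disk, then either kill the ublk daemon or quiesce the device; both paths must leave the device in the QUIESCED state before the recovery step that follows.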
fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \ - --rw=readwrite --iodepth=256 --size="${size}" --numjobs=4 \ + --rw=randread --iodepth=256 --size="${size}" --numjobs=4 \ --runtime=20 --time_based > /dev/null 2>&1 & sleep 4 - state=$(__ublk_kill_daemon "${dev_id}" "QUIESCED") + if [ "$action" == "kill_daemon" ]; then + state=$(__ublk_kill_daemon "${dev_id}" "QUIESCED") + elif [ "$action" == "quiesce_dev" ]; then + state=$(__ublk_quiesce_dev "${dev_id}" "QUIESCED") + fi if [ "$state" != "QUIESCED" ]; then - echo "device isn't quiesced($state) after killing daemon" + echo "device isn't quiesced($state) after $action" return 255 fi diff --git a/tools/testing/selftests/ublk/test_generic_04.sh b/tools/testing/selftests/ublk/test_generic_04.sh index 8a3bc080c577..8b533217d4a1 100755 --- a/tools/testing/selftests/ublk/test_generic_04.sh +++ b/tools/testing/selftests/ublk/test_generic_04.sh @@ -8,7 +8,7 @@ ERR_CODE=0 ublk_run_recover_test() { - run_io_and_recover "$@" + run_io_and_recover "kill_daemon" "$@" ERR_CODE=$? if [ ${ERR_CODE} -ne 0 ]; then echo "$TID failure: $*" diff --git a/tools/testing/selftests/ublk/test_generic_05.sh b/tools/testing/selftests/ublk/test_generic_05.sh index 3bb00a347402..398e9e2b58e1 100755 --- a/tools/testing/selftests/ublk/test_generic_05.sh +++ b/tools/testing/selftests/ublk/test_generic_05.sh @@ -8,7 +8,7 @@ ERR_CODE=0 ublk_run_recover_test() { - run_io_and_recover "$@" + run_io_and_recover "kill_daemon" "$@" ERR_CODE=$? if [ ${ERR_CODE} -ne 0 ]; then echo "$TID failure: $*" diff --git a/tools/testing/selftests/ublk/test_generic_06.sh b/tools/testing/selftests/ublk/test_generic_06.sh index b67230c42c84..fd42062b7b76 100755 --- a/tools/testing/selftests/ublk/test_generic_06.sh +++ b/tools/testing/selftests/ublk/test_generic_06.sh @@ -17,7 +17,7 @@ STARTTIME=${SECONDS} dd if=/dev/urandom of=/dev/ublkb${dev_id} oflag=direct bs=4k count=1 status=none > /dev/null 2>&1 & dd_pid=$! -__ublk_kill_daemon ${dev_id} "DEAD" +__ublk_kill_daemon ${dev_id} "DEAD" >/dev/null wait $dd_pid dd_exitcode=$? diff --git a/tools/testing/selftests/ublk/test_generic_08.sh b/tools/testing/selftests/ublk/test_generic_08.sh new file mode 100755 index 000000000000..b222f3a77e12 --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_08.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_08" +ERR_CODE=0 + +if ! _have_feature "AUTO_BUF_REG"; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "generic" "test UBLK_F_AUTO_BUF_REG" + +_create_backfile 0 256M +_create_backfile 1 256M + +dev_id=$(_add_ublk_dev -t loop -q 2 --auto_zc "${UBLK_BACKFILES[0]}") +_check_add_dev $TID $? + +if ! _mkfs_mount_test /dev/ublkb"${dev_id}"; then + _cleanup_test "generic" + _show_result $TID 255 +fi + +dev_id=$(_add_ublk_dev -t stripe --auto_zc "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}") +_check_add_dev $TID $? +_mkfs_mount_test /dev/ublkb"${dev_id}" +ERR_CODE=$? + +_cleanup_test "generic" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_09.sh b/tools/testing/selftests/ublk/test_generic_09.sh new file mode 100755 index 000000000000..bb6f77ca5522 --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_09.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_09" +ERR_CODE=0 + +if ! _have_feature "AUTO_BUF_REG"; then + exit "$UBLK_SKIP_CODE" +fi + +if ! 
_have_program fio; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "null" "basic IO test" + +dev_id=$(_add_ublk_dev -t null -z --auto_zc --auto_zc_fallback) +_check_add_dev $TID $? + +# run fio over the two disks +fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1 +ERR_CODE=$? + +_cleanup_test "null" + +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_10.sh b/tools/testing/selftests/ublk/test_generic_10.sh new file mode 100755 index 000000000000..abc11c3d416b --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_10.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_10" +ERR_CODE=0 + +if ! _have_feature "UPDATE_SIZE"; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "null" "check update size" + +dev_id=$(_add_ublk_dev -t null) +_check_add_dev $TID $? + +size=$(_get_disk_size /dev/ublkb"${dev_id}") +size=$(( size / 2 )) +if ! "$UBLK_PROG" update_size -n "$dev_id" -s "$size"; then + ERR_CODE=255 +fi + +new_size=$(_get_disk_size /dev/ublkb"${dev_id}") +if [ "$new_size" != "$size" ]; then + ERR_CODE=255 +fi + +_cleanup_test "null" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_generic_11.sh b/tools/testing/selftests/ublk/test_generic_11.sh new file mode 100755 index 000000000000..a00357a5ec6b --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_11.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_11" +ERR_CODE=0 + +ublk_run_quiesce_recover() +{ + run_io_and_recover "quiesce_dev" "$@" + ERR_CODE=$? + if [ ${ERR_CODE} -ne 0 ]; then + echo "$TID failure: $*" + _show_result $TID $ERR_CODE + fi +} + +if ! _have_feature "QUIESCE"; then + exit "$UBLK_SKIP_CODE" +fi + +if ! 
_have_program fio; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "quiesce" "basic quiesce & recover function verification" + +_create_backfile 0 256M +_create_backfile 1 128M +_create_backfile 2 128M + +ublk_run_quiesce_recover -t null -q 2 -r 1 & +ublk_run_quiesce_recover -t loop -q 2 -r 1 "${UBLK_BACKFILES[0]}" & +ublk_run_quiesce_recover -t stripe -q 2 -r 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & +wait + +ublk_run_quiesce_recover -t null -q 2 -r 1 -i 1 & +ublk_run_quiesce_recover -t loop -q 2 -r 1 -i 1 "${UBLK_BACKFILES[0]}" & +ublk_run_quiesce_recover -t stripe -q 2 -r 1 -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & +wait + +_cleanup_test "quiesce" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_stress_02.sh b/tools/testing/selftests/ublk/test_stress_02.sh index 1a9065125ae1..4bdd921081e5 100755 --- a/tools/testing/selftests/ublk/test_stress_02.sh +++ b/tools/testing/selftests/ublk/test_stress_02.sh @@ -25,10 +25,12 @@ _create_backfile 0 256M _create_backfile 1 128M _create_backfile 2 128M -ublk_io_and_kill_daemon 8G -t null -q 4 & -ublk_io_and_kill_daemon 256M -t loop -q 4 "${UBLK_BACKFILES[0]}" & -ublk_io_and_kill_daemon 256M -t stripe -q 4 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & -wait +for nr_queue in 1 4; do + ublk_io_and_kill_daemon 8G -t null -q "$nr_queue" & + ublk_io_and_kill_daemon 256M -t loop -q "$nr_queue" "${UBLK_BACKFILES[0]}" & + ublk_io_and_kill_daemon 256M -t stripe -q "$nr_queue" "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + wait +done _cleanup_test "stress" _show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_stress_03.sh b/tools/testing/selftests/ublk/test_stress_03.sh index e0854f71d35b..7d728ce50774 100755 --- a/tools/testing/selftests/ublk/test_stress_03.sh +++ b/tools/testing/selftests/ublk/test_stress_03.sh @@ -32,6 +32,13 @@ _create_backfile 2 128M ublk_io_and_remove 8G -t null -q 4 -z & ublk_io_and_remove 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" & ublk_io_and_remove 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + +if _have_feature "AUTO_BUF_REG"; then + ublk_io_and_remove 8G -t null -q 4 --auto_zc & + ublk_io_and_remove 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" & + ublk_io_and_remove 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + ublk_io_and_remove 8G -t null -q 4 -z --auto_zc --auto_zc_fallback & +fi wait _cleanup_test "stress" diff --git a/tools/testing/selftests/ublk/test_stress_04.sh b/tools/testing/selftests/ublk/test_stress_04.sh index 1798a98387e8..9bcfa64ea1f0 100755 --- a/tools/testing/selftests/ublk/test_stress_04.sh +++ b/tools/testing/selftests/ublk/test_stress_04.sh @@ -31,6 +31,13 @@ _create_backfile 2 128M ublk_io_and_kill_daemon 8G -t null -q 4 -z & ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" & ublk_io_and_kill_daemon 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + +if _have_feature "AUTO_BUF_REG"; then + ublk_io_and_kill_daemon 8G -t null -q 4 --auto_zc & + ublk_io_and_kill_daemon 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" & + ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" & + ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback & +fi wait _cleanup_test "stress" diff --git a/tools/testing/selftests/ublk/test_stress_05.sh b/tools/testing/selftests/ublk/test_stress_05.sh index 88601b48f1cd..bcfc904cefc6 100755 --- a/tools/testing/selftests/ublk/test_stress_05.sh +++ 
b/tools/testing/selftests/ublk/test_stress_05.sh @@ -60,5 +60,14 @@ if _have_feature "ZERO_COPY"; then done fi +if _have_feature "AUTO_BUF_REG"; then + for reissue in $(seq 0 1); do + ublk_io_and_remove 8G -t null -q 4 -g --auto_zc -r 1 -i "$reissue" & + ublk_io_and_remove 256M -t loop -q 4 -g --auto_zc -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & + ublk_io_and_remove 8G -t null -q 4 -g -z --auto_zc --auto_zc_fallback -r 1 -i "$reissue" & + wait + done +fi + _cleanup_test "stress" _show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 28422c32cc8f..f703fcfe9f7c 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -19,7 +19,7 @@ TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip syscall_numbering \ - corrupt_xstate_header amx lam test_shadow_stack avx + corrupt_xstate_header amx lam test_shadow_stack avx apx # Some selftests require 32bit support enabled also on 64bit systems TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall @@ -136,3 +136,4 @@ $(OUTPUT)/nx_stack_64: CFLAGS += -Wl,-z,noexecstack $(OUTPUT)/avx_64: CFLAGS += -mno-avx -mno-avx512f $(OUTPUT)/amx_64: EXTRA_FILES += xstate.c $(OUTPUT)/avx_64: EXTRA_FILES += xstate.c +$(OUTPUT)/apx_64: EXTRA_FILES += xstate.c diff --git a/tools/testing/selftests/x86/apx.c b/tools/testing/selftests/x86/apx.c new file mode 100644 index 000000000000..d9c8d41b8c5a --- /dev/null +++ b/tools/testing/selftests/x86/apx.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE + +#include "xstate.h" + +int main(void) +{ + test_xstate(XFEATURE_APX); +} diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c index 18d736640ece..0873b0e5f48b 100644 --- a/tools/testing/selftests/x86/lam.c +++ b/tools/testing/selftests/x86/lam.c @@ -682,7 +682,7 @@ int do_uring(unsigned long lam) return 1; if (fstat(file_fd, &st) < 0) - return 1; + goto cleanup; off_t file_sz = st.st_size; @@ -690,7 +690,7 @@ int do_uring(unsigned long lam) fi = malloc(sizeof(*fi) + sizeof(struct iovec) * blocks); if (!fi) - return 1; + goto cleanup; fi->file_sz = file_sz; fi->file_fd = file_fd; @@ -698,7 +698,7 @@ int do_uring(unsigned long lam) ring = malloc(sizeof(*ring)); if (!ring) { free(fi); - return 1; + goto cleanup; } memset(ring, 0, sizeof(struct io_ring)); @@ -729,6 +729,8 @@ out: } free(fi); +cleanup: + close(file_fd); return ret; } @@ -1189,6 +1191,7 @@ void *allocate_dsa_pasid(void) wq = mmap(NULL, 0x1000, PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, 0); + close(fd); if (wq == MAP_FAILED) perror("mmap"); diff --git a/tools/testing/selftests/x86/xstate.c b/tools/testing/selftests/x86/xstate.c index 23c1d6c964ea..97fe4bd8bc77 100644 --- a/tools/testing/selftests/x86/xstate.c +++ b/tools/testing/selftests/x86/xstate.c @@ -31,7 +31,8 @@ (1 << XFEATURE_OPMASK) | \ (1 << XFEATURE_ZMM_Hi256) | \ (1 << XFEATURE_Hi16_ZMM) | \ - (1 << XFEATURE_XTILEDATA)) + (1 << XFEATURE_XTILEDATA) | \ + (1 << XFEATURE_APX)) static inline uint64_t xgetbv(uint32_t index) { diff --git a/tools/testing/selftests/x86/xstate.h b/tools/testing/selftests/x86/xstate.h index 42af36ec852f..e91e3092b5d2 100644 --- a/tools/testing/selftests/x86/xstate.h +++ b/tools/testing/selftests/x86/xstate.h @@ -33,6 +33,7 @@ enum xfeature { XFEATURE_RSRVD_COMP_16, XFEATURE_XTILECFG, XFEATURE_XTILEDATA, + XFEATURE_APX, XFEATURE_MAX, }; @@ -59,6 +60,7 @@ static const 
char *xfeature_names[] = "unknown xstate feature", "AMX Tile config", "AMX Tile data", + "APX registers", "unknown xstate feature", }; |